Compare commits

...

10 Commits

Author SHA1 Message Date
e049e4e104 sanity check for shell configs 2026-03-07 14:52:19 +08:00
8d0cd7861b update tests 2026-03-07 14:52:00 +08:00
7920a1bab3 init deepagent config 2026-03-07 14:51:49 +08:00
30e93ed0a7 update prompt store retrieval 2026-03-07 14:51:20 +08:00
0087ac41e6 build_deepagent in build_server_utils 2026-03-07 14:51:04 +08:00
5358b46113 remove override support 2026-03-07 14:50:48 +08:00
3306da038d front_api update 2026-03-07 14:50:25 +08:00
ac99dfd56b front end update:
- chat support markdown
- stop button for chat
- configurable for deepagent
2026-03-07 14:50:01 +08:00
3932d695bf update comments 2026-03-06 18:38:07 +08:00
9ed141ba42 update tests 2026-03-06 18:37:59 +08:00
13 changed files with 305 additions and 51 deletions

View File

@@ -39,6 +39,7 @@ type EditableAgent = {
prompts: Record<string, string>; prompts: Record<string, string>;
apiKey: string; apiKey: string;
llmName: string; llmName: string;
actBackend: DeepAgentActBackend;
}; };
type AgentChatMessage = { type AgentChatMessage = {
@@ -48,6 +49,7 @@ type AgentChatMessage = {
}; };
type ActiveTab = "agents" | "discussions" | "mcp"; type ActiveTab = "agents" | "discussions" | "mcp";
type DeepAgentActBackend = "state_bk" | "local_shell" | "daytona_sandbox";
type McpTransport = "streamable_http" | "sse" | "stdio"; type McpTransport = "streamable_http" | "sse" | "stdio";
type McpEntry = { type McpEntry = {
id: string; id: string;
@@ -62,6 +64,15 @@ type McpEntry = {
const DEFAULT_LLM_NAME = "qwen-plus"; const DEFAULT_LLM_NAME = "qwen-plus";
const DEFAULT_API_KEY = ""; const DEFAULT_API_KEY = "";
const DEFAULT_DEEPAGENT_ACT_BACKEND: DeepAgentActBackend = "state_bk";
const DEEPAGENT_BACKEND_OPTIONS: Array<{
value: DeepAgentActBackend;
label: string;
}> = [
{ value: "state_bk", label: "state_bk" },
{ value: "local_shell", label: "local_shell" },
{ value: "daytona_sandbox", label: "daytona_sandbox" },
];
const LOCAL_DASHSCOPE_BASE = "http://127.0.0.1:8500/v1/apps"; const LOCAL_DASHSCOPE_BASE = "http://127.0.0.1:8500/v1/apps";
const MCP_TRANSPORT_OPTIONS: McpTransport[] = ["streamable_http", "sse", "stdio"]; const MCP_TRANSPORT_OPTIONS: McpTransport[] = ["streamable_http", "sse", "stdio"];
const GRAPH_ARCH_IMAGE_MODULES = import.meta.glob( const GRAPH_ARCH_IMAGE_MODULES = import.meta.glob(
@@ -74,6 +85,9 @@ const FALLBACK_PROMPTS_BY_GRAPH: Record<string, Record<string, string>> = {
chat_prompt: "", chat_prompt: "",
tool_prompt: "", tool_prompt: "",
}, },
deepagent: {
sys_prompt: "",
},
react: { react: {
sys_prompt: "", sys_prompt: "",
}, },
@@ -412,6 +426,26 @@ function createConversationId(): string {
return `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`; return `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
} }
function normalizeDeepAgentActBackend(value: unknown): DeepAgentActBackend {
if (value === "local_shell" || value === "localshell") {
return "local_shell";
}
if (value === "daytona_sandbox" || value === "daytonasandbox") {
return "daytona_sandbox";
}
if (value === "state_bk" || value === "statebk") {
return "state_bk";
}
return DEFAULT_DEEPAGENT_ACT_BACKEND;
}
function buildGraphParams(editor: EditableAgent): Record<string, unknown> {
if (editor.graphId === "deepagent") {
return { act_bkend: editor.actBackend };
}
return {};
}
function toEditable( function toEditable(
config: GraphConfigReadResponse, config: GraphConfigReadResponse,
draft: boolean draft: boolean
@@ -428,6 +462,7 @@ function toEditable(
prompts: config.prompt_dict || {}, prompts: config.prompt_dict || {},
apiKey: config.api_key || DEFAULT_API_KEY, apiKey: config.api_key || DEFAULT_API_KEY,
llmName: DEFAULT_LLM_NAME, llmName: DEFAULT_LLM_NAME,
actBackend: DEFAULT_DEEPAGENT_ACT_BACKEND,
}; };
} }
@@ -455,6 +490,7 @@ export default function App() {
const [chatInput, setChatInput] = useState<string>(""); const [chatInput, setChatInput] = useState<string>("");
const [chatMessages, setChatMessages] = useState<AgentChatMessage[]>([]); const [chatMessages, setChatMessages] = useState<AgentChatMessage[]>([]);
const [chatSending, setChatSending] = useState(false); const [chatSending, setChatSending] = useState(false);
const [chatAbortController, setChatAbortController] = useState<AbortController | null>(null);
const [busy, setBusy] = useState(false); const [busy, setBusy] = useState(false);
const configKeySet = useMemo( const configKeySet = useMemo(
@@ -714,6 +750,10 @@ export default function App() {
graphId, graphId,
prompts: { ...defaults.prompt_dict }, prompts: { ...defaults.prompt_dict },
toolKeys: defaults.tool_keys || [], toolKeys: defaults.tool_keys || [],
actBackend:
graphId === "deepagent"
? prev.actBackend || DEFAULT_DEEPAGENT_ACT_BACKEND
: DEFAULT_DEEPAGENT_ACT_BACKEND,
}; };
if (next.isDraft) { if (next.isDraft) {
setDraftAgents((drafts) => drafts.map((draft) => (draft.id === next.id ? next : draft))); setDraftAgents((drafts) => drafts.map((draft) => (draft.id === next.id ? next : draft)));
@@ -905,9 +945,8 @@ export default function App() {
const active = await getPipelineDefaultConfig(editor.pipelineId.trim()); const active = await getPipelineDefaultConfig(editor.pipelineId.trim());
targetPromptSetId = active.prompt_set_id; targetPromptSetId = active.prompt_set_id;
} catch { } catch {
throw new Error( // For a brand-new pipeline, let backend create a new prompt set.
"No active prompt set for this pipeline. Create/activate one via backend first." targetPromptSetId = undefined;
);
} }
} }
const upsertResp = await upsertGraphConfig({ const upsertResp = await upsertGraphConfig({
@@ -929,6 +968,7 @@ export default function App() {
api_key: editor.apiKey.trim(), api_key: editor.apiKey.trim(),
llm_name: editor.llmName || DEFAULT_LLM_NAME, llm_name: editor.llmName || DEFAULT_LLM_NAME,
enabled: isEditorRunning, enabled: isEditorRunning,
graph_params: buildGraphParams(editor),
}); });
await refreshRunning(); await refreshRunning();
} catch (error) { } catch (error) {
@@ -1012,6 +1052,7 @@ export default function App() {
api_key: editor.apiKey.trim(), api_key: editor.apiKey.trim(),
llm_name: editor.llmName, llm_name: editor.llmName,
enabled: true, enabled: true,
graph_params: buildGraphParams(editor),
}); });
await refreshRunning(); await refreshRunning();
if (resp.reload_required) { if (resp.reload_required) {
@@ -1081,6 +1122,9 @@ export default function App() {
} }
const authKey = runtimeFastApiKey.trim() || "dev-key"; const authKey = runtimeFastApiKey.trim() || "dev-key";
const controller = new AbortController();
setChatAbortController(controller);
const userMessage: AgentChatMessage = { const userMessage: AgentChatMessage = {
id: `user-${Date.now()}`, id: `user-${Date.now()}`,
role: "user", role: "user",
@@ -1104,6 +1148,7 @@ export default function App() {
sessionId: chatConversationId, sessionId: chatConversationId,
apiKey: authKey, apiKey: authKey,
message, message,
signal: controller.signal,
onText: (text) => { onText: (text) => {
setChatMessages((prev) => setChatMessages((prev) =>
prev.map((item) => prev.map((item) =>
@@ -1118,6 +1163,19 @@ export default function App() {
}, },
}); });
} catch (error) { } catch (error) {
if ((error as Error).message === "Request cancelled") {
setChatMessages((prev) =>
prev.map((item) =>
item.id === assistantMessageId
? {
...item,
content: item.content + "\n\n*[Stopped]*",
}
: item
)
);
return;
}
const messageText = (error as Error).message || "Chat request failed."; const messageText = (error as Error).message || "Chat request failed.";
setStatusMessage(messageText); setStatusMessage(messageText);
setChatMessages((prev) => setChatMessages((prev) =>
@@ -1132,6 +1190,7 @@ export default function App() {
); );
} finally { } finally {
setChatSending(false); setChatSending(false);
setChatAbortController(null);
} }
} }
@@ -1337,6 +1396,28 @@ export default function App() {
/> />
</label> </label>
{editor.graphId === "deepagent" ? (
<label>
act_bkend
<select
value={editor.actBackend}
onChange={(e) =>
updateEditor(
"actBackend",
normalizeDeepAgentActBackend(e.target.value)
)
}
disabled={busy}
>
{DEEPAGENT_BACKEND_OPTIONS.map((option) => (
<option key={option.value} value={option.value}>
{option.label}
</option>
))}
</select>
</label>
) : null}
<div className="prompt-section"> <div className="prompt-section">
<h3>Prompts</h3> <h3>Prompts</h3>
{Object.keys(editor.prompts).length === 0 ? ( {Object.keys(editor.prompts).length === 0 ? (
@@ -1670,7 +1751,11 @@ export default function App() {
className={`chat-modal-message ${message.role === "assistant" ? "assistant" : "user"}`} className={`chat-modal-message ${message.role === "assistant" ? "assistant" : "user"}`}
> >
<strong>{message.role === "assistant" ? "Agent" : "You"}</strong> <strong>{message.role === "assistant" ? "Agent" : "You"}</strong>
<p>{message.content || (chatSending && message.role === "assistant" ? "..." : "")}</p> <div className="chat-message-content">
<ReactMarkdown remarkPlugins={[remarkGfm]}>
{message.content || (chatSending && message.role === "assistant" ? "..." : "")}
</ReactMarkdown>
</div>
</article> </article>
)) ))
)} )}
@@ -1679,19 +1764,41 @@ export default function App() {
<textarea <textarea
value={chatInput} value={chatInput}
onChange={(event) => setChatInput(event.target.value)} onChange={(event) => setChatInput(event.target.value)}
placeholder="Type your message..." onKeyDown={(event) => {
if (event.key === "Enter" && !event.shiftKey) {
event.preventDefault();
if (chatInput.trim() && !chatSending) {
sendAgentChatMessage().catch(() => undefined);
}
}
}}
placeholder="Type your message... (Enter to send, Shift+Enter for new line)"
rows={3} rows={3}
disabled={chatSending} disabled={chatSending}
/> />
<button <div className="chat-modal-actions">
type="button" {chatSending ? (
onClick={() => { <button
sendAgentChatMessage().catch(() => undefined); type="button"
}} className="chat-stop-button"
disabled={chatSending || !chatInput.trim()} onClick={() => {
> chatAbortController?.abort();
Send (Stream) }}
</button> >
Stop
</button>
) : (
<button
type="button"
onClick={() => {
sendAgentChatMessage().catch(() => undefined);
}}
disabled={chatSending || !chatInput.trim()}
>
Send (Stream)
</button>
)}
</div>
</div> </div>
</section> </section>
</div> </div>

View File

@@ -173,6 +173,7 @@ type StreamAgentChatOptions = {
apiKey: string; apiKey: string;
message: string; message: string;
onText: (text: string) => void; onText: (text: string) => void;
signal?: AbortSignal;
}; };
function parseErrorDetail(payload: unknown): string | null { function parseErrorDetail(payload: unknown): string | null {
@@ -186,7 +187,7 @@ function parseErrorDetail(payload: unknown): string | null {
export async function streamAgentChatResponse( export async function streamAgentChatResponse(
options: StreamAgentChatOptions options: StreamAgentChatOptions
): Promise<string> { ): Promise<string> {
const { appId, sessionId, apiKey, message, onText } = options; const { appId, sessionId, apiKey, message, onText, signal } = options;
const response = await fetch( const response = await fetch(
`${API_BASE_URL}/v1/apps/${encodeURIComponent(appId)}/sessions/${encodeURIComponent(sessionId)}/responses`, `${API_BASE_URL}/v1/apps/${encodeURIComponent(appId)}/sessions/${encodeURIComponent(sessionId)}/responses`,
{ {
@@ -199,6 +200,7 @@ export async function streamAgentChatResponse(
messages: [{ role: "user", content: message }], messages: [{ role: "user", content: message }],
stream: true, stream: true,
}), }),
signal,
} }
); );
@@ -226,6 +228,10 @@ export async function streamAgentChatResponse(
let latestText = ""; let latestText = "";
while (true) { while (true) {
if (signal?.aborted) {
reader.cancel();
throw new Error("Request cancelled");
}
const { value, done } = await reader.read(); const { value, done } = await reader.read();
if (done) { if (done) {
break; break;

View File

@@ -618,6 +618,7 @@ button:disabled {
display: grid; display: grid;
gap: 8px; gap: 8px;
grid-template-columns: 1fr auto; grid-template-columns: 1fr auto;
align-items: start;
} }
.chat-modal-input textarea { .chat-modal-input textarea {
@@ -628,3 +629,23 @@ button:disabled {
resize: vertical; resize: vertical;
} }
.chat-modal-actions {
display: flex;
gap: 8px;
height: 100%;
}
.chat-modal-actions button {
height: auto;
white-space: nowrap;
}
.chat-stop-button {
background-color: #dc3545;
color: white;
}
.chat-stop-button:hover {
background-color: #bb2d3b;
}

View File

@@ -55,6 +55,7 @@ export type PipelineCreateRequest = {
api_key?: string; api_key?: string;
llm_name: string; llm_name: string;
enabled?: boolean; enabled?: boolean;
graph_params?: Record<string, unknown>;
}; };
export type PipelineSpec = { export type PipelineSpec = {
@@ -63,7 +64,6 @@ export type PipelineSpec = {
enabled: boolean; enabled: boolean;
config_file: string; config_file: string;
llm_name: string; llm_name: string;
overrides: Record<string, unknown>;
}; };
export type PipelineCreateResponse = { export type PipelineCreateResponse = {

View File

@@ -55,7 +55,7 @@ class ServerPipelineManager:
parsed_specs[pipeline_id] = { parsed_specs[pipeline_id] = {
"enabled": bool(spec.get("enabled", True)), "enabled": bool(spec.get("enabled", True)),
"config_file": spec.get("config_file"), "config_file": spec.get("config_file"),
"overrides": spec.get("overrides", {}), "llm_name": spec.get("llm_name"),
} }
if not parsed_specs: if not parsed_specs:
raise ValueError("pipeline registry must define at least one pipeline.") raise ValueError("pipeline registry must define at least one pipeline.")
@@ -152,7 +152,7 @@ class ServerPipelineManager:
) )
config_file = spec.get("config_file") config_file = spec.get("config_file")
overrides = spec.get("overrides", {}) registry_llm_name = spec.get("llm_name")
if config_file: if config_file:
loaded_cfg = load_tyro_conf(self._resolve_config_path(config_file)) loaded_cfg = load_tyro_conf(self._resolve_config_path(config_file))
if hasattr(loaded_cfg, "setup"): if hasattr(loaded_cfg, "setup"):
@@ -166,19 +166,11 @@ class ServerPipelineManager:
) )
else: else:
cfg = copy.deepcopy(self.default_config) cfg = copy.deepcopy(self.default_config)
if not isinstance(overrides, dict): if registry_llm_name is not None and hasattr(cfg, "llm_name"):
raise ValueError( setattr(cfg, "llm_name", registry_llm_name)
f"pipeline `overrides` for `{pipeline_id}` must be an object."
)
for key, value in overrides.items():
if not hasattr(cfg, key):
raise ValueError(
f"unknown override field `{key}` for pipeline `{pipeline_id}`"
)
setattr(cfg, key, value)
p = cfg.setup() p = cfg.setup()
llm_name = getattr(cfg, "llm_name", "unknown-model") llm_name = str(getattr(cfg, "llm_name", registry_llm_name or "unknown-model"))
return p, llm_name return p, llm_name
def _authorize(self, api_key: str, pipeline_id: str) -> None: def _authorize(self, api_key: str, pipeline_id: str) -> None:

View File

@@ -85,6 +85,8 @@ class PipelineCreateRequest(BaseModel):
api_key: Optional[str] = Field(default=None) api_key: Optional[str] = Field(default=None)
llm_name: str = Field(default="qwen-plus") llm_name: str = Field(default="qwen-plus")
enabled: bool = Field(default=True) enabled: bool = Field(default=True)
# Arbitrary per-graph options forwarded to the graph build function.
graph_params: Dict[str, Any] = Field(default_factory=dict)
class PipelineSpec(BaseModel): class PipelineSpec(BaseModel):
@@ -93,7 +95,6 @@ class PipelineSpec(BaseModel):
enabled: bool enabled: bool
config_file: str config_file: str
llm_name: str llm_name: str
overrides: Dict[str, Any] = Field(default_factory=dict)
class PipelineCreateResponse(BaseModel): class PipelineCreateResponse(BaseModel):
@@ -314,19 +315,13 @@ def _resolve_runtime_fast_api_key() -> RuntimeAuthInfoResponse:
def _normalize_pipeline_spec(pipeline_id: str, spec: Dict[str, Any]) -> PipelineSpec: def _normalize_pipeline_spec(pipeline_id: str, spec: Dict[str, Any]) -> PipelineSpec:
if not isinstance(spec, dict): if not isinstance(spec, dict):
raise ValueError(f"pipeline spec for '{pipeline_id}' must be an object") raise ValueError(f"pipeline spec for '{pipeline_id}' must be an object")
overrides = spec.get("overrides", {}) llm_name = str(spec.get("llm_name") or "unknown")
if overrides is None:
overrides = {}
if not isinstance(overrides, dict):
raise ValueError(f"`overrides` for pipeline '{pipeline_id}' must be an object")
llm_name = str(overrides.get("llm_name") or "unknown")
return PipelineSpec( return PipelineSpec(
pipeline_id=pipeline_id, pipeline_id=pipeline_id,
graph_id=str(spec.get("graph_id") or pipeline_id), graph_id=str(spec.get("graph_id") or pipeline_id),
enabled=bool(spec.get("enabled", True)), enabled=bool(spec.get("enabled", True)),
config_file=str(spec.get("config_file") or ""), config_file=str(spec.get("config_file") or ""),
llm_name=llm_name, llm_name=llm_name,
overrides=overrides,
) )
@@ -616,6 +611,7 @@ async def create_pipeline(body: PipelineCreateRequest):
config_file = f"configs/pipelines/{pipeline_id}.yaml" config_file = f"configs/pipelines/{pipeline_id}.yaml"
config_abs_dir = osp.join(_PROJECT_ROOT, "configs", "pipelines") config_abs_dir = osp.join(_PROJECT_ROOT, "configs", "pipelines")
extra_params = dict(body.graph_params or {})
try: try:
build_fn( build_fn(
pipeline_id=pipeline_id, pipeline_id=pipeline_id,
@@ -624,6 +620,7 @@ async def create_pipeline(body: PipelineCreateRequest):
api_key=resolved_api_key, api_key=resolved_api_key,
llm_name=body.llm_name, llm_name=body.llm_name,
pipeline_config_dir=config_abs_dir, pipeline_config_dir=config_abs_dir,
**extra_params,
) )
update_pipeline_registry( update_pipeline_registry(

View File

@@ -1,4 +1,4 @@
from typing import Any, Dict, List from typing import Any, Dict, List, Literal
import os import os
import os.path as osp import os.path as osp
import subprocess import subprocess
@@ -7,6 +7,15 @@ import json
from lang_agent.config.core_config import load_tyro_conf from lang_agent.config.core_config import load_tyro_conf
from lang_agent.config.constants import TY_BUILD_SCRIPT, _PROJECT_ROOT from lang_agent.config.constants import TY_BUILD_SCRIPT, _PROJECT_ROOT
_DEEP_AGENT_BACKEND_ALIASES = {
"state_bk": "statebk",
"statebk": "statebk",
"local_shell": "localshell",
"localshell": "localshell",
"daytona_sandbox": "daytonasandbox",
"daytonasandbox": "daytonasandbox",
}
def opt_to_config(save_path: str, *nargs): def opt_to_config(save_path: str, *nargs):
os.makedirs(osp.dirname(save_path), exist_ok=True) os.makedirs(osp.dirname(save_path), exist_ok=True)
@@ -50,7 +59,7 @@ def update_pipeline_registry(
pipeline["enabled"] = bool(enabled) pipeline["enabled"] = bool(enabled)
pipeline["config_file"] = config_file pipeline["config_file"] = config_file
pipeline["graph_id"] = graph_id pipeline["graph_id"] = graph_id
pipeline["overrides"] = {"llm_name": llm_name} pipeline["llm_name"] = llm_name
with open(registry_f, "w", encoding="utf-8") as f: with open(registry_f, "w", encoding="utf-8") as f:
json.dump(registry, f, indent=4) json.dump(registry, f, indent=4)
@@ -62,7 +71,8 @@ def build_route(
tool_keys: List[str], tool_keys: List[str],
api_key: str, api_key: str,
llm_name: str = "qwen-plus", llm_name: str = "qwen-plus",
pipeline_config_dir="configs/pipelines", pipeline_config_dir: str = "configs/pipelines",
**_: Any,
): ):
cmd_opt = [ cmd_opt = [
"--pipeline.pipeline-id", pipeline_id, "--pipeline.pipeline-id", pipeline_id,
@@ -92,7 +102,8 @@ def build_react(
tool_keys: List[str], tool_keys: List[str],
api_key: str, api_key: str,
llm_name: str = "qwen-plus", llm_name: str = "qwen-plus",
pipeline_config_dir="configs/pipelines", pipeline_config_dir: str = "configs/pipelines",
**_: Any,
): ):
cmd_opt = [ cmd_opt = [
"--pipeline.pipeline-id", pipeline_id, "--pipeline.pipeline-id", pipeline_id,
@@ -110,8 +121,50 @@ def build_react(
return _build_and_load_pipeline_config(pipeline_id, pipeline_config_dir, cmd_opt) return _build_and_load_pipeline_config(pipeline_id, pipeline_config_dir, cmd_opt)
def build_deep_agent(
pipeline_id: str,
prompt_set: str,
tool_keys: List[str],
api_key: str,
llm_name: str = "qwen-plus",
pipeline_config_dir: str = "configs/pipelines",
act_bkend: Literal[
"local_shell",
"localshell",
"state_bk",
"statebk",
"daytona_sandbox",
"daytonasandbox",
] = "state_bk",
**_: Any,
):
backend_subcommand = _DEEP_AGENT_BACKEND_ALIASES.get(act_bkend)
if backend_subcommand is None:
raise ValueError(
"Unsupported deepagent backend "
f"'{act_bkend}'. Expected one of {sorted(_DEEP_AGENT_BACKEND_ALIASES.keys())}"
)
cmd_opt = [
"--pipeline.pipeline-id", pipeline_id,
"deepagent",
"--llm-name", llm_name,
"--api-key", api_key,
"--pipeline-id", pipeline_id,
"--prompt-set-id", prompt_set,
backend_subcommand,
]
if tool_keys:
cmd_opt.extend(
["--tool-manager-config.client-tool-manager.tool-keys", *tool_keys]
)
return _build_and_load_pipeline_config(pipeline_id, pipeline_config_dir, cmd_opt)
# {pipeline_id: build_function} # {pipeline_id: build_function}
GRAPH_BUILD_FNCS = { GRAPH_BUILD_FNCS = {
"routing": build_route, "routing": build_route,
"react": build_react, "react": build_react,
"deepagent": build_deep_agent,
} }

View File

@@ -38,4 +38,24 @@ class LocalShell(BaseFilesystemBackend):
self.backend = LocalShellBackend(root_dir=self.config.workspace_dir, self.backend = LocalShellBackend(root_dir=self.config.workspace_dir,
virtual_mode=True, virtual_mode=True,
# env={"PATH": "/usr/bin:/bin"} # env={"PATH": "/usr/bin:/bin"}
inherit_env=True) inherit_env=True)
if __name__ == "__main__":
import sys
# Instantiate a LocalShell instance with the default config
config = LocalShellConfig()
shell = LocalShell(config)
# Try checking access to 'npx'
try:
result = shell.backend.execute("npx --version")
if result.exit_code == 0:
print("npx is available, version:", result.output.strip())
else:
print("npx returned non-zero exit code:", result.exit_code, file=sys.stderr)
print("output:", result.output, file=sys.stderr)
except Exception as e:
print("Could not access 'npx':", str(e), file=sys.stderr)

View File

@@ -61,7 +61,12 @@ class DeepAgent(GraphBase):
checkpointer=self.mem, checkpointer=self.mem,
**bkend_agent_params) **bkend_agent_params)
self.prompt_store = build_prompt_store(file_path=self.config.sys_prompt_f, default_key="sys_prompt") self.prompt_store = build_prompt_store(
pipeline_id=self.config.pipeline_id,
prompt_set_id=self.config.prompt_set_id,
file_path=self.config.sys_prompt_f,
default_key="sys_prompt",
)
self.sys_prompt = self.prompt_store.get("sys_prompt") self.sys_prompt = self.prompt_store.get("sys_prompt")
def _agent_call(self, state:State): def _agent_call(self, state:State):

View File

@@ -160,3 +160,30 @@ ON CONFLICT (prompt_set_id, prompt_key)
DO UPDATE SET DO UPDATE SET
content = EXCLUDED.content, content = EXCLUDED.content,
updated_at = now(); updated_at = now();
-- Seed: initial prompt set for lang_agent/graphs/deepagents_qt.py
-- DeepAgent uses prompt key "sys_prompt" with DB-first, file-fallback loading.
INSERT INTO prompt_sets (pipeline_id, graph_id, name, description, is_active, list)
SELECT
'deepagent',
'deepagent',
'default',
'Initial prompt set for DeepAgent',
true,
''
WHERE NOT EXISTS (
SELECT 1
FROM prompt_sets
WHERE pipeline_id = 'deepagent'
AND name = 'default'
);
INSERT INTO prompt_templates (prompt_set_id, prompt_key, content)
SELECT ps.id, 'sys_prompt', '你是一个擅长调用工具和处理文件任务的深度代理。'
FROM prompt_sets ps
WHERE ps.pipeline_id = 'deepagent'
AND ps.name = 'default'
ON CONFLICT (prompt_set_id, prompt_key)
DO UPDATE SET
content = EXCLUDED.content,
updated_at = now();

View File

@@ -3,7 +3,7 @@
Simple chat loop to interact with the blueberry pipeline via DashScope-compatible API. Simple chat loop to interact with the blueberry pipeline via DashScope-compatible API.
Usage: Usage:
python chat_blueberry.py python scripts/py_scripts/chat_dashcope.py
The script connects to the server running on http://localhost:8500 The script connects to the server running on http://localhost:8500
and uses the API key from the pipeline registry. and uses the API key from the pipeline registry.

View File

@@ -325,3 +325,27 @@ def test_pipeline_conversation_messages_404(monkeypatch):
resp = client.get("/v1/pipelines/agent-a/conversations/agent-b:conv-9/messages") resp = client.get("/v1/pipelines/agent-a/conversations/agent-b:conv-9/messages")
assert resp.status_code == 404, resp.text assert resp.status_code == 404, resp.text
assert "not found for pipeline 'agent-a'" in resp.json()["detail"] assert "not found for pipeline 'agent-a'" in resp.json()["detail"]
def test_runtime_auth_info_prefers_registry_then_env(monkeypatch, tmp_path):
registry_path = tmp_path / "pipeline_registry.json"
registry_path.write_text(
json.dumps(
{
"pipelines": {},
"api_keys": {
"sk-from-registry": {"default_pipeline_id": "blueberry"},
},
}
),
encoding="utf-8",
)
monkeypatch.setattr(front_apis, "PIPELINE_REGISTRY_PATH", str(registry_path))
monkeypatch.setenv("FAST_AUTH_KEYS", "sk-from-env,other")
client = TestClient(front_apis.app)
resp = client.get("/v1/runtime-auth")
assert resp.status_code == 200, resp.text
data = resp.json()
assert data["fast_api_key"] == "sk-from-registry"
assert data["source"] == "pipeline_registry"

View File

@@ -35,7 +35,7 @@ def test_refresh_registry_picks_up_new_pipeline(tmp_path):
"default": { "default": {
"enabled": True, "enabled": True,
"config_file": None, "config_file": None,
"overrides": {"llm_name": "qwen-plus"}, "llm_name": "qwen-plus",
} }
}, },
) )
@@ -57,12 +57,12 @@ def test_refresh_registry_picks_up_new_pipeline(tmp_path):
"default": { "default": {
"enabled": True, "enabled": True,
"config_file": None, "config_file": None,
"overrides": {"llm_name": "qwen-plus"}, "llm_name": "qwen-plus",
}, },
"blueberry": { "blueberry": {
"enabled": True, "enabled": True,
"config_file": None, "config_file": None,
"overrides": {"llm_name": "qwen-max"}, "llm_name": "qwen-max",
}, },
}, },
) )
@@ -83,7 +83,7 @@ def test_refresh_registry_invalidates_cache_for_changed_pipeline(tmp_path):
"blueberry": { "blueberry": {
"enabled": True, "enabled": True,
"config_file": None, "config_file": None,
"overrides": {"llm_name": "qwen-plus"}, "llm_name": "qwen-plus",
} }
}, },
) )
@@ -102,7 +102,7 @@ def test_refresh_registry_invalidates_cache_for_changed_pipeline(tmp_path):
"blueberry": { "blueberry": {
"enabled": True, "enabled": True,
"config_file": None, "config_file": None,
"overrides": {"llm_name": "qwen-max"}, "llm_name": "qwen-max",
} }
}, },
) )
@@ -122,7 +122,7 @@ def test_refresh_registry_applies_disabled_state_immediately(tmp_path):
"blueberry": { "blueberry": {
"enabled": True, "enabled": True,
"config_file": None, "config_file": None,
"overrides": {"llm_name": "qwen-plus"}, "llm_name": "qwen-plus",
} }
}, },
) )
@@ -139,7 +139,7 @@ def test_refresh_registry_applies_disabled_state_immediately(tmp_path):
"blueberry": { "blueberry": {
"enabled": False, "enabled": False,
"config_file": None, "config_file": None,
"overrides": {"llm_name": "qwen-plus"}, "llm_name": "qwen-plus",
} }
}, },
) )
@@ -152,3 +152,5 @@ def test_refresh_registry_applies_disabled_state_immediately(tmp_path):