Compare commits

...

19 Commits

Author SHA1 Message Date
cf2aea2d26 add tab to modify mcp_config.json in front end 2026-02-13 11:22:47 +08:00
2523703df0 add edit mcp_config.json apis 2026-02-13 11:21:25 +08:00
cab0a0a42c show api preview 2026-02-12 17:37:38 +08:00
c7db276df5 save api key 2026-02-12 17:35:36 +08:00
9d7d81c0ac show /api/ 2026-02-12 16:46:28 +08:00
3d072cab07 print flush for the streaming out put instead 2026-02-12 16:46:08 +08:00
5ed3f80971 add auth key in front_api 2026-02-12 16:39:40 +08:00
56124069e1 show auth key once the agent is created 2026-02-12 16:39:23 +08:00
c87c883313 pass in auth_key 2026-02-12 16:38:32 +08:00
05bcf884c5 add api key to config 2026-02-12 15:45:38 +08:00
7cb40fca0e add api key config 2026-02-12 15:17:46 +08:00
9ab9f0c36e update tool_node config 2026-02-12 15:12:42 +08:00
26ca06d50d remove pipeline_id and set_id since using LLMNodeConfig 2026-02-12 15:05:26 +08:00
9363bd3442 use LLMNodeConfig 2026-02-12 14:54:27 +08:00
1972c182d8 LLMNodeConfig 2026-02-12 14:53:52 +08:00
156186bfae remove unused code 2026-02-12 14:37:02 +08:00
c2cc2628dd use LLMKeyConfig 2026-02-12 14:35:27 +08:00
43dad177ab remove KeyConfig with LLMConfig 2026-02-12 14:34:58 +08:00
1f690914fb remove graph structure detail 2026-02-12 13:37:47 +08:00
20 changed files with 741 additions and 341 deletions

View File

@@ -119,13 +119,6 @@ everything in scripts:
## Registering MCP service
put the links in `configs/mcp_config.json`
## Graph structure
Graph structure:
![Graph overview](./graph.png)
We choose this structure to overcome a limitation in xiaozhi. Specifically, both normal chatting and tool-use prompts are delegated to one model. That leads to degradation in the quality of generated conversation and tool use. By splitting into two models, we effectively increase the prompt size limit while preserving model quality.
## Modifying LLM prompts
Refer to model above when modifying the prompts.
they are in `configs/route_sys_prompts`

View File

@@ -1,6 +1,8 @@
from typing import Dict, List, Optional
import commentjson
import os
import os.path as osp
import secrets
import subprocess
import sys
import uuid
@@ -15,12 +17,17 @@ sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from lang_agent.config.db_config_manager import DBConfigManager
from lang_agent.front_api.build_server import GRAPH_BUILD_FNCS
_PROJECT_ROOT = osp.dirname(osp.dirname(osp.abspath(__file__)))
_MCP_CONFIG_PATH = osp.join(_PROJECT_ROOT, "configs", "mcp_config.json")
_MCP_CONFIG_DEFAULT_CONTENT = "{\n}\n"
class GraphConfigUpsertRequest(BaseModel):
graph_id: str
pipeline_id: str
prompt_set_id: Optional[str] = Field(default=None)
tool_keys: List[str] = Field(default_factory=list)
prompt_dict: Dict[str, str] = Field(default_factory=dict)
api_key: Optional[str] = Field(default=None)
class GraphConfigUpsertResponse(BaseModel):
graph_id: str
@@ -28,6 +35,7 @@ class GraphConfigUpsertResponse(BaseModel):
prompt_set_id: str
tool_keys: List[str]
prompt_keys: List[str]
api_key: str
class GraphConfigReadResponse(BaseModel):
graph_id: Optional[str] = Field(default=None)
@@ -35,6 +43,7 @@ class GraphConfigReadResponse(BaseModel):
prompt_set_id: str
tool_keys: List[str]
prompt_dict: Dict[str, str]
api_key: str = Field(default="")
class GraphConfigListItem(BaseModel):
graph_id: Optional[str] = Field(default=None)
@@ -44,6 +53,7 @@ class GraphConfigListItem(BaseModel):
description: str
is_active: bool
tool_keys: List[str]
api_key: str = Field(default="")
created_at: Optional[str] = Field(default=None)
updated_at: Optional[str] = Field(default=None)
@@ -59,6 +69,7 @@ class PipelineCreateRequest(BaseModel):
prompt_set_id: str
tool_keys: List[str] = Field(default_factory=list)
port: int
api_key: str
entry_point: str = Field(default="fastapi_server/server_dashscope.py")
llm_name: str = Field(default="qwen-plus")
@@ -70,6 +81,10 @@ class PipelineCreateResponse(BaseModel):
prompt_set_id: str
url: str
port: int
auth_type: str
auth_header_name: str
auth_key_once: str
auth_key_masked: str
class PipelineRunInfo(BaseModel):
run_id: str
@@ -79,6 +94,9 @@ class PipelineRunInfo(BaseModel):
prompt_set_id: str
url: str
port: int
auth_type: str
auth_header_name: str
auth_key_masked: str
class PipelineListResponse(BaseModel):
items: List[PipelineRunInfo]
@@ -88,6 +106,19 @@ class PipelineStopResponse(BaseModel):
run_id: str
status: str
class McpConfigReadResponse(BaseModel):
path: str
raw_content: str
tool_keys: List[str]
class McpConfigUpdateRequest(BaseModel):
raw_content: str
class McpConfigUpdateResponse(BaseModel):
status: str
path: str
tool_keys: List[str]
app = FastAPI(
title="Front APIs",
@@ -105,6 +136,18 @@ app.add_middleware(
_db = DBConfigManager()
_running_pipelines: Dict[str, Dict[str, object]] = {}
def _generate_auth_key() -> str:
return f"agk_{secrets.token_urlsafe(24)}"
def _mask_auth_key(value: str) -> str:
if not value:
return ""
if len(value) <= 10:
return value
return f"{value[:5]}...{value[-5:]}"
def _prune_stopped_pipelines() -> None:
stale_ids: List[str] = []
for run_id, info in _running_pipelines.items():
@@ -135,10 +178,28 @@ async def root():
"/v1/pipelines (POST)",
"/v1/pipelines (GET)",
"/v1/pipelines/{run_id} (DELETE)",
"/v1/tool-configs/mcp (GET)",
"/v1/tool-configs/mcp (PUT)",
],
}
def _parse_mcp_tool_keys(raw_content: str) -> List[str]:
    """Parse MCP config text (JSON with comments) and return its top-level
    keys as a sorted list.

    Raises:
        ValueError: if the document parses but is not an object at top level.
    """
    document = commentjson.loads(raw_content or "{}")
    if not isinstance(document, dict):
        raise ValueError("mcp_config must be a JSON object at top level")
    names = [str(name) for name in document]
    names.sort()
    return names
def _read_mcp_config_raw() -> str:
    """Return the raw text of the MCP config file, creating it on demand.

    Reads first and only seeds the file with the default empty object when it
    does not exist (EAFP).  This removes the exists-then-open race of the
    original check-first version and avoids re-reading content we just wrote.

    Returns:
        File content as a string, comments preserved verbatim.
    """
    try:
        with open(_MCP_CONFIG_PATH, "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        os.makedirs(osp.dirname(_MCP_CONFIG_PATH), exist_ok=True)
        with open(_MCP_CONFIG_PATH, "w", encoding="utf-8") as f:
            f.write(_MCP_CONFIG_DEFAULT_CONTENT)
        return _MCP_CONFIG_DEFAULT_CONTENT
@app.post("/v1/graph-configs", response_model=GraphConfigUpsertResponse)
async def upsert_graph_config(body: GraphConfigUpsertRequest):
try:
@@ -148,6 +209,7 @@ async def upsert_graph_config(body: GraphConfigUpsertRequest):
prompt_set_id=body.prompt_set_id,
tool_list=body.tool_keys,
prompt_dict=body.prompt_dict,
api_key=body.api_key,
)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
@@ -160,6 +222,7 @@ async def upsert_graph_config(body: GraphConfigUpsertRequest):
prompt_set_id=resolved_prompt_set_id,
tool_keys=body.tool_keys,
prompt_keys=list(body.prompt_dict.keys()),
api_key=(body.api_key or "").strip(),
)
@app.get("/v1/graph-configs", response_model=GraphConfigListResponse)
@@ -201,6 +264,7 @@ async def get_default_graph_config(pipeline_id: str):
prompt_set_id=active["prompt_set_id"],
tool_keys=tool_keys,
prompt_dict=prompt_dict,
api_key=(active.get("api_key") or ""),
)
@app.get("/v1/graphs/{graph_id}/default-config", response_model=GraphConfigReadResponse)
@@ -233,6 +297,7 @@ async def get_graph_config(pipeline_id: str, prompt_set_id: str):
prompt_set_id=prompt_set_id,
tool_keys=tool_keys,
prompt_dict=prompt_dict,
api_key=(meta.get("api_key") or ""),
)
@@ -256,6 +321,40 @@ async def delete_graph_config(pipeline_id: str, prompt_set_id: str):
async def available_graphs():
return {"available_graphs": sorted(GRAPH_BUILD_FNCS.keys())}
@app.get("/v1/tool-configs/mcp", response_model=McpConfigReadResponse)
async def get_mcp_tool_config():
    """Return the MCP config file's path, raw text, and parsed tool keys.

    Maps a malformed document to HTTP 400 and any other failure (e.g. I/O)
    to HTTP 500.
    """
    try:
        content = _read_mcp_config_raw()
        keys = _parse_mcp_tool_keys(content)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return McpConfigReadResponse(
        path=_MCP_CONFIG_PATH,
        raw_content=content,
        tool_keys=keys,
    )
@app.put("/v1/tool-configs/mcp", response_model=McpConfigUpdateResponse)
async def update_mcp_tool_config(body: McpConfigUpdateRequest):
    """Validate and persist the submitted MCP config text.

    Parsing happens before the write, so an invalid document (HTTP 400)
    never clobbers the file on disk; write failures map to HTTP 500.
    """
    try:
        parsed_keys = _parse_mcp_tool_keys(body.raw_content)
        os.makedirs(osp.dirname(_MCP_CONFIG_PATH), exist_ok=True)
        # Keep user formatting/comments as entered while ensuring trailing newline.
        normalized = body.raw_content.rstrip() + "\n"
        with open(_MCP_CONFIG_PATH, "w", encoding="utf-8") as f:
            f.write(normalized)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return McpConfigUpdateResponse(
        status="updated",
        path=_MCP_CONFIG_PATH,
        tool_keys=parsed_keys,
    )
@app.get("/v1/pipelines", response_model=PipelineListResponse)
async def list_running_pipelines():
_prune_stopped_pipelines()
@@ -268,6 +367,9 @@ async def list_running_pipelines():
prompt_set_id=info["prompt_set_id"],
url=info["url"],
port=info["port"],
auth_type="bearer",
auth_header_name="Authorization",
auth_key_masked=info.get("auth_key_masked", ""),
)
for run_id, info in _running_pipelines.items()
]
@@ -283,12 +385,16 @@ async def create_pipeline(body: PipelineCreateRequest):
detail=f"Unknown graph_id '{body.graph_id}'. Valid options: {sorted(GRAPH_BUILD_FNCS.keys())}",
)
auth_key = _generate_auth_key()
auth_key_masked = _mask_auth_key(auth_key)
try:
proc, url = build_fn(
pipeline_id=body.pipeline_id,
prompt_set=body.prompt_set_id,
tool_keys=body.tool_keys,
port=str(body.port),
api_key=body.api_key,
fast_auth_keys=auth_key,
entry_pnt=body.entry_point,
llm_name=body.llm_name,
)
@@ -303,6 +409,7 @@ async def create_pipeline(body: PipelineCreateRequest):
"prompt_set_id": body.prompt_set_id,
"url": url,
"port": body.port,
"auth_key_masked": auth_key_masked,
}
return PipelineCreateResponse(
@@ -313,6 +420,10 @@ async def create_pipeline(body: PipelineCreateRequest):
prompt_set_id=body.prompt_set_id,
url=url,
port=body.port,
auth_type="bearer",
auth_header_name="Authorization",
auth_key_once=auth_key,
auth_key_masked=auth_key_masked,
)
@app.delete("/v1/pipelines/{run_id}", response_model=PipelineStopResponse)

View File

@@ -4,10 +4,12 @@ import {
deleteGraphConfig,
getGraphConfig,
getGraphDefaultConfig,
getMcpToolConfig,
listAvailableGraphs,
listGraphConfigs,
listPipelines,
stopPipeline,
updateMcpToolConfig,
upsertGraphConfig,
} from "./api/frontApis";
import type {
@@ -25,12 +27,24 @@ type EditableAgent = {
toolKeys: string[];
prompts: Record<string, string>;
port: number;
apiKey: string;
llmName: string;
};
type LaunchCredentials = {
url: string;
authType: string;
authHeaderName: string;
authKey: string;
authKeyMasked: string;
};
type ActiveTab = "agents" | "mcp";
const DEFAULT_ENTRY_POINT = "fastapi_server/server_dashscope.py";
const DEFAULT_LLM_NAME = "qwen-plus";
const DEFAULT_PORT = 8100;
const DEFAULT_API_KEY = "";
const GRAPH_ARCH_IMAGE_MODULES = import.meta.glob(
"../assets/images/graph_arch/*.{png,jpg,jpeg,webp,gif}",
{ eager: true, import: "default" }
@@ -64,6 +78,17 @@ function parseToolCsv(value: string): string[] {
return out;
}
// Abbreviate a secret for on-screen preview: short values are shown as-is,
// longer ones keep only the first and last five characters.
// Mirrors the backend's _mask_auth_key behavior.
function maskSecretPreview(value: string): string {
  const secret = value.trim();
  if (secret.length === 0) {
    return "";
  }
  return secret.length <= 10
    ? secret
    : `${secret.slice(0, 5)}...${secret.slice(-5)}`;
}
function getGraphArchImage(graphId: string): string | null {
const normalizedGraphId = graphId.trim().toLowerCase();
for (const [path, source] of Object.entries(GRAPH_ARCH_IMAGE_MODULES)) {
@@ -91,11 +116,13 @@ function toEditable(
toolKeys: config.tool_keys || [],
prompts: config.prompt_dict || {},
port: DEFAULT_PORT,
apiKey: config.api_key || DEFAULT_API_KEY,
llmName: DEFAULT_LLM_NAME,
};
}
export default function App() {
const [activeTab, setActiveTab] = useState<ActiveTab>("agents");
const [graphs, setGraphs] = useState<string[]>([]);
const [configItems, setConfigItems] = useState<GraphConfigListItem[]>([]);
const [running, setRunning] = useState<PipelineRunInfo[]>([]);
@@ -103,6 +130,10 @@ export default function App() {
const [selectedId, setSelectedId] = useState<string | null>(null);
const [editor, setEditor] = useState<EditableAgent | null>(null);
const [statusMessage, setStatusMessage] = useState<string>("");
const [launchCredentials, setLaunchCredentials] = useState<LaunchCredentials | null>(null);
const [mcpConfigPath, setMcpConfigPath] = useState<string>("");
const [mcpConfigRaw, setMcpConfigRaw] = useState<string>("");
const [mcpToolKeys, setMcpToolKeys] = useState<string[]>([]);
const [busy, setBusy] = useState(false);
const configKeySet = useMemo(
@@ -185,6 +216,16 @@ export default function App() {
}
}, [selectedId, configKeySet]);
useEffect(() => {
if (activeTab !== "mcp") {
return;
}
if (mcpConfigRaw) {
return;
}
reloadMcpConfig().catch(() => undefined);
}, [activeTab]);
async function selectExisting(item: GraphConfigListItem): Promise<void> {
const id = makeAgentKey(item.pipeline_id, item.prompt_set_id);
setSelectedId(id);
@@ -196,6 +237,7 @@ export default function App() {
editable.id = id;
editable.port = editor?.pipelineId === editable.pipelineId ? editor.port : DEFAULT_PORT;
editable.llmName = editor?.pipelineId === editable.pipelineId ? editor.llmName : DEFAULT_LLM_NAME;
// apiKey is loaded from backend (persisted in DB) — don't override with default
setEditor(editable);
setStatusMessage("");
} catch (error) {
@@ -292,10 +334,42 @@ export default function App() {
prompt_set_id: "default",
tool_keys: [],
prompt_dict: fallbackPrompts,
api_key: "",
};
}
}
// Fetch the MCP config file from the backend and mirror it into local state.
// Note: overwrites whatever is currently in the editor textarea.
async function reloadMcpConfig(): Promise<void> {
  setBusy(true);
  setStatusMessage("Loading MCP config...");
  try {
    const resp = await getMcpToolConfig();
    // Fall back to neutral values so a partial payload never leaves state undefined.
    setMcpConfigPath(resp.path || "");
    setMcpConfigRaw(resp.raw_content || "");
    setMcpToolKeys(resp.tool_keys || []);
    setStatusMessage("MCP config loaded.");
  } catch (error) {
    // Surface the failure in the status line instead of throwing in the UI.
    setStatusMessage((error as Error).message);
  } finally {
    setBusy(false);
  }
}
// Persist the editor's current MCP config text via PUT; the server validates
// before writing. Local editor text is intentionally kept as typed — only the
// path and parsed tool keys are echoed back into state.
async function saveMcpConfig(): Promise<void> {
  setBusy(true);
  setStatusMessage("Saving MCP config...");
  try {
    const resp = await updateMcpToolConfig({ raw_content: mcpConfigRaw });
    setMcpConfigPath(resp.path || "");
    setMcpToolKeys(resp.tool_keys || []);
    setStatusMessage("MCP config saved.");
  } catch (error) {
    // Includes server-side validation errors (e.g. non-object JSON).
    setStatusMessage((error as Error).message);
  } finally {
    setBusy(false);
  }
}
async function saveConfig(): Promise<void> {
if (!editor) {
return;
@@ -327,6 +401,7 @@ export default function App() {
prompt_set_id: editor.promptSetId,
tool_keys: editor.toolKeys,
prompt_dict: editor.prompts,
api_key: editor.apiKey.trim(),
});
await refreshConfigs();
@@ -334,6 +409,7 @@ export default function App() {
const saved = toEditable(detail, false);
saved.id = makeAgentKey(upsertResp.pipeline_id, upsertResp.prompt_set_id);
saved.port = editor.port;
// apiKey is loaded from backend (persisted in DB) — don't override
saved.llmName = editor.llmName;
setEditor(saved);
setSelectedId(saved.id);
@@ -389,9 +465,14 @@ export default function App() {
setStatusMessage("port must be a positive integer.");
return;
}
if (!editor.apiKey.trim()) {
setStatusMessage("api_key is required before run.");
return;
}
setBusy(true);
setStatusMessage("Starting agent...");
setLaunchCredentials(null);
try {
const resp = await createPipeline({
graph_id: editor.graphId,
@@ -399,11 +480,19 @@ export default function App() {
prompt_set_id: editor.promptSetId,
tool_keys: editor.toolKeys,
port: editor.port,
api_key: editor.apiKey.trim(),
entry_point: DEFAULT_ENTRY_POINT,
llm_name: editor.llmName,
});
await refreshRunning();
setStatusMessage(`Agent started. URL: ${resp.url}`);
setLaunchCredentials({
url: resp.url,
authType: resp.auth_type,
authHeaderName: resp.auth_header_name,
authKey: resp.auth_key_once,
authKeyMasked: resp.auth_key_masked,
});
} catch (error) {
setStatusMessage((error as Error).message);
} finally {
@@ -449,9 +538,29 @@ export default function App() {
})),
];
const graphArchImage = editor ? getGraphArchImage(editor.graphId) : null;
const authHeaderValue = launchCredentials
? `${launchCredentials.authHeaderName}: Bearer ${launchCredentials.authKey}`
: "";
const canUseClipboard = typeof navigator !== "undefined" && Boolean(navigator.clipboard);
// Copy text to the clipboard and report the outcome on the status line.
// `label` names the item ("URL", "auth key", ...) in the user-facing message.
async function copyText(text: string, label: string): Promise<void> {
  if (!canUseClipboard) {
    setStatusMessage(`Clipboard is not available. Please copy ${label} manually.`);
    return;
  }
  try {
    await navigator.clipboard.writeText(text);
    setStatusMessage(`${label} copied.`);
  } catch {
    // writeText can reject (e.g. permission denied); report instead of throwing.
    setStatusMessage(`Failed to copy ${label}.`);
  }
}
const showSidebar = activeTab === "agents";
return (
<div className="app">
<div className={`app ${showSidebar ? "" : "full-width"}`}>
{showSidebar ? (
<aside className="sidebar">
<div className="sidebar-header">
<h2>Agents</h2>
@@ -486,10 +595,34 @@ export default function App() {
{rows.length === 0 ? <p className="empty">No agents configured yet.</p> : null}
</div>
</aside>
) : null}
<main className="content">
<header className="content-header">
<h1>Agent Configuration</h1>
<h1>Agent Manager</h1>
<div className="tabs">
<button
type="button"
className={`tab-button ${activeTab === "agents" ? "active" : ""}`}
onClick={() => setActiveTab("agents")}
disabled={busy}
>
Agents
</button>
<button
type="button"
className={`tab-button ${activeTab === "mcp" ? "active" : ""}`}
onClick={() => setActiveTab("mcp")}
disabled={busy}
>
MCP Config
</button>
</div>
</header>
{statusMessage ? <p className="status">{statusMessage}</p> : null}
{activeTab === "agents" ? (
<div className="tab-pane">
<div className="header-actions">
<button onClick={saveConfig} disabled={busy || !editor}>
Save
@@ -504,9 +637,48 @@ export default function App() {
Delete
</button>
</div>
</header>
{statusMessage ? <p className="status">{statusMessage}</p> : null}
{launchCredentials ? (
<div className="launch-credentials">
<h3>Access Credentials (shown once)</h3>
<div>
<strong>URL:</strong>{" "}
<a href={launchCredentials.url} target="_blank" rel="noreferrer">
{launchCredentials.url}
</a>
<button
type="button"
onClick={() => copyText(launchCredentials.url, "URL")}
disabled={busy}
>
Copy URL
</button>
</div>
<div>
<strong>{launchCredentials.authType} key:</strong> {launchCredentials.authKey}
<button
type="button"
onClick={() => copyText(launchCredentials.authKey, "auth key")}
disabled={busy}
>
Copy Key
</button>
</div>
<div>
<strong>Header:</strong> <code>{authHeaderValue}</code>
<button
type="button"
onClick={() => copyText(authHeaderValue, "auth header")}
disabled={busy}
>
Copy Header
</button>
</div>
<p className="empty">
Stored after launch as masked value: {launchCredentials.authKeyMasked}
</p>
</div>
) : null}
{!editor ? (
<div className="empty-panel">
@@ -578,6 +750,20 @@ export default function App() {
/>
</label>
<label>
api_key
<input
type="password"
value={editor.apiKey}
onChange={(e) => updateEditor("apiKey", e.target.value)}
placeholder="Enter provider API key"
disabled={busy}
/>
{editor.apiKey ? (
<small className="empty">Preview: {maskSecretPreview(editor.apiKey)}</small>
) : null}
</label>
<label>
llm_name
<input
@@ -625,12 +811,50 @@ export default function App() {
{run.url}
</a>
</div>
<div>
<strong>auth:</strong> {run.auth_header_name} Bearer {run.auth_key_masked}
</div>
</div>
))
)}
</div>
</section>
)}
</div>
) : (
<section className="mcp-config-section tab-pane">
<div className="mcp-config-header">
<h3>Edit MCP Tool Options</h3>
<div className="header-actions">
<button type="button" onClick={reloadMcpConfig} disabled={busy}>
Reload
</button>
<button type="button" onClick={saveMcpConfig} disabled={busy}>
Save
</button>
</div>
</div>
<p className="empty">
This tab edits <code>configs/mcp_config.json</code> directly (comments supported).
</p>
{mcpConfigPath ? (
<p className="empty">
File: <code>{mcpConfigPath}</code>
</p>
) : null}
<p className="empty">
Tool options detected: {mcpToolKeys.length ? mcpToolKeys.join(", ") : "(none)"}
</p>
<textarea
className="mcp-config-editor"
value={mcpConfigRaw}
onChange={(e) => setMcpConfigRaw(e.target.value)}
rows={18}
spellCheck={false}
disabled={busy}
/>
</section>
)}
</main>
</div>
);

View File

@@ -4,6 +4,9 @@ import type {
GraphConfigReadResponse,
GraphConfigUpsertRequest,
GraphConfigUpsertResponse,
McpToolConfigResponse,
McpToolConfigUpdateRequest,
McpToolConfigUpdateResponse,
PipelineCreateRequest,
PipelineCreateResponse,
PipelineListResponse,
@@ -85,6 +88,19 @@ export function deleteGraphConfig(
});
}
// Read the raw MCP config plus its parsed tool keys from the backend.
export function getMcpToolConfig(): Promise<McpToolConfigResponse> {
  return fetchJson("/v1/tool-configs/mcp");
}

// Persist edited MCP config text; the server validates before writing.
export function updateMcpToolConfig(
  payload: McpToolConfigUpdateRequest
): Promise<McpToolConfigUpdateResponse> {
  const options = {
    method: "PUT",
    body: JSON.stringify(payload),
  };
  return fetchJson("/v1/tool-configs/mcp", options);
}
export function createPipeline(
payload: PipelineCreateRequest
): Promise<PipelineCreateResponse> {

View File

@@ -24,6 +24,10 @@ body {
min-height: 100vh;
}
.app.full-width {
grid-template-columns: 1fr;
}
.sidebar {
border-right: 1px solid #dbe2ea;
background: #ffffff;
@@ -94,6 +98,21 @@ button:disabled {
margin: 0;
}
.tabs {
display: flex;
gap: 8px;
}
.tab-button {
min-width: 120px;
}
.tab-button.active {
background: #edf3ff;
border-color: #4d7ef3;
color: #1a4fc5;
}
.header-actions {
display: flex;
gap: 8px;
@@ -108,10 +127,41 @@ button:disabled {
padding: 10px;
}
.launch-credentials {
background: #fff4df;
border: 1px solid #f0d5a8;
border-radius: 8px;
margin-top: 12px;
padding: 12px;
}
.launch-credentials h3 {
margin: 0 0 8px;
}
.launch-credentials > div {
align-items: center;
display: flex;
flex-wrap: wrap;
gap: 8px;
margin: 6px 0;
}
.launch-credentials code {
background: #fff;
border: 1px solid #f0d5a8;
border-radius: 4px;
padding: 2px 6px;
}
.empty-panel {
margin-top: 30px;
}
.tab-pane {
margin-top: 12px;
}
.form-grid {
display: grid;
gap: 14px;
@@ -179,6 +229,35 @@ button:disabled {
padding: 10px;
}
.mcp-config-section {
background: #f7fbff;
border: 1px solid #d7e6f6;
border-radius: 10px;
padding: 12px;
}
.mcp-config-header {
align-items: center;
display: flex;
justify-content: space-between;
gap: 12px;
}
.mcp-config-header h3 {
margin: 0;
}
.mcp-config-editor {
border: 1px solid #c9d4e2;
border-radius: 8px;
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace;
font-size: 13px;
margin-top: 8px;
padding: 10px;
resize: vertical;
width: 100%;
}
.empty {
color: #687788;
margin: 6px 0;

View File

@@ -6,6 +6,7 @@ export type GraphConfigListItem = {
description: string;
is_active: boolean;
tool_keys: string[];
api_key: string;
created_at?: string | null;
updated_at?: string | null;
};
@@ -21,6 +22,7 @@ export type GraphConfigReadResponse = {
prompt_set_id: string;
tool_keys: string[];
prompt_dict: Record<string, string>;
api_key: string;
};
export type GraphConfigUpsertRequest = {
@@ -29,6 +31,7 @@ export type GraphConfigUpsertRequest = {
prompt_set_id?: string;
tool_keys: string[];
prompt_dict: Record<string, string>;
api_key?: string;
};
export type GraphConfigUpsertResponse = {
@@ -37,6 +40,7 @@ export type GraphConfigUpsertResponse = {
prompt_set_id: string;
tool_keys: string[];
prompt_keys: string[];
api_key: string;
};
export type AvailableGraphsResponse = {
@@ -49,6 +53,7 @@ export type PipelineCreateRequest = {
prompt_set_id: string;
tool_keys: string[];
port: number;
api_key: string;
entry_point: string;
llm_name: string;
};
@@ -61,9 +66,14 @@ export type PipelineRunInfo = {
prompt_set_id: string;
url: string;
port: number;
auth_type: string;
auth_header_name: string;
auth_key_masked: string;
};
export type PipelineCreateResponse = PipelineRunInfo;
export type PipelineCreateResponse = PipelineRunInfo & {
auth_key_once: string;
};
export type PipelineListResponse = {
items: PipelineRunInfo[];
@@ -75,3 +85,19 @@ export type PipelineStopResponse = {
status: string;
};
export type McpToolConfigResponse = {
path: string;
raw_content: string;
tool_keys: string[];
};
export type McpToolConfigUpdateRequest = {
raw_content: string;
};
export type McpToolConfigUpdateResponse = {
status: string;
path: string;
tool_keys: string[];
};

View File

@@ -1 +1,4 @@
from lang_agent.config.core_config import InstantiateConfig, KeyConfig, ToolConfig, LLMKeyConfig
from lang_agent.config.core_config import (InstantiateConfig,
ToolConfig,
LLMKeyConfig,
LLMNodeConfig)

View File

@@ -1,5 +1,5 @@
from dataclasses import dataclass, is_dataclass, fields, MISSING
from typing import Any, Tuple, Type
from typing import Any, Tuple, Type, Optional
import yaml
from pathlib import Path
from typing import Dict
@@ -126,10 +126,16 @@ class InstantiateConfig(PrintableConfig):
return self.__class__.__name__
@dataclass
class KeyConfig(InstantiateConfig):
class LLMKeyConfig(InstantiateConfig):
llm_name: str = "qwen-plus"
"""name of llm"""
llm_provider:str = "openai"
"""provider of the llm"""
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
"""base url; could be used to overwrite the baseurl in llm provider"""
api_key:str = None
"""api key for llm"""
@@ -144,15 +150,16 @@ class KeyConfig(InstantiateConfig):
@dataclass
class LLMKeyConfig(KeyConfig):
llm_name: str = "qwen-plus"
"""name of llm"""
class LLMNodeConfig(LLMKeyConfig):
"""
class is for LLM nodes that has system prompt config
"""
llm_provider:str = "openai"
"""provider of the llm"""
pipeline_id: Optional[str] = None
"""If set, load prompts from database (with file fallback)"""
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
"""base url; could be used to overwrite the baseurl in llm provider"""
prompt_set_id: Optional[str] = None
"""If set, load from this specific prompt set instead of the active one"""
@dataclass
@@ -260,21 +267,3 @@ def ovewrite_config(loaded_conf, inp_conf):
setattr(loaded_conf, field_name, new_value)
return loaded_conf
def mcp_langchain_to_ws_config(conf:Dict[str, Dict[str, str]]):
serv_conf = {}
for k, v in conf.items():
if v["transport"] == "stdio":
serv_conf[k] = {
"type" : v["transport"],
"command": v["command"],
"args": v["args"],
}
else:
logger.warning(f"Unsupported transport {v['transport']} for MCP {k}. Skipping...")
continue
return {"mcpServers":serv_conf}

View File

@@ -28,7 +28,7 @@ class DBConfigManager:
if pipeline_id and graph_id:
cur.execute(
"""
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
FROM prompt_sets
WHERE pipeline_id = %s AND graph_id = %s
ORDER BY updated_at DESC, created_at DESC
@@ -38,7 +38,7 @@ class DBConfigManager:
elif pipeline_id:
cur.execute(
"""
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
FROM prompt_sets
WHERE pipeline_id = %s
ORDER BY updated_at DESC, created_at DESC
@@ -48,7 +48,7 @@ class DBConfigManager:
elif graph_id:
cur.execute(
"""
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
FROM prompt_sets
WHERE graph_id = %s
ORDER BY updated_at DESC, created_at DESC
@@ -58,7 +58,7 @@ class DBConfigManager:
else:
cur.execute(
"""
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
FROM prompt_sets
ORDER BY updated_at DESC, created_at DESC
"""
@@ -76,6 +76,7 @@ class DBConfigManager:
"created_at": row["created_at"].isoformat() if row["created_at"] else None,
"updated_at": row["updated_at"].isoformat() if row["updated_at"] else None,
"tool_keys": self._parse_tool_list(row.get("list")),
"api_key": row.get("api_key") or "",
}
for row in rows
]
@@ -88,7 +89,7 @@ class DBConfigManager:
with conn.cursor(row_factory=dict_row) as cur:
cur.execute(
"""
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
FROM prompt_sets
WHERE id = %s AND pipeline_id = %s
""",
@@ -109,6 +110,7 @@ class DBConfigManager:
"created_at": row["created_at"].isoformat() if row["created_at"] else None,
"updated_at": row["updated_at"].isoformat() if row["updated_at"] else None,
"tool_keys": self._parse_tool_list(row.get("list")),
"api_key": row.get("api_key") or "",
}
def get_config(
@@ -160,6 +162,7 @@ class DBConfigManager:
prompt_set_id: Optional[str],
tool_list: Optional[Sequence[str]],
prompt_dict: Optional[Mapping[str, str]],
api_key: Optional[str] = None,
) -> str:
"""
Persist prompt + tool configuration.
@@ -182,6 +185,7 @@ class DBConfigManager:
normalized_prompt_dict = self._normalize_prompt_dict(prompt_dict)
tool_csv = self._join_tool_list(tool_list)
normalized_api_key = self._normalize_api_key(api_key)
with psycopg.connect(self.conn_str) as conn:
resolved_set_id, _ = self._resolve_prompt_set(
@@ -200,10 +204,13 @@ class DBConfigManager:
cur.execute(
"""
UPDATE prompt_sets
SET list = %s, graph_id = COALESCE(%s, graph_id), updated_at = now()
SET list = %s,
graph_id = COALESCE(%s, graph_id),
api_key = COALESCE(%s, api_key),
updated_at = now()
WHERE id = %s
""",
(tool_csv, normalized_graph_id, resolved_set_id),
(tool_csv, normalized_graph_id, normalized_api_key, resolved_set_id),
)
keys = list(normalized_prompt_dict.keys())
@@ -341,3 +348,8 @@ class DBConfigManager:
return None
value = str(graph_id).strip()
return value or None
def _normalize_api_key(self, api_key: Optional[str]) -> Optional[str]:
if api_key is None:
return None
return str(api_key).strip()

View File

@@ -3,7 +3,7 @@ from typing import Type, Callable, List
import tyro
import random
from lang_agent.config import KeyConfig
from lang_agent.config import LLMKeyConfig
from lang_agent.pipeline import Pipeline, PipelineConfig
from langchain.chat_models import init_chat_model
@@ -11,7 +11,7 @@ from langchain_core.messages import BaseMessage, ToolMessage
@tyro.conf.configure(tyro.conf.SuppressFixed)
@dataclass
class ValidatorConfig(KeyConfig):
class ValidatorConfig(LLMKeyConfig):
_target: Type = field(default_factory=lambda:Validator)
@@ -34,9 +34,9 @@ class Validator:
def populate_modules(self):
self.judge_llm = init_chat_model(
model="qwen-plus",
model_provider="openai",
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
model=self.config.llm_name,
model_provider=self.config.llm_provider,
base_url=self.config.base_url,
api_key=self.config.api_key
)

View File

@@ -1,18 +1,26 @@
from typing import List
from typing import Dict, List, Optional
import os
import subprocess
def _build_template(graph:str,
pipeline_id:str,
def build_route(pipeline_id:str,
prompt_set:str,
tool_keys:List[str],
port:str,
api_key: str,
fast_auth_keys: Optional[str] = None,
entry_pnt:str="fastapi_server/server_dashscope.py",
llm_name:str="qwen-plus"):
cmd = [
"python", entry_pnt,
"--llm-name", llm_name,
"--port", str(port),
graph,
"route", # ------------
"--llm-name", llm_name,
"--api-key", api_key,
"--pipeline-id", pipeline_id,
"--prompt-set-id", prompt_set,
"tool_node", # ------------
"--llm-name", llm_name,
"--api-key", api_key,
"--pipeline-id", pipeline_id,
"--prompt-set-id", prompt_set,
]
@@ -20,26 +28,41 @@ def _build_template(graph:str,
cmd.extend(
["--tool-manager-config.client-tool-manager.tool-keys", *tool_keys]
)
sv_prc = subprocess.Popen(cmd)
env: Dict[str, str] = os.environ.copy()
if fast_auth_keys:
env["FAST_AUTH_KEYS"] = fast_auth_keys
sv_prc = subprocess.Popen(cmd, env=env)
return sv_prc, f"http://0.0.0.0:{port}"
def build_route(pipeline_id:str,
prompt_set:str,
tool_keys:List[str],
port:str,
entry_pnt:str="fastapi_server/server_dashscope.py",
llm_name:str="qwen-plus"):
return _build_template("route", pipeline_id, prompt_set, tool_keys, port, entry_pnt, llm_name)
return sv_prc, f"http://127.0.0.1:{port}/api/"
def build_react(pipeline_id:str,
prompt_set:str,
tool_keys:List[str],
port:str,
api_key: str,
fast_auth_keys: Optional[str] = None,
entry_pnt:str="fastapi_server/server_dashscope.py",
llm_name:str="qwen-plus"):
return _build_template("react", pipeline_id, prompt_set, tool_keys, port, entry_pnt, llm_name)
cmd = [
"python", entry_pnt,
"--port", str(port),
"react", # ------------
"--llm-name", llm_name,
"--api-key", api_key,
"--pipeline-id", pipeline_id,
"--prompt-set-id", prompt_set,
]
if tool_keys:
cmd.extend(
["--tool-manager-config.client-tool-manager.tool-keys", *tool_keys]
)
env: Dict[str, str] = os.environ.copy()
if fast_auth_keys:
env["FAST_AUTH_KEYS"] = fast_auth_keys
sv_prc = subprocess.Popen(cmd, env=env)
return sv_prc, f"http://127.0.0.1:{port}/api/"
# {pipeline_id: build_function}
GRAPH_BUILD_FNCS = {

View File

@@ -6,7 +6,7 @@ from loguru import logger
from langchain.chat_models import init_chat_model
from lang_agent.config import LLMKeyConfig
from lang_agent.config import LLMNodeConfig
from lang_agent.base import GraphBase
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
from lang_agent.components.prompt_store import build_prompt_store
@@ -48,15 +48,9 @@ TOOL_SYS_PROMPT = """根据用户的心情使用self_led_control改变灯的颜
用户在描述梦境的时候用紫色。"""
@dataclass
class DualConfig(LLMKeyConfig):
class DualConfig(LLMNodeConfig):
_target: Type = field(default_factory=lambda:Dual)
pipeline_id: Optional[str] = None
"""If set, load prompts from database (with hardcoded fallback)"""
prompt_set_id: Optional[str] = None
"""If set, load from this specific prompt set instead of the active one"""
tool_manager_config: ToolManagerConfig = field(default_factory=ToolManagerConfig)
from langchain.tools import tool

View File

@@ -8,7 +8,7 @@ import time
from langchain.chat_models import init_chat_model
from lang_agent.config import LLMKeyConfig
from lang_agent.config import LLMNodeConfig
from lang_agent.base import GraphBase
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
from lang_agent.graphs.graph_states import State
@@ -45,7 +45,7 @@ TOOL_SYS_PROMPT = """You are a helpful helper and will use the self_led_control
@dataclass
class XiaoAiConfig(LLMKeyConfig):
class XiaoAiConfig(LLMNodeConfig):
_target: Type = field(default_factory=lambda:XiaoAi)
tool_manager_config: ToolManagerConfig = field(default_factory=ToolManagerConfig)

View File

@@ -4,7 +4,7 @@ import tyro
import os.path as osp
from loguru import logger
from lang_agent.config import KeyConfig
from lang_agent.config import LLMNodeConfig
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
from lang_agent.components.prompt_store import build_prompt_store
from lang_agent.base import GraphBase
@@ -20,27 +20,12 @@ from langgraph.graph import StateGraph, START, END
# NOTE: maybe make this into a base_graph_config?
@tyro.conf.configure(tyro.conf.SuppressFixed)
@dataclass
class ReactGraphConfig(KeyConfig):
class ReactGraphConfig(LLMNodeConfig):
_target: Type = field(default_factory=lambda: ReactGraph)
llm_name: str = "qwen-plus"
"""name of llm"""
llm_provider:str = "openai"
"""provider of the llm"""
sys_prompt_f:str = osp.join(osp.dirname(osp.dirname(osp.dirname(__file__))), "configs", "prompts", "blueberry.txt")
"""path to system prompt"""
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
"""base url; could be used to overwrite the baseurl in llm provider"""
pipeline_id: Optional[str] = None
"""If set, load prompts from database (with file fallback)"""
prompt_set_id: Optional[str] = None
"""If set, load from this specific prompt set instead of the active one"""
tool_manager_config: ToolManagerConfig = field(default_factory=ToolManagerConfig)
def __post_init__(self):

View File

@@ -8,7 +8,7 @@ import commentjson
import glob
import time
from lang_agent.config import LLMKeyConfig
from lang_agent.config import LLMNodeConfig
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
from lang_agent.components.prompt_store import build_prompt_store
from lang_agent.base import GraphBase, ToolNodeBase
@@ -27,27 +27,12 @@ from langgraph.checkpoint.memory import MemorySaver
@tyro.conf.configure(tyro.conf.SuppressFixed)
@dataclass
class RoutingConfig(LLMKeyConfig):
class RoutingConfig(LLMNodeConfig):
_target: Type = field(default_factory=lambda: RoutingGraph)
llm_name: str = "qwen-plus"
"""name of llm"""
llm_provider:str = "openai"
"""provider of the llm"""
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
"""base url; could be used to overwrite the baseurl in llm provider"""
sys_promp_dir: str = osp.join(osp.dirname(osp.dirname(osp.dirname(__file__))), "configs", "route_sys_prompts")
"""path to directory or json contantaining system prompt for graphs; Will overwrite systemprompt from xiaozhi if 'chat_prompt' is provided"""
pipeline_id: Optional[str] = None
"""If set, load prompts from database (with file fallback)"""
prompt_set_id: Optional[str] = None
"""If set, load from this specific prompt set instead of the active one"""
tool_manager_config: ToolManagerConfig = field(default_factory=ToolManagerConfig)
tool_node_config: AnnotatedToolNode = field(default_factory=ToolNodeConfig)

View File

@@ -6,7 +6,7 @@ import time
import asyncio
from loguru import logger
from lang_agent.config import InstantiateConfig, KeyConfig
from lang_agent.config import InstantiateConfig, LLMNodeConfig
from lang_agent.components.tool_manager import ToolManager
from lang_agent.components.prompt_store import build_prompt_store
from lang_agent.components.reit_llm import ReitLLM
@@ -23,17 +23,11 @@ from langgraph.graph import StateGraph, START, END
@dataclass
class ToolNodeConfig(InstantiateConfig):
class ToolNodeConfig(LLMNodeConfig):
_target: Type = field(default_factory=lambda: ToolNode)
tool_prompt_f:str = osp.join(osp.dirname(osp.dirname(osp.dirname(__file__))), "configs", "route_sys_prompts", "tool_prompt.txt")
pipeline_id: Optional[str] = None
"""If set, load prompts from database (with file fallback)"""
prompt_set_id: Optional[str] = None
"""If set, load from this specific prompt set instead of the active one"""
class ToolNode(ToolNodeBase):
def __init__(self, config: ToolNodeConfig,
@@ -46,7 +40,9 @@ class ToolNode(ToolNodeBase):
self.populate_modules()
def populate_modules(self):
self.llm = make_llm(tags=["tool_llm"])
self.llm = make_llm(model=self.config.llm_name,
api_key=self.config.api_key,
tags=["tool_llm"])
self.tool_agent = create_agent(self.llm, self.tool_manager.get_langchain_tools(), checkpointer=self.mem)
self.prompt_store = build_prompt_store(
@@ -85,23 +81,12 @@ class ToolNode(ToolNodeBase):
@dataclass
class ChattyToolNodeConfig(KeyConfig, ToolNodeConfig):
class ChattyToolNodeConfig(LLMNodeConfig):
_target: Type = field(default_factory=lambda: ChattyToolNode)
llm_name: str = "qwen-plus"
"""name of llm"""
llm_provider:str = "openai"
"""provider of the llm"""
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
"""base url; could be used to overwrite the baseurl in llm provider"""
chatty_sys_prompt_f:str = osp.join(osp.dirname(osp.dirname(osp.dirname(__file__))), "configs", "route_sys_prompts", "chatty_prompt.txt")
"""path to chatty system prompt"""
# pipeline_id and prompt_set_id are inherited from ToolNodeConfig
tool_node_conf:ToolNodeConfig = field(default_factory=ToolNodeConfig)

View File

@@ -12,7 +12,7 @@ import base64
import json
from loguru import logger
from lang_agent.config import LLMKeyConfig
from lang_agent.config import LLMNodeConfig
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
from lang_agent.components.prompt_store import build_prompt_store
from lang_agent.base import GraphBase, ToolNodeBase
@@ -90,7 +90,7 @@ class VisionRoutingState(TypedDict):
@tyro.conf.configure(tyro.conf.SuppressFixed)
@dataclass
class VisionRoutingConfig(LLMKeyConfig):
class VisionRoutingConfig(LLMNodeConfig):
_target: Type = field(default_factory=lambda: VisionRoutingGraph)
tool_llm_name: str = "qwen-flash"
@@ -99,18 +99,6 @@ class VisionRoutingConfig(LLMKeyConfig):
vision_llm_name: str = "qwen-vl-max"
"""LLM for vision/image analysis"""
llm_provider: str = "openai"
"""provider of the llm"""
base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
"""base url for API"""
pipeline_id: Optional[str] = None
"""If set, load prompts from database (with hardcoded fallback)"""
prompt_set_id: Optional[str] = None
"""If set, load from this specific prompt set instead of the active one"""
tool_manager_config: ToolManagerConfig = field(default_factory=ClientToolManagerConfig)

View File

@@ -13,7 +13,7 @@ from langchain_core.messages import SystemMessage, HumanMessage, BaseMessage
from langchain.agents import create_agent
from langgraph.checkpoint.memory import MemorySaver
from lang_agent.config import InstantiateConfig, KeyConfig
from lang_agent.config import LLMNodeConfig
from lang_agent.graphs import AnnotatedGraph, ReactGraphConfig, RoutingConfig
from lang_agent.base import GraphBase
from lang_agent.components import conv_store
@@ -52,33 +52,18 @@ DEFAULT_PROMPT="""你是半盏新青年茶馆的服务员,擅长倾听、共
@tyro.conf.configure(tyro.conf.SuppressFixed)
@dataclass
class PipelineConfig(KeyConfig):
class PipelineConfig(LLMNodeConfig):
_target: Type = field(default_factory=lambda: Pipeline)
config_f: str = None
"""path to config file"""
llm_name: str = "qwen-plus"
"""name of llm; use default for qwen-plus"""
llm_provider:str = "openai"
"""provider of the llm; use default for openai"""
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
"""base url; could be used to overwrite the baseurl in llm provider"""
host:str = "0.0.0.0"
"""where am I hosted"""
port:int = 8588
"""what is my port"""
pipeline_id: str = None
"""If set, load prompts from database (with file fallback)"""
prompt_set_id: str = None
"""If set, load from this specific prompt set instead of the active one"""
# graph_config: AnnotatedGraph = field(default_factory=ReactGraphConfig)
graph_config: AnnotatedGraph = field(default_factory=RoutingConfig)

View File

@@ -9,13 +9,13 @@ from langchain_community.vectorstores import FAISS
from langchain_core.documents.base import Document
from lang_agent.rag.emb import QwenEmbeddings
from lang_agent.config import ToolConfig, KeyConfig
from lang_agent.config import ToolConfig, LLMKeyConfig
from lang_agent.base import LangToolBase
@tyro.conf.configure(tyro.conf.SuppressFixed)
@dataclass
class SimpleRagConfig(ToolConfig, KeyConfig):
class SimpleRagConfig(ToolConfig, LLMKeyConfig):
_target: Type = field(default_factory=lambda: SimpleRag)
model_name:str = "text-embedding-v4"

View File

@@ -80,7 +80,9 @@ def main():
u = delta
last_text = current_text
logger.info(f"from stream: {u}")
# For streaming responses, print incrementally to stdout and flush
# so the user can see tokens as they arrive.
print(u, end="", flush=True)
except TypeError:
# 非流式回落(一次性返回)
if responses.status_code != HTTPStatus.OK: