Compare commits
19 Commits
d4b4ef3690
...
cf2aea2d26
| Author | SHA1 | Date | |
|---|---|---|---|
| cf2aea2d26 | |||
| 2523703df0 | |||
| cab0a0a42c | |||
| c7db276df5 | |||
| 9d7d81c0ac | |||
| 3d072cab07 | |||
| 5ed3f80971 | |||
| 56124069e1 | |||
| c87c883313 | |||
| 05bcf884c5 | |||
| 7cb40fca0e | |||
| 9ab9f0c36e | |||
| 26ca06d50d | |||
| 9363bd3442 | |||
| 1972c182d8 | |||
| 156186bfae | |||
| c2cc2628dd | |||
| 43dad177ab | |||
| 1f690914fb |
@@ -119,13 +119,6 @@ everything in scripts:
|
||||
## Registering MCP service
|
||||
put the links in `configs/mcp_config.json`
|
||||
|
||||
|
||||
## Graph structure
|
||||
Graph structure:
|
||||
|
||||

|
||||
We choose this structure to overcome a limitation in xiaozhi. Specifically, both normal chatting and tool-use prompts are delegated to one model. That leads to degradation in the quality of generated conversation and tool use. By splitting into two models, we effectively increase the prompt size limit while preserving model quality.
|
||||
|
||||
## Modifying LLM prompts
|
||||
Refer to the model above when modifying the prompts.
|
||||
they are in `configs/route_sys_prompts`
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
from typing import Dict, List, Optional
|
||||
import commentjson
|
||||
import os
|
||||
import os.path as osp
|
||||
import secrets
|
||||
import subprocess
|
||||
import sys
|
||||
import uuid
|
||||
@@ -15,12 +17,17 @@ sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
|
||||
from lang_agent.config.db_config_manager import DBConfigManager
|
||||
from lang_agent.front_api.build_server import GRAPH_BUILD_FNCS
|
||||
|
||||
_PROJECT_ROOT = osp.dirname(osp.dirname(osp.abspath(__file__)))
|
||||
_MCP_CONFIG_PATH = osp.join(_PROJECT_ROOT, "configs", "mcp_config.json")
|
||||
_MCP_CONFIG_DEFAULT_CONTENT = "{\n}\n"
|
||||
|
||||
class GraphConfigUpsertRequest(BaseModel):
|
||||
graph_id: str
|
||||
pipeline_id: str
|
||||
prompt_set_id: Optional[str] = Field(default=None)
|
||||
tool_keys: List[str] = Field(default_factory=list)
|
||||
prompt_dict: Dict[str, str] = Field(default_factory=dict)
|
||||
api_key: Optional[str] = Field(default=None)
|
||||
|
||||
class GraphConfigUpsertResponse(BaseModel):
|
||||
graph_id: str
|
||||
@@ -28,6 +35,7 @@ class GraphConfigUpsertResponse(BaseModel):
|
||||
prompt_set_id: str
|
||||
tool_keys: List[str]
|
||||
prompt_keys: List[str]
|
||||
api_key: str
|
||||
|
||||
class GraphConfigReadResponse(BaseModel):
|
||||
graph_id: Optional[str] = Field(default=None)
|
||||
@@ -35,6 +43,7 @@ class GraphConfigReadResponse(BaseModel):
|
||||
prompt_set_id: str
|
||||
tool_keys: List[str]
|
||||
prompt_dict: Dict[str, str]
|
||||
api_key: str = Field(default="")
|
||||
|
||||
class GraphConfigListItem(BaseModel):
|
||||
graph_id: Optional[str] = Field(default=None)
|
||||
@@ -44,6 +53,7 @@ class GraphConfigListItem(BaseModel):
|
||||
description: str
|
||||
is_active: bool
|
||||
tool_keys: List[str]
|
||||
api_key: str = Field(default="")
|
||||
created_at: Optional[str] = Field(default=None)
|
||||
updated_at: Optional[str] = Field(default=None)
|
||||
|
||||
@@ -59,6 +69,7 @@ class PipelineCreateRequest(BaseModel):
|
||||
prompt_set_id: str
|
||||
tool_keys: List[str] = Field(default_factory=list)
|
||||
port: int
|
||||
api_key: str
|
||||
entry_point: str = Field(default="fastapi_server/server_dashscope.py")
|
||||
llm_name: str = Field(default="qwen-plus")
|
||||
|
||||
@@ -70,6 +81,10 @@ class PipelineCreateResponse(BaseModel):
|
||||
prompt_set_id: str
|
||||
url: str
|
||||
port: int
|
||||
auth_type: str
|
||||
auth_header_name: str
|
||||
auth_key_once: str
|
||||
auth_key_masked: str
|
||||
|
||||
class PipelineRunInfo(BaseModel):
|
||||
run_id: str
|
||||
@@ -79,6 +94,9 @@ class PipelineRunInfo(BaseModel):
|
||||
prompt_set_id: str
|
||||
url: str
|
||||
port: int
|
||||
auth_type: str
|
||||
auth_header_name: str
|
||||
auth_key_masked: str
|
||||
|
||||
class PipelineListResponse(BaseModel):
|
||||
items: List[PipelineRunInfo]
|
||||
@@ -88,6 +106,19 @@ class PipelineStopResponse(BaseModel):
|
||||
run_id: str
|
||||
status: str
|
||||
|
||||
class McpConfigReadResponse(BaseModel):
|
||||
path: str
|
||||
raw_content: str
|
||||
tool_keys: List[str]
|
||||
|
||||
class McpConfigUpdateRequest(BaseModel):
|
||||
raw_content: str
|
||||
|
||||
class McpConfigUpdateResponse(BaseModel):
|
||||
status: str
|
||||
path: str
|
||||
tool_keys: List[str]
|
||||
|
||||
|
||||
app = FastAPI(
|
||||
title="Front APIs",
|
||||
@@ -105,6 +136,18 @@ app.add_middleware(
|
||||
_db = DBConfigManager()
|
||||
_running_pipelines: Dict[str, Dict[str, object]] = {}
|
||||
|
||||
|
||||
def _generate_auth_key() -> str:
|
||||
return f"agk_{secrets.token_urlsafe(24)}"
|
||||
|
||||
|
||||
def _mask_auth_key(value: str) -> str:
|
||||
if not value:
|
||||
return ""
|
||||
if len(value) <= 10:
|
||||
return value
|
||||
return f"{value[:5]}...{value[-5:]}"
|
||||
|
||||
def _prune_stopped_pipelines() -> None:
|
||||
stale_ids: List[str] = []
|
||||
for run_id, info in _running_pipelines.items():
|
||||
@@ -135,10 +178,28 @@ async def root():
|
||||
"/v1/pipelines (POST)",
|
||||
"/v1/pipelines (GET)",
|
||||
"/v1/pipelines/{run_id} (DELETE)",
|
||||
"/v1/tool-configs/mcp (GET)",
|
||||
"/v1/tool-configs/mcp (PUT)",
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
def _parse_mcp_tool_keys(raw_content: str) -> List[str]:
    """Parse the MCP config text (comments allowed via commentjson) and
    return its top-level keys, sorted.

    Raises:
        ValueError: when the document is not a JSON object at the top level.
    """
    document = commentjson.loads(raw_content if raw_content else "{}")
    if not isinstance(document, dict):
        raise ValueError("mcp_config must be a JSON object at top level")
    return sorted(map(str, document))
|
||||
|
||||
|
||||
def _read_mcp_config_raw() -> str:
    """Return the raw text of the MCP config file.

    When the file is missing, its parent directory is created and the file
    is seeded with the default content before being read back.
    """
    config_path = _MCP_CONFIG_PATH
    if not osp.exists(config_path):
        os.makedirs(osp.dirname(config_path), exist_ok=True)
        with open(config_path, "w", encoding="utf-8") as handle:
            handle.write(_MCP_CONFIG_DEFAULT_CONTENT)
    with open(config_path, "r", encoding="utf-8") as handle:
        return handle.read()
|
||||
|
||||
|
||||
@app.post("/v1/graph-configs", response_model=GraphConfigUpsertResponse)
|
||||
async def upsert_graph_config(body: GraphConfigUpsertRequest):
|
||||
try:
|
||||
@@ -148,6 +209,7 @@ async def upsert_graph_config(body: GraphConfigUpsertRequest):
|
||||
prompt_set_id=body.prompt_set_id,
|
||||
tool_list=body.tool_keys,
|
||||
prompt_dict=body.prompt_dict,
|
||||
api_key=body.api_key,
|
||||
)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
@@ -160,6 +222,7 @@ async def upsert_graph_config(body: GraphConfigUpsertRequest):
|
||||
prompt_set_id=resolved_prompt_set_id,
|
||||
tool_keys=body.tool_keys,
|
||||
prompt_keys=list(body.prompt_dict.keys()),
|
||||
api_key=(body.api_key or "").strip(),
|
||||
)
|
||||
|
||||
@app.get("/v1/graph-configs", response_model=GraphConfigListResponse)
|
||||
@@ -201,6 +264,7 @@ async def get_default_graph_config(pipeline_id: str):
|
||||
prompt_set_id=active["prompt_set_id"],
|
||||
tool_keys=tool_keys,
|
||||
prompt_dict=prompt_dict,
|
||||
api_key=(active.get("api_key") or ""),
|
||||
)
|
||||
|
||||
@app.get("/v1/graphs/{graph_id}/default-config", response_model=GraphConfigReadResponse)
|
||||
@@ -233,6 +297,7 @@ async def get_graph_config(pipeline_id: str, prompt_set_id: str):
|
||||
prompt_set_id=prompt_set_id,
|
||||
tool_keys=tool_keys,
|
||||
prompt_dict=prompt_dict,
|
||||
api_key=(meta.get("api_key") or ""),
|
||||
)
|
||||
|
||||
|
||||
@@ -256,6 +321,40 @@ async def delete_graph_config(pipeline_id: str, prompt_set_id: str):
|
||||
async def available_graphs():
|
||||
return {"available_graphs": sorted(GRAPH_BUILD_FNCS.keys())}
|
||||
|
||||
@app.get("/v1/tool-configs/mcp", response_model=McpConfigReadResponse)
|
||||
async def get_mcp_tool_config():
|
||||
try:
|
||||
raw_content = _read_mcp_config_raw()
|
||||
tool_keys = _parse_mcp_tool_keys(raw_content)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
return McpConfigReadResponse(
|
||||
path=_MCP_CONFIG_PATH,
|
||||
raw_content=raw_content,
|
||||
tool_keys=tool_keys,
|
||||
)
|
||||
|
||||
|
||||
@app.put("/v1/tool-configs/mcp", response_model=McpConfigUpdateResponse)
|
||||
async def update_mcp_tool_config(body: McpConfigUpdateRequest):
|
||||
try:
|
||||
tool_keys = _parse_mcp_tool_keys(body.raw_content)
|
||||
os.makedirs(osp.dirname(_MCP_CONFIG_PATH), exist_ok=True)
|
||||
with open(_MCP_CONFIG_PATH, "w", encoding="utf-8") as f:
|
||||
# Keep user formatting/comments as entered while ensuring trailing newline.
|
||||
f.write(body.raw_content.rstrip() + "\n")
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
return McpConfigUpdateResponse(
|
||||
status="updated",
|
||||
path=_MCP_CONFIG_PATH,
|
||||
tool_keys=tool_keys,
|
||||
)
|
||||
|
||||
@app.get("/v1/pipelines", response_model=PipelineListResponse)
|
||||
async def list_running_pipelines():
|
||||
_prune_stopped_pipelines()
|
||||
@@ -268,6 +367,9 @@ async def list_running_pipelines():
|
||||
prompt_set_id=info["prompt_set_id"],
|
||||
url=info["url"],
|
||||
port=info["port"],
|
||||
auth_type="bearer",
|
||||
auth_header_name="Authorization",
|
||||
auth_key_masked=info.get("auth_key_masked", ""),
|
||||
)
|
||||
for run_id, info in _running_pipelines.items()
|
||||
]
|
||||
@@ -283,12 +385,16 @@ async def create_pipeline(body: PipelineCreateRequest):
|
||||
detail=f"Unknown graph_id '{body.graph_id}'. Valid options: {sorted(GRAPH_BUILD_FNCS.keys())}",
|
||||
)
|
||||
|
||||
auth_key = _generate_auth_key()
|
||||
auth_key_masked = _mask_auth_key(auth_key)
|
||||
try:
|
||||
proc, url = build_fn(
|
||||
pipeline_id=body.pipeline_id,
|
||||
prompt_set=body.prompt_set_id,
|
||||
tool_keys=body.tool_keys,
|
||||
port=str(body.port),
|
||||
api_key=body.api_key,
|
||||
fast_auth_keys=auth_key,
|
||||
entry_pnt=body.entry_point,
|
||||
llm_name=body.llm_name,
|
||||
)
|
||||
@@ -303,6 +409,7 @@ async def create_pipeline(body: PipelineCreateRequest):
|
||||
"prompt_set_id": body.prompt_set_id,
|
||||
"url": url,
|
||||
"port": body.port,
|
||||
"auth_key_masked": auth_key_masked,
|
||||
}
|
||||
|
||||
return PipelineCreateResponse(
|
||||
@@ -313,6 +420,10 @@ async def create_pipeline(body: PipelineCreateRequest):
|
||||
prompt_set_id=body.prompt_set_id,
|
||||
url=url,
|
||||
port=body.port,
|
||||
auth_type="bearer",
|
||||
auth_header_name="Authorization",
|
||||
auth_key_once=auth_key,
|
||||
auth_key_masked=auth_key_masked,
|
||||
)
|
||||
|
||||
@app.delete("/v1/pipelines/{run_id}", response_model=PipelineStopResponse)
|
||||
|
||||
@@ -4,10 +4,12 @@ import {
|
||||
deleteGraphConfig,
|
||||
getGraphConfig,
|
||||
getGraphDefaultConfig,
|
||||
getMcpToolConfig,
|
||||
listAvailableGraphs,
|
||||
listGraphConfigs,
|
||||
listPipelines,
|
||||
stopPipeline,
|
||||
updateMcpToolConfig,
|
||||
upsertGraphConfig,
|
||||
} from "./api/frontApis";
|
||||
import type {
|
||||
@@ -25,12 +27,24 @@ type EditableAgent = {
|
||||
toolKeys: string[];
|
||||
prompts: Record<string, string>;
|
||||
port: number;
|
||||
apiKey: string;
|
||||
llmName: string;
|
||||
};
|
||||
|
||||
type LaunchCredentials = {
|
||||
url: string;
|
||||
authType: string;
|
||||
authHeaderName: string;
|
||||
authKey: string;
|
||||
authKeyMasked: string;
|
||||
};
|
||||
|
||||
type ActiveTab = "agents" | "mcp";
|
||||
|
||||
const DEFAULT_ENTRY_POINT = "fastapi_server/server_dashscope.py";
|
||||
const DEFAULT_LLM_NAME = "qwen-plus";
|
||||
const DEFAULT_PORT = 8100;
|
||||
const DEFAULT_API_KEY = "";
|
||||
const GRAPH_ARCH_IMAGE_MODULES = import.meta.glob(
|
||||
"../assets/images/graph_arch/*.{png,jpg,jpeg,webp,gif}",
|
||||
{ eager: true, import: "default" }
|
||||
@@ -64,6 +78,17 @@ function parseToolCsv(value: string): string[] {
|
||||
return out;
|
||||
}
|
||||
|
||||
function maskSecretPreview(value: string): string {
|
||||
const trimmed = value.trim();
|
||||
if (!trimmed) {
|
||||
return "";
|
||||
}
|
||||
if (trimmed.length <= 10) {
|
||||
return trimmed;
|
||||
}
|
||||
return `${trimmed.slice(0, 5)}...${trimmed.slice(-5)}`;
|
||||
}
|
||||
|
||||
function getGraphArchImage(graphId: string): string | null {
|
||||
const normalizedGraphId = graphId.trim().toLowerCase();
|
||||
for (const [path, source] of Object.entries(GRAPH_ARCH_IMAGE_MODULES)) {
|
||||
@@ -91,11 +116,13 @@ function toEditable(
|
||||
toolKeys: config.tool_keys || [],
|
||||
prompts: config.prompt_dict || {},
|
||||
port: DEFAULT_PORT,
|
||||
apiKey: config.api_key || DEFAULT_API_KEY,
|
||||
llmName: DEFAULT_LLM_NAME,
|
||||
};
|
||||
}
|
||||
|
||||
export default function App() {
|
||||
const [activeTab, setActiveTab] = useState<ActiveTab>("agents");
|
||||
const [graphs, setGraphs] = useState<string[]>([]);
|
||||
const [configItems, setConfigItems] = useState<GraphConfigListItem[]>([]);
|
||||
const [running, setRunning] = useState<PipelineRunInfo[]>([]);
|
||||
@@ -103,6 +130,10 @@ export default function App() {
|
||||
const [selectedId, setSelectedId] = useState<string | null>(null);
|
||||
const [editor, setEditor] = useState<EditableAgent | null>(null);
|
||||
const [statusMessage, setStatusMessage] = useState<string>("");
|
||||
const [launchCredentials, setLaunchCredentials] = useState<LaunchCredentials | null>(null);
|
||||
const [mcpConfigPath, setMcpConfigPath] = useState<string>("");
|
||||
const [mcpConfigRaw, setMcpConfigRaw] = useState<string>("");
|
||||
const [mcpToolKeys, setMcpToolKeys] = useState<string[]>([]);
|
||||
const [busy, setBusy] = useState(false);
|
||||
|
||||
const configKeySet = useMemo(
|
||||
@@ -185,6 +216,16 @@ export default function App() {
|
||||
}
|
||||
}, [selectedId, configKeySet]);
|
||||
|
||||
useEffect(() => {
|
||||
if (activeTab !== "mcp") {
|
||||
return;
|
||||
}
|
||||
if (mcpConfigRaw) {
|
||||
return;
|
||||
}
|
||||
reloadMcpConfig().catch(() => undefined);
|
||||
}, [activeTab]);
|
||||
|
||||
async function selectExisting(item: GraphConfigListItem): Promise<void> {
|
||||
const id = makeAgentKey(item.pipeline_id, item.prompt_set_id);
|
||||
setSelectedId(id);
|
||||
@@ -196,6 +237,7 @@ export default function App() {
|
||||
editable.id = id;
|
||||
editable.port = editor?.pipelineId === editable.pipelineId ? editor.port : DEFAULT_PORT;
|
||||
editable.llmName = editor?.pipelineId === editable.pipelineId ? editor.llmName : DEFAULT_LLM_NAME;
|
||||
// apiKey is loaded from backend (persisted in DB) — don't override with default
|
||||
setEditor(editable);
|
||||
setStatusMessage("");
|
||||
} catch (error) {
|
||||
@@ -292,10 +334,42 @@ export default function App() {
|
||||
prompt_set_id: "default",
|
||||
tool_keys: [],
|
||||
prompt_dict: fallbackPrompts,
|
||||
api_key: "",
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
async function reloadMcpConfig(): Promise<void> {
|
||||
setBusy(true);
|
||||
setStatusMessage("Loading MCP config...");
|
||||
try {
|
||||
const resp = await getMcpToolConfig();
|
||||
setMcpConfigPath(resp.path || "");
|
||||
setMcpConfigRaw(resp.raw_content || "");
|
||||
setMcpToolKeys(resp.tool_keys || []);
|
||||
setStatusMessage("MCP config loaded.");
|
||||
} catch (error) {
|
||||
setStatusMessage((error as Error).message);
|
||||
} finally {
|
||||
setBusy(false);
|
||||
}
|
||||
}
|
||||
|
||||
async function saveMcpConfig(): Promise<void> {
|
||||
setBusy(true);
|
||||
setStatusMessage("Saving MCP config...");
|
||||
try {
|
||||
const resp = await updateMcpToolConfig({ raw_content: mcpConfigRaw });
|
||||
setMcpConfigPath(resp.path || "");
|
||||
setMcpToolKeys(resp.tool_keys || []);
|
||||
setStatusMessage("MCP config saved.");
|
||||
} catch (error) {
|
||||
setStatusMessage((error as Error).message);
|
||||
} finally {
|
||||
setBusy(false);
|
||||
}
|
||||
}
|
||||
|
||||
async function saveConfig(): Promise<void> {
|
||||
if (!editor) {
|
||||
return;
|
||||
@@ -327,6 +401,7 @@ export default function App() {
|
||||
prompt_set_id: editor.promptSetId,
|
||||
tool_keys: editor.toolKeys,
|
||||
prompt_dict: editor.prompts,
|
||||
api_key: editor.apiKey.trim(),
|
||||
});
|
||||
|
||||
await refreshConfigs();
|
||||
@@ -334,6 +409,7 @@ export default function App() {
|
||||
const saved = toEditable(detail, false);
|
||||
saved.id = makeAgentKey(upsertResp.pipeline_id, upsertResp.prompt_set_id);
|
||||
saved.port = editor.port;
|
||||
// apiKey is loaded from backend (persisted in DB) — don't override
|
||||
saved.llmName = editor.llmName;
|
||||
setEditor(saved);
|
||||
setSelectedId(saved.id);
|
||||
@@ -389,9 +465,14 @@ export default function App() {
|
||||
setStatusMessage("port must be a positive integer.");
|
||||
return;
|
||||
}
|
||||
if (!editor.apiKey.trim()) {
|
||||
setStatusMessage("api_key is required before run.");
|
||||
return;
|
||||
}
|
||||
|
||||
setBusy(true);
|
||||
setStatusMessage("Starting agent...");
|
||||
setLaunchCredentials(null);
|
||||
try {
|
||||
const resp = await createPipeline({
|
||||
graph_id: editor.graphId,
|
||||
@@ -399,11 +480,19 @@ export default function App() {
|
||||
prompt_set_id: editor.promptSetId,
|
||||
tool_keys: editor.toolKeys,
|
||||
port: editor.port,
|
||||
api_key: editor.apiKey.trim(),
|
||||
entry_point: DEFAULT_ENTRY_POINT,
|
||||
llm_name: editor.llmName,
|
||||
});
|
||||
await refreshRunning();
|
||||
setStatusMessage(`Agent started. URL: ${resp.url}`);
|
||||
setLaunchCredentials({
|
||||
url: resp.url,
|
||||
authType: resp.auth_type,
|
||||
authHeaderName: resp.auth_header_name,
|
||||
authKey: resp.auth_key_once,
|
||||
authKeyMasked: resp.auth_key_masked,
|
||||
});
|
||||
} catch (error) {
|
||||
setStatusMessage((error as Error).message);
|
||||
} finally {
|
||||
@@ -449,186 +538,321 @@ export default function App() {
|
||||
})),
|
||||
];
|
||||
const graphArchImage = editor ? getGraphArchImage(editor.graphId) : null;
|
||||
const authHeaderValue = launchCredentials
|
||||
? `${launchCredentials.authHeaderName}: Bearer ${launchCredentials.authKey}`
|
||||
: "";
|
||||
const canUseClipboard = typeof navigator !== "undefined" && Boolean(navigator.clipboard);
|
||||
|
||||
async function copyText(text: string, label: string): Promise<void> {
|
||||
if (!canUseClipboard) {
|
||||
setStatusMessage(`Clipboard is not available. Please copy ${label} manually.`);
|
||||
return;
|
||||
}
|
||||
try {
|
||||
await navigator.clipboard.writeText(text);
|
||||
setStatusMessage(`${label} copied.`);
|
||||
} catch {
|
||||
setStatusMessage(`Failed to copy ${label}.`);
|
||||
}
|
||||
}
|
||||
|
||||
const showSidebar = activeTab === "agents";
|
||||
|
||||
return (
|
||||
<div className="app">
|
||||
<aside className="sidebar">
|
||||
<div className="sidebar-header">
|
||||
<h2>Agents</h2>
|
||||
<button onClick={addDraftAgent} disabled={busy}>
|
||||
+ New
|
||||
</button>
|
||||
</div>
|
||||
<div className="agent-list">
|
||||
{rows.map((row) => (
|
||||
<button
|
||||
key={row.id}
|
||||
className={`agent-item ${selectedId === row.id ? "selected" : ""}`}
|
||||
onClick={() => {
|
||||
if (row.isDraft) {
|
||||
const selectedDraft = draftAgents.find((d) => d.id === row.id) || null;
|
||||
setSelectedId(row.id);
|
||||
setEditor(selectedDraft);
|
||||
return;
|
||||
}
|
||||
const item = visibleConfigItems.find(
|
||||
(x) => makeAgentKey(x.pipeline_id, x.prompt_set_id) === row.id
|
||||
);
|
||||
if (item) {
|
||||
selectExisting(item);
|
||||
}
|
||||
}}
|
||||
>
|
||||
<span>{row.label}</span>
|
||||
<small>{row.graphId}</small>
|
||||
<div className={`app ${showSidebar ? "" : "full-width"}`}>
|
||||
{showSidebar ? (
|
||||
<aside className="sidebar">
|
||||
<div className="sidebar-header">
|
||||
<h2>Agents</h2>
|
||||
<button onClick={addDraftAgent} disabled={busy}>
|
||||
+ New
|
||||
</button>
|
||||
))}
|
||||
{rows.length === 0 ? <p className="empty">No agents configured yet.</p> : null}
|
||||
</div>
|
||||
</aside>
|
||||
</div>
|
||||
<div className="agent-list">
|
||||
{rows.map((row) => (
|
||||
<button
|
||||
key={row.id}
|
||||
className={`agent-item ${selectedId === row.id ? "selected" : ""}`}
|
||||
onClick={() => {
|
||||
if (row.isDraft) {
|
||||
const selectedDraft = draftAgents.find((d) => d.id === row.id) || null;
|
||||
setSelectedId(row.id);
|
||||
setEditor(selectedDraft);
|
||||
return;
|
||||
}
|
||||
const item = visibleConfigItems.find(
|
||||
(x) => makeAgentKey(x.pipeline_id, x.prompt_set_id) === row.id
|
||||
);
|
||||
if (item) {
|
||||
selectExisting(item);
|
||||
}
|
||||
}}
|
||||
>
|
||||
<span>{row.label}</span>
|
||||
<small>{row.graphId}</small>
|
||||
</button>
|
||||
))}
|
||||
{rows.length === 0 ? <p className="empty">No agents configured yet.</p> : null}
|
||||
</div>
|
||||
</aside>
|
||||
) : null}
|
||||
|
||||
<main className="content">
|
||||
<header className="content-header">
|
||||
<h1>Agent Configuration</h1>
|
||||
<div className="header-actions">
|
||||
<button onClick={saveConfig} disabled={busy || !editor}>
|
||||
Save
|
||||
<h1>Agent Manager</h1>
|
||||
<div className="tabs">
|
||||
<button
|
||||
type="button"
|
||||
className={`tab-button ${activeTab === "agents" ? "active" : ""}`}
|
||||
onClick={() => setActiveTab("agents")}
|
||||
disabled={busy}
|
||||
>
|
||||
Agents
|
||||
</button>
|
||||
<button onClick={runSelected} disabled={busy || !editor}>
|
||||
Run
|
||||
</button>
|
||||
<button onClick={stopSelected} disabled={busy || !editor}>
|
||||
Stop
|
||||
</button>
|
||||
<button onClick={deleteSelected} disabled={busy || !editor}>
|
||||
Delete
|
||||
<button
|
||||
type="button"
|
||||
className={`tab-button ${activeTab === "mcp" ? "active" : ""}`}
|
||||
onClick={() => setActiveTab("mcp")}
|
||||
disabled={busy}
|
||||
>
|
||||
MCP Config
|
||||
</button>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
{statusMessage ? <p className="status">{statusMessage}</p> : null}
|
||||
|
||||
{!editor ? (
|
||||
<div className="empty-panel">
|
||||
<p>Select an agent from the left or create a new one.</p>
|
||||
</div>
|
||||
) : (
|
||||
<section className="form-grid">
|
||||
<label>
|
||||
Agent Type (graph_id)
|
||||
<select
|
||||
value={editor.graphId}
|
||||
onChange={(e) => changeGraph(e.target.value)}
|
||||
disabled={busy}
|
||||
>
|
||||
{graphs.map((graph) => (
|
||||
<option key={graph} value={graph}>
|
||||
{graph}
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
</label>
|
||||
|
||||
{graphArchImage && (
|
||||
<div className="graph-arch-section">
|
||||
<h3>Graph Architecture</h3>
|
||||
<div className="graph-arch-image-container">
|
||||
<img
|
||||
src={graphArchImage}
|
||||
alt={`${editor.graphId} architecture diagram`}
|
||||
className="graph-arch-image"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<label>
|
||||
pipeline_id
|
||||
<input
|
||||
value={editor.pipelineId}
|
||||
onChange={(e) => updateEditor("pipelineId", e.target.value)}
|
||||
placeholder="example: routing-agent-1"
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
|
||||
<label>
|
||||
prompt_set_id
|
||||
<input value={editor.promptSetId || "(assigned on save)"} readOnly />
|
||||
</label>
|
||||
|
||||
<label>
|
||||
tool_keys (comma separated)
|
||||
<input
|
||||
value={editor.toolKeys.join(", ")}
|
||||
onChange={(e) => updateEditor("toolKeys", parseToolCsv(e.target.value))}
|
||||
placeholder="tool_a, tool_b"
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
|
||||
<label>
|
||||
port
|
||||
<input
|
||||
type="number"
|
||||
min={1}
|
||||
value={editor.port}
|
||||
onChange={(e) => updateEditor("port", Number(e.target.value))}
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
|
||||
<label>
|
||||
llm_name
|
||||
<input
|
||||
value={editor.llmName}
|
||||
onChange={(e) => updateEditor("llmName", e.target.value)}
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
|
||||
<div className="prompt-section">
|
||||
<h3>Prompts</h3>
|
||||
{Object.keys(editor.prompts).length === 0 ? (
|
||||
<p className="empty">No prompt keys returned from backend.</p>
|
||||
) : (
|
||||
Object.entries(editor.prompts).map(([key, value]) => (
|
||||
<label key={key}>
|
||||
{key}
|
||||
<textarea
|
||||
value={value}
|
||||
onChange={(e) => updatePrompt(key, e.target.value)}
|
||||
rows={4}
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
))
|
||||
)}
|
||||
{activeTab === "agents" ? (
|
||||
<div className="tab-pane">
|
||||
<div className="header-actions">
|
||||
<button onClick={saveConfig} disabled={busy || !editor}>
|
||||
Save
|
||||
</button>
|
||||
<button onClick={runSelected} disabled={busy || !editor}>
|
||||
Run
|
||||
</button>
|
||||
<button onClick={stopSelected} disabled={busy || !editor}>
|
||||
Stop
|
||||
</button>
|
||||
<button onClick={deleteSelected} disabled={busy || !editor}>
|
||||
Delete
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div className="run-info">
|
||||
<h3>Running Instances</h3>
|
||||
{selectedRuns.length === 0 ? (
|
||||
<p className="empty">No active runs for this agent.</p>
|
||||
) : (
|
||||
selectedRuns.map((run) => (
|
||||
<div key={run.run_id} className="run-card">
|
||||
<div>
|
||||
<strong>run_id:</strong> {run.run_id}
|
||||
</div>
|
||||
<div>
|
||||
<strong>pid:</strong> {run.pid}
|
||||
</div>
|
||||
<div>
|
||||
<strong>url:</strong>{" "}
|
||||
<a href={run.url} target="_blank" rel="noreferrer">
|
||||
{run.url}
|
||||
</a>
|
||||
{launchCredentials ? (
|
||||
<div className="launch-credentials">
|
||||
<h3>Access Credentials (shown once)</h3>
|
||||
<div>
|
||||
<strong>URL:</strong>{" "}
|
||||
<a href={launchCredentials.url} target="_blank" rel="noreferrer">
|
||||
{launchCredentials.url}
|
||||
</a>
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => copyText(launchCredentials.url, "URL")}
|
||||
disabled={busy}
|
||||
>
|
||||
Copy URL
|
||||
</button>
|
||||
</div>
|
||||
<div>
|
||||
<strong>{launchCredentials.authType} key:</strong> {launchCredentials.authKey}
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => copyText(launchCredentials.authKey, "auth key")}
|
||||
disabled={busy}
|
||||
>
|
||||
Copy Key
|
||||
</button>
|
||||
</div>
|
||||
<div>
|
||||
<strong>Header:</strong> <code>{authHeaderValue}</code>
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => copyText(authHeaderValue, "auth header")}
|
||||
disabled={busy}
|
||||
>
|
||||
Copy Header
|
||||
</button>
|
||||
</div>
|
||||
<p className="empty">
|
||||
Stored after launch as masked value: {launchCredentials.authKeyMasked}
|
||||
</p>
|
||||
</div>
|
||||
) : null}
|
||||
|
||||
{!editor ? (
|
||||
<div className="empty-panel">
|
||||
<p>Select an agent from the left or create a new one.</p>
|
||||
</div>
|
||||
) : (
|
||||
<section className="form-grid">
|
||||
<label>
|
||||
Agent Type (graph_id)
|
||||
<select
|
||||
value={editor.graphId}
|
||||
onChange={(e) => changeGraph(e.target.value)}
|
||||
disabled={busy}
|
||||
>
|
||||
{graphs.map((graph) => (
|
||||
<option key={graph} value={graph}>
|
||||
{graph}
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
</label>
|
||||
|
||||
{graphArchImage && (
|
||||
<div className="graph-arch-section">
|
||||
<h3>Graph Architecture</h3>
|
||||
<div className="graph-arch-image-container">
|
||||
<img
|
||||
src={graphArchImage}
|
||||
alt={`${editor.graphId} architecture diagram`}
|
||||
className="graph-arch-image"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
))
|
||||
)}
|
||||
)}
|
||||
|
||||
<label>
|
||||
pipeline_id
|
||||
<input
|
||||
value={editor.pipelineId}
|
||||
onChange={(e) => updateEditor("pipelineId", e.target.value)}
|
||||
placeholder="example: routing-agent-1"
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
|
||||
<label>
|
||||
prompt_set_id
|
||||
<input value={editor.promptSetId || "(assigned on save)"} readOnly />
|
||||
</label>
|
||||
|
||||
<label>
|
||||
tool_keys (comma separated)
|
||||
<input
|
||||
value={editor.toolKeys.join(", ")}
|
||||
onChange={(e) => updateEditor("toolKeys", parseToolCsv(e.target.value))}
|
||||
placeholder="tool_a, tool_b"
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
|
||||
<label>
|
||||
port
|
||||
<input
|
||||
type="number"
|
||||
min={1}
|
||||
value={editor.port}
|
||||
onChange={(e) => updateEditor("port", Number(e.target.value))}
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
|
||||
<label>
|
||||
api_key
|
||||
<input
|
||||
type="password"
|
||||
value={editor.apiKey}
|
||||
onChange={(e) => updateEditor("apiKey", e.target.value)}
|
||||
placeholder="Enter provider API key"
|
||||
disabled={busy}
|
||||
/>
|
||||
{editor.apiKey ? (
|
||||
<small className="empty">Preview: {maskSecretPreview(editor.apiKey)}</small>
|
||||
) : null}
|
||||
</label>
|
||||
|
||||
<label>
|
||||
llm_name
|
||||
<input
|
||||
value={editor.llmName}
|
||||
onChange={(e) => updateEditor("llmName", e.target.value)}
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
|
||||
<div className="prompt-section">
|
||||
<h3>Prompts</h3>
|
||||
{Object.keys(editor.prompts).length === 0 ? (
|
||||
<p className="empty">No prompt keys returned from backend.</p>
|
||||
) : (
|
||||
Object.entries(editor.prompts).map(([key, value]) => (
|
||||
<label key={key}>
|
||||
{key}
|
||||
<textarea
|
||||
value={value}
|
||||
onChange={(e) => updatePrompt(key, e.target.value)}
|
||||
rows={4}
|
||||
disabled={busy}
|
||||
/>
|
||||
</label>
|
||||
))
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div className="run-info">
|
||||
<h3>Running Instances</h3>
|
||||
{selectedRuns.length === 0 ? (
|
||||
<p className="empty">No active runs for this agent.</p>
|
||||
) : (
|
||||
selectedRuns.map((run) => (
|
||||
<div key={run.run_id} className="run-card">
|
||||
<div>
|
||||
<strong>run_id:</strong> {run.run_id}
|
||||
</div>
|
||||
<div>
|
||||
<strong>pid:</strong> {run.pid}
|
||||
</div>
|
||||
<div>
|
||||
<strong>url:</strong>{" "}
|
||||
<a href={run.url} target="_blank" rel="noreferrer">
|
||||
{run.url}
|
||||
</a>
|
||||
</div>
|
||||
<div>
|
||||
<strong>auth:</strong> {run.auth_header_name} Bearer {run.auth_key_masked}
|
||||
</div>
|
||||
</div>
|
||||
))
|
||||
)}
|
||||
</div>
|
||||
</section>
|
||||
)}
|
||||
</div>
|
||||
) : (
|
||||
<section className="mcp-config-section tab-pane">
|
||||
<div className="mcp-config-header">
|
||||
<h3>Edit MCP Tool Options</h3>
|
||||
<div className="header-actions">
|
||||
<button type="button" onClick={reloadMcpConfig} disabled={busy}>
|
||||
Reload
|
||||
</button>
|
||||
<button type="button" onClick={saveMcpConfig} disabled={busy}>
|
||||
Save
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<p className="empty">
|
||||
This tab edits <code>configs/mcp_config.json</code> directly (comments supported).
|
||||
</p>
|
||||
{mcpConfigPath ? (
|
||||
<p className="empty">
|
||||
File: <code>{mcpConfigPath}</code>
|
||||
</p>
|
||||
) : null}
|
||||
<p className="empty">
|
||||
Tool options detected: {mcpToolKeys.length ? mcpToolKeys.join(", ") : "(none)"}
|
||||
</p>
|
||||
<textarea
|
||||
className="mcp-config-editor"
|
||||
value={mcpConfigRaw}
|
||||
onChange={(e) => setMcpConfigRaw(e.target.value)}
|
||||
rows={18}
|
||||
spellCheck={false}
|
||||
disabled={busy}
|
||||
/>
|
||||
</section>
|
||||
)}
|
||||
</main>
|
||||
|
||||
@@ -4,6 +4,9 @@ import type {
|
||||
GraphConfigReadResponse,
|
||||
GraphConfigUpsertRequest,
|
||||
GraphConfigUpsertResponse,
|
||||
McpToolConfigResponse,
|
||||
McpToolConfigUpdateRequest,
|
||||
McpToolConfigUpdateResponse,
|
||||
PipelineCreateRequest,
|
||||
PipelineCreateResponse,
|
||||
PipelineListResponse,
|
||||
@@ -85,6 +88,19 @@ export function deleteGraphConfig(
|
||||
});
|
||||
}
|
||||
|
||||
export function getMcpToolConfig(): Promise<McpToolConfigResponse> {
|
||||
return fetchJson("/v1/tool-configs/mcp");
|
||||
}
|
||||
|
||||
export function updateMcpToolConfig(
|
||||
payload: McpToolConfigUpdateRequest
|
||||
): Promise<McpToolConfigUpdateResponse> {
|
||||
return fetchJson("/v1/tool-configs/mcp", {
|
||||
method: "PUT",
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
}
|
||||
|
||||
export function createPipeline(
|
||||
payload: PipelineCreateRequest
|
||||
): Promise<PipelineCreateResponse> {
|
||||
|
||||
@@ -24,6 +24,10 @@ body {
|
||||
min-height: 100vh;
|
||||
}
|
||||
|
||||
.app.full-width {
|
||||
grid-template-columns: 1fr;
|
||||
}
|
||||
|
||||
.sidebar {
|
||||
border-right: 1px solid #dbe2ea;
|
||||
background: #ffffff;
|
||||
@@ -94,6 +98,21 @@ button:disabled {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
.tabs {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
}
|
||||
|
||||
.tab-button {
|
||||
min-width: 120px;
|
||||
}
|
||||
|
||||
.tab-button.active {
|
||||
background: #edf3ff;
|
||||
border-color: #4d7ef3;
|
||||
color: #1a4fc5;
|
||||
}
|
||||
|
||||
.header-actions {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
@@ -108,10 +127,41 @@ button:disabled {
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
.launch-credentials {
|
||||
background: #fff4df;
|
||||
border: 1px solid #f0d5a8;
|
||||
border-radius: 8px;
|
||||
margin-top: 12px;
|
||||
padding: 12px;
|
||||
}
|
||||
|
||||
.launch-credentials h3 {
|
||||
margin: 0 0 8px;
|
||||
}
|
||||
|
||||
.launch-credentials > div {
|
||||
align-items: center;
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: 8px;
|
||||
margin: 6px 0;
|
||||
}
|
||||
|
||||
.launch-credentials code {
|
||||
background: #fff;
|
||||
border: 1px solid #f0d5a8;
|
||||
border-radius: 4px;
|
||||
padding: 2px 6px;
|
||||
}
|
||||
|
||||
.empty-panel {
|
||||
margin-top: 30px;
|
||||
}
|
||||
|
||||
.tab-pane {
|
||||
margin-top: 12px;
|
||||
}
|
||||
|
||||
.form-grid {
|
||||
display: grid;
|
||||
gap: 14px;
|
||||
@@ -179,6 +229,35 @@ button:disabled {
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
.mcp-config-section {
|
||||
background: #f7fbff;
|
||||
border: 1px solid #d7e6f6;
|
||||
border-radius: 10px;
|
||||
padding: 12px;
|
||||
}
|
||||
|
||||
.mcp-config-header {
|
||||
align-items: center;
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.mcp-config-header h3 {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
.mcp-config-editor {
|
||||
border: 1px solid #c9d4e2;
|
||||
border-radius: 8px;
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace;
|
||||
font-size: 13px;
|
||||
margin-top: 8px;
|
||||
padding: 10px;
|
||||
resize: vertical;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.empty {
|
||||
color: #687788;
|
||||
margin: 6px 0;
|
||||
|
||||
@@ -6,6 +6,7 @@ export type GraphConfigListItem = {
|
||||
description: string;
|
||||
is_active: boolean;
|
||||
tool_keys: string[];
|
||||
api_key: string;
|
||||
created_at?: string | null;
|
||||
updated_at?: string | null;
|
||||
};
|
||||
@@ -21,6 +22,7 @@ export type GraphConfigReadResponse = {
|
||||
prompt_set_id: string;
|
||||
tool_keys: string[];
|
||||
prompt_dict: Record<string, string>;
|
||||
api_key: string;
|
||||
};
|
||||
|
||||
export type GraphConfigUpsertRequest = {
|
||||
@@ -29,6 +31,7 @@ export type GraphConfigUpsertRequest = {
|
||||
prompt_set_id?: string;
|
||||
tool_keys: string[];
|
||||
prompt_dict: Record<string, string>;
|
||||
api_key?: string;
|
||||
};
|
||||
|
||||
export type GraphConfigUpsertResponse = {
|
||||
@@ -37,6 +40,7 @@ export type GraphConfigUpsertResponse = {
|
||||
prompt_set_id: string;
|
||||
tool_keys: string[];
|
||||
prompt_keys: string[];
|
||||
api_key: string;
|
||||
};
|
||||
|
||||
export type AvailableGraphsResponse = {
|
||||
@@ -49,6 +53,7 @@ export type PipelineCreateRequest = {
|
||||
prompt_set_id: string;
|
||||
tool_keys: string[];
|
||||
port: number;
|
||||
api_key: string;
|
||||
entry_point: string;
|
||||
llm_name: string;
|
||||
};
|
||||
@@ -61,9 +66,14 @@ export type PipelineRunInfo = {
|
||||
prompt_set_id: string;
|
||||
url: string;
|
||||
port: number;
|
||||
auth_type: string;
|
||||
auth_header_name: string;
|
||||
auth_key_masked: string;
|
||||
};
|
||||
|
||||
export type PipelineCreateResponse = PipelineRunInfo;
|
||||
export type PipelineCreateResponse = PipelineRunInfo & {
|
||||
auth_key_once: string;
|
||||
};
|
||||
|
||||
export type PipelineListResponse = {
|
||||
items: PipelineRunInfo[];
|
||||
@@ -75,3 +85,19 @@ export type PipelineStopResponse = {
|
||||
status: string;
|
||||
};
|
||||
|
||||
export type McpToolConfigResponse = {
|
||||
path: string;
|
||||
raw_content: string;
|
||||
tool_keys: string[];
|
||||
};
|
||||
|
||||
export type McpToolConfigUpdateRequest = {
|
||||
raw_content: string;
|
||||
};
|
||||
|
||||
export type McpToolConfigUpdateResponse = {
|
||||
status: string;
|
||||
path: string;
|
||||
tool_keys: string[];
|
||||
};
|
||||
|
||||
|
||||
@@ -1 +1,4 @@
|
||||
from lang_agent.config.core_config import InstantiateConfig, KeyConfig, ToolConfig, LLMKeyConfig
|
||||
from lang_agent.config.core_config import (InstantiateConfig,
|
||||
ToolConfig,
|
||||
LLMKeyConfig,
|
||||
LLMNodeConfig)
|
||||
@@ -1,5 +1,5 @@
|
||||
from dataclasses import dataclass, is_dataclass, fields, MISSING
|
||||
from typing import Any, Tuple, Type
|
||||
from typing import Any, Tuple, Type, Optional
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
from typing import Dict
|
||||
@@ -125,11 +125,17 @@ class InstantiateConfig(PrintableConfig):
|
||||
def get_name(self):
|
||||
return self.__class__.__name__
|
||||
|
||||
|
||||
|
||||
|
||||
@dataclass
|
||||
class KeyConfig(InstantiateConfig):
|
||||
class LLMKeyConfig(InstantiateConfig):
|
||||
llm_name: str = "qwen-plus"
|
||||
"""name of llm"""
|
||||
|
||||
llm_provider:str = "openai"
|
||||
"""provider of the llm"""
|
||||
|
||||
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
"""base url; could be used to overwrite the baseurl in llm provider"""
|
||||
|
||||
api_key:str = None
|
||||
"""api key for llm"""
|
||||
@@ -144,15 +150,16 @@ class KeyConfig(InstantiateConfig):
|
||||
|
||||
|
||||
@dataclass
|
||||
class LLMKeyConfig(KeyConfig):
|
||||
llm_name: str = "qwen-plus"
|
||||
"""name of llm"""
|
||||
class LLMNodeConfig(LLMKeyConfig):
|
||||
"""
|
||||
class is for LLM nodes that has system prompt config
|
||||
"""
|
||||
|
||||
llm_provider:str = "openai"
|
||||
"""provider of the llm"""
|
||||
pipeline_id: Optional[str] = None
|
||||
"""If set, load prompts from database (with file fallback)"""
|
||||
|
||||
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
"""base url; could be used to overwrite the baseurl in llm provider"""
|
||||
prompt_set_id: Optional[str] = None
|
||||
"""If set, load from this specific prompt set instead of the active one"""
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -259,22 +266,4 @@ def ovewrite_config(loaded_conf, inp_conf):
|
||||
"""
|
||||
setattr(loaded_conf, field_name, new_value)
|
||||
|
||||
return loaded_conf
|
||||
|
||||
|
||||
def mcp_langchain_to_ws_config(conf:Dict[str, Dict[str, str]]):
|
||||
serv_conf = {}
|
||||
|
||||
for k, v in conf.items():
|
||||
|
||||
if v["transport"] == "stdio":
|
||||
serv_conf[k] = {
|
||||
"type" : v["transport"],
|
||||
"command": v["command"],
|
||||
"args": v["args"],
|
||||
}
|
||||
else:
|
||||
logger.warning(f"Unsupported transport {v['transport']} for MCP {k}. Skipping...")
|
||||
continue
|
||||
|
||||
return {"mcpServers":serv_conf}
|
||||
return loaded_conf
|
||||
@@ -28,7 +28,7 @@ class DBConfigManager:
|
||||
if pipeline_id and graph_id:
|
||||
cur.execute(
|
||||
"""
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
|
||||
FROM prompt_sets
|
||||
WHERE pipeline_id = %s AND graph_id = %s
|
||||
ORDER BY updated_at DESC, created_at DESC
|
||||
@@ -38,7 +38,7 @@ class DBConfigManager:
|
||||
elif pipeline_id:
|
||||
cur.execute(
|
||||
"""
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
|
||||
FROM prompt_sets
|
||||
WHERE pipeline_id = %s
|
||||
ORDER BY updated_at DESC, created_at DESC
|
||||
@@ -48,7 +48,7 @@ class DBConfigManager:
|
||||
elif graph_id:
|
||||
cur.execute(
|
||||
"""
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
|
||||
FROM prompt_sets
|
||||
WHERE graph_id = %s
|
||||
ORDER BY updated_at DESC, created_at DESC
|
||||
@@ -58,7 +58,7 @@ class DBConfigManager:
|
||||
else:
|
||||
cur.execute(
|
||||
"""
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
|
||||
FROM prompt_sets
|
||||
ORDER BY updated_at DESC, created_at DESC
|
||||
"""
|
||||
@@ -76,6 +76,7 @@ class DBConfigManager:
|
||||
"created_at": row["created_at"].isoformat() if row["created_at"] else None,
|
||||
"updated_at": row["updated_at"].isoformat() if row["updated_at"] else None,
|
||||
"tool_keys": self._parse_tool_list(row.get("list")),
|
||||
"api_key": row.get("api_key") or "",
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
@@ -88,7 +89,7 @@ class DBConfigManager:
|
||||
with conn.cursor(row_factory=dict_row) as cur:
|
||||
cur.execute(
|
||||
"""
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list
|
||||
SELECT id, pipeline_id, graph_id, name, description, is_active, created_at, updated_at, list, api_key
|
||||
FROM prompt_sets
|
||||
WHERE id = %s AND pipeline_id = %s
|
||||
""",
|
||||
@@ -109,6 +110,7 @@ class DBConfigManager:
|
||||
"created_at": row["created_at"].isoformat() if row["created_at"] else None,
|
||||
"updated_at": row["updated_at"].isoformat() if row["updated_at"] else None,
|
||||
"tool_keys": self._parse_tool_list(row.get("list")),
|
||||
"api_key": row.get("api_key") or "",
|
||||
}
|
||||
|
||||
def get_config(
|
||||
@@ -160,6 +162,7 @@ class DBConfigManager:
|
||||
prompt_set_id: Optional[str],
|
||||
tool_list: Optional[Sequence[str]],
|
||||
prompt_dict: Optional[Mapping[str, str]],
|
||||
api_key: Optional[str] = None,
|
||||
) -> str:
|
||||
"""
|
||||
Persist prompt + tool configuration.
|
||||
@@ -182,6 +185,7 @@ class DBConfigManager:
|
||||
|
||||
normalized_prompt_dict = self._normalize_prompt_dict(prompt_dict)
|
||||
tool_csv = self._join_tool_list(tool_list)
|
||||
normalized_api_key = self._normalize_api_key(api_key)
|
||||
|
||||
with psycopg.connect(self.conn_str) as conn:
|
||||
resolved_set_id, _ = self._resolve_prompt_set(
|
||||
@@ -200,10 +204,13 @@ class DBConfigManager:
|
||||
cur.execute(
|
||||
"""
|
||||
UPDATE prompt_sets
|
||||
SET list = %s, graph_id = COALESCE(%s, graph_id), updated_at = now()
|
||||
SET list = %s,
|
||||
graph_id = COALESCE(%s, graph_id),
|
||||
api_key = COALESCE(%s, api_key),
|
||||
updated_at = now()
|
||||
WHERE id = %s
|
||||
""",
|
||||
(tool_csv, normalized_graph_id, resolved_set_id),
|
||||
(tool_csv, normalized_graph_id, normalized_api_key, resolved_set_id),
|
||||
)
|
||||
|
||||
keys = list(normalized_prompt_dict.keys())
|
||||
@@ -340,4 +347,9 @@ class DBConfigManager:
|
||||
if graph_id is None:
|
||||
return None
|
||||
value = str(graph_id).strip()
|
||||
return value or None
|
||||
return value or None
|
||||
|
||||
def _normalize_api_key(self, api_key: Optional[str]) -> Optional[str]:
|
||||
if api_key is None:
|
||||
return None
|
||||
return str(api_key).strip()
|
||||
@@ -3,7 +3,7 @@ from typing import Type, Callable, List
|
||||
import tyro
|
||||
import random
|
||||
|
||||
from lang_agent.config import KeyConfig
|
||||
from lang_agent.config import LLMKeyConfig
|
||||
from lang_agent.pipeline import Pipeline, PipelineConfig
|
||||
|
||||
from langchain.chat_models import init_chat_model
|
||||
@@ -11,7 +11,7 @@ from langchain_core.messages import BaseMessage, ToolMessage
|
||||
|
||||
@tyro.conf.configure(tyro.conf.SuppressFixed)
|
||||
@dataclass
|
||||
class ValidatorConfig(KeyConfig):
|
||||
class ValidatorConfig(LLMKeyConfig):
|
||||
_target: Type = field(default_factory=lambda:Validator)
|
||||
|
||||
|
||||
@@ -34,9 +34,9 @@ class Validator:
|
||||
|
||||
def populate_modules(self):
|
||||
self.judge_llm = init_chat_model(
|
||||
model="qwen-plus",
|
||||
model_provider="openai",
|
||||
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
|
||||
model=self.config.llm_name,
|
||||
model_provider=self.config.llm_provider,
|
||||
base_url=self.config.base_url,
|
||||
api_key=self.config.api_key
|
||||
)
|
||||
|
||||
|
||||
@@ -1,45 +1,68 @@
|
||||
from typing import List
|
||||
from typing import Dict, List, Optional
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
def _build_template(graph:str,
|
||||
pipeline_id:str,
|
||||
prompt_set:str,
|
||||
tool_keys:List[str],
|
||||
port:str,
|
||||
entry_pnt:str="fastapi_server/server_dashscope.py",
|
||||
llm_name:str="qwen-plus"):
|
||||
cmd = [
|
||||
"python", entry_pnt,
|
||||
"--llm-name", llm_name,
|
||||
"--port", str(port),
|
||||
graph,
|
||||
"--pipeline-id", pipeline_id,
|
||||
"--prompt-set-id", prompt_set,
|
||||
]
|
||||
if tool_keys:
|
||||
cmd.extend(
|
||||
["--tool-manager-config.client-tool-manager.tool-keys", *tool_keys]
|
||||
)
|
||||
sv_prc = subprocess.Popen(cmd)
|
||||
|
||||
return sv_prc, f"http://0.0.0.0:{port}"
|
||||
|
||||
def build_route(pipeline_id:str,
|
||||
prompt_set:str,
|
||||
tool_keys:List[str],
|
||||
port:str,
|
||||
api_key: str,
|
||||
fast_auth_keys: Optional[str] = None,
|
||||
entry_pnt:str="fastapi_server/server_dashscope.py",
|
||||
llm_name:str="qwen-plus"):
|
||||
return _build_template("route", pipeline_id, prompt_set, tool_keys, port, entry_pnt, llm_name)
|
||||
cmd = [
|
||||
"python", entry_pnt,
|
||||
"--port", str(port),
|
||||
"route", # ------------
|
||||
"--llm-name", llm_name,
|
||||
"--api-key", api_key,
|
||||
"--pipeline-id", pipeline_id,
|
||||
"--prompt-set-id", prompt_set,
|
||||
"tool_node", # ------------
|
||||
"--llm-name", llm_name,
|
||||
"--api-key", api_key,
|
||||
"--pipeline-id", pipeline_id,
|
||||
"--prompt-set-id", prompt_set,
|
||||
]
|
||||
if tool_keys:
|
||||
cmd.extend(
|
||||
["--tool-manager-config.client-tool-manager.tool-keys", *tool_keys]
|
||||
)
|
||||
env: Dict[str, str] = os.environ.copy()
|
||||
if fast_auth_keys:
|
||||
env["FAST_AUTH_KEYS"] = fast_auth_keys
|
||||
sv_prc = subprocess.Popen(cmd, env=env)
|
||||
|
||||
return sv_prc, f"http://127.0.0.1:{port}/api/"
|
||||
|
||||
|
||||
def build_react(pipeline_id:str,
|
||||
prompt_set:str,
|
||||
tool_keys:List[str],
|
||||
port:str,
|
||||
api_key: str,
|
||||
fast_auth_keys: Optional[str] = None,
|
||||
entry_pnt:str="fastapi_server/server_dashscope.py",
|
||||
llm_name:str="qwen-plus"):
|
||||
return _build_template("react", pipeline_id, prompt_set, tool_keys, port, entry_pnt, llm_name)
|
||||
cmd = [
|
||||
"python", entry_pnt,
|
||||
"--port", str(port),
|
||||
"react", # ------------
|
||||
"--llm-name", llm_name,
|
||||
"--api-key", api_key,
|
||||
"--pipeline-id", pipeline_id,
|
||||
"--prompt-set-id", prompt_set,
|
||||
]
|
||||
if tool_keys:
|
||||
cmd.extend(
|
||||
["--tool-manager-config.client-tool-manager.tool-keys", *tool_keys]
|
||||
)
|
||||
env: Dict[str, str] = os.environ.copy()
|
||||
if fast_auth_keys:
|
||||
env["FAST_AUTH_KEYS"] = fast_auth_keys
|
||||
sv_prc = subprocess.Popen(cmd, env=env)
|
||||
|
||||
return sv_prc, f"http://127.0.0.1:{port}/api/"
|
||||
|
||||
# {pipeline_id: build_function}
|
||||
GRAPH_BUILD_FNCS = {
|
||||
|
||||
@@ -6,7 +6,7 @@ from loguru import logger
|
||||
|
||||
from langchain.chat_models import init_chat_model
|
||||
|
||||
from lang_agent.config import LLMKeyConfig
|
||||
from lang_agent.config import LLMNodeConfig
|
||||
from lang_agent.base import GraphBase
|
||||
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
|
||||
from lang_agent.components.prompt_store import build_prompt_store
|
||||
@@ -48,15 +48,9 @@ TOOL_SYS_PROMPT = """根据用户的心情使用self_led_control改变灯的颜
|
||||
用户在描述梦境的时候用紫色。"""
|
||||
|
||||
@dataclass
|
||||
class DualConfig(LLMKeyConfig):
|
||||
class DualConfig(LLMNodeConfig):
|
||||
_target: Type = field(default_factory=lambda:Dual)
|
||||
|
||||
pipeline_id: Optional[str] = None
|
||||
"""If set, load prompts from database (with hardcoded fallback)"""
|
||||
|
||||
prompt_set_id: Optional[str] = None
|
||||
"""If set, load from this specific prompt set instead of the active one"""
|
||||
|
||||
tool_manager_config: ToolManagerConfig = field(default_factory=ToolManagerConfig)
|
||||
|
||||
from langchain.tools import tool
|
||||
|
||||
@@ -8,7 +8,7 @@ import time
|
||||
|
||||
from langchain.chat_models import init_chat_model
|
||||
|
||||
from lang_agent.config import LLMKeyConfig
|
||||
from lang_agent.config import LLMNodeConfig
|
||||
from lang_agent.base import GraphBase
|
||||
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
|
||||
from lang_agent.graphs.graph_states import State
|
||||
@@ -45,7 +45,7 @@ TOOL_SYS_PROMPT = """You are a helpful helper and will use the self_led_control
|
||||
|
||||
|
||||
@dataclass
|
||||
class XiaoAiConfig(LLMKeyConfig):
|
||||
class XiaoAiConfig(LLMNodeConfig):
|
||||
_target: Type = field(default_factory=lambda:XiaoAi)
|
||||
|
||||
tool_manager_config: ToolManagerConfig = field(default_factory=ToolManagerConfig)
|
||||
|
||||
@@ -4,7 +4,7 @@ import tyro
|
||||
import os.path as osp
|
||||
from loguru import logger
|
||||
|
||||
from lang_agent.config import KeyConfig
|
||||
from lang_agent.config import LLMNodeConfig
|
||||
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
|
||||
from lang_agent.components.prompt_store import build_prompt_store
|
||||
from lang_agent.base import GraphBase
|
||||
@@ -20,27 +20,12 @@ from langgraph.graph import StateGraph, START, END
|
||||
# NOTE: maybe make this into a base_graph_config?
|
||||
@tyro.conf.configure(tyro.conf.SuppressFixed)
|
||||
@dataclass
|
||||
class ReactGraphConfig(KeyConfig):
|
||||
class ReactGraphConfig(LLMNodeConfig):
|
||||
_target: Type = field(default_factory=lambda: ReactGraph)
|
||||
|
||||
llm_name: str = "qwen-plus"
|
||||
"""name of llm"""
|
||||
|
||||
llm_provider:str = "openai"
|
||||
"""provider of the llm"""
|
||||
|
||||
sys_prompt_f:str = osp.join(osp.dirname(osp.dirname(osp.dirname(__file__))), "configs", "prompts", "blueberry.txt")
|
||||
"""path to system prompt"""
|
||||
|
||||
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
"""base url; could be used to overwrite the baseurl in llm provider"""
|
||||
|
||||
pipeline_id: Optional[str] = None
|
||||
"""If set, load prompts from database (with file fallback)"""
|
||||
|
||||
prompt_set_id: Optional[str] = None
|
||||
"""If set, load from this specific prompt set instead of the active one"""
|
||||
|
||||
tool_manager_config: ToolManagerConfig = field(default_factory=ToolManagerConfig)
|
||||
|
||||
def __post_init__(self):
|
||||
|
||||
@@ -8,7 +8,7 @@ import commentjson
|
||||
import glob
|
||||
import time
|
||||
|
||||
from lang_agent.config import LLMKeyConfig
|
||||
from lang_agent.config import LLMNodeConfig
|
||||
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
|
||||
from lang_agent.components.prompt_store import build_prompt_store
|
||||
from lang_agent.base import GraphBase, ToolNodeBase
|
||||
@@ -27,27 +27,12 @@ from langgraph.checkpoint.memory import MemorySaver
|
||||
|
||||
@tyro.conf.configure(tyro.conf.SuppressFixed)
|
||||
@dataclass
|
||||
class RoutingConfig(LLMKeyConfig):
|
||||
class RoutingConfig(LLMNodeConfig):
|
||||
_target: Type = field(default_factory=lambda: RoutingGraph)
|
||||
|
||||
llm_name: str = "qwen-plus"
|
||||
"""name of llm"""
|
||||
|
||||
llm_provider:str = "openai"
|
||||
"""provider of the llm"""
|
||||
|
||||
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
"""base url; could be used to overwrite the baseurl in llm provider"""
|
||||
|
||||
sys_promp_dir: str = osp.join(osp.dirname(osp.dirname(osp.dirname(__file__))), "configs", "route_sys_prompts")
|
||||
"""path to directory or json contantaining system prompt for graphs; Will overwrite systemprompt from xiaozhi if 'chat_prompt' is provided"""
|
||||
|
||||
pipeline_id: Optional[str] = None
|
||||
"""If set, load prompts from database (with file fallback)"""
|
||||
|
||||
prompt_set_id: Optional[str] = None
|
||||
"""If set, load from this specific prompt set instead of the active one"""
|
||||
|
||||
tool_manager_config: ToolManagerConfig = field(default_factory=ToolManagerConfig)
|
||||
|
||||
tool_node_config: AnnotatedToolNode = field(default_factory=ToolNodeConfig)
|
||||
|
||||
@@ -6,7 +6,7 @@ import time
|
||||
import asyncio
|
||||
from loguru import logger
|
||||
|
||||
from lang_agent.config import InstantiateConfig, KeyConfig
|
||||
from lang_agent.config import InstantiateConfig, LLMNodeConfig
|
||||
from lang_agent.components.tool_manager import ToolManager
|
||||
from lang_agent.components.prompt_store import build_prompt_store
|
||||
from lang_agent.components.reit_llm import ReitLLM
|
||||
@@ -23,17 +23,11 @@ from langgraph.graph import StateGraph, START, END
|
||||
|
||||
|
||||
@dataclass
|
||||
class ToolNodeConfig(InstantiateConfig):
|
||||
class ToolNodeConfig(LLMNodeConfig):
|
||||
_target: Type = field(default_factory=lambda: ToolNode)
|
||||
|
||||
tool_prompt_f:str = osp.join(osp.dirname(osp.dirname(osp.dirname(__file__))), "configs", "route_sys_prompts", "tool_prompt.txt")
|
||||
|
||||
pipeline_id: Optional[str] = None
|
||||
"""If set, load prompts from database (with file fallback)"""
|
||||
|
||||
prompt_set_id: Optional[str] = None
|
||||
"""If set, load from this specific prompt set instead of the active one"""
|
||||
|
||||
|
||||
class ToolNode(ToolNodeBase):
|
||||
def __init__(self, config: ToolNodeConfig,
|
||||
@@ -46,7 +40,9 @@ class ToolNode(ToolNodeBase):
|
||||
self.populate_modules()
|
||||
|
||||
def populate_modules(self):
|
||||
self.llm = make_llm(tags=["tool_llm"])
|
||||
self.llm = make_llm(model=self.config.llm_name,
|
||||
api_key=self.config.api_key,
|
||||
tags=["tool_llm"])
|
||||
|
||||
self.tool_agent = create_agent(self.llm, self.tool_manager.get_langchain_tools(), checkpointer=self.mem)
|
||||
self.prompt_store = build_prompt_store(
|
||||
@@ -85,23 +81,12 @@ class ToolNode(ToolNodeBase):
|
||||
|
||||
|
||||
@dataclass
|
||||
class ChattyToolNodeConfig(KeyConfig, ToolNodeConfig):
|
||||
class ChattyToolNodeConfig(LLMNodeConfig):
|
||||
_target: Type = field(default_factory=lambda: ChattyToolNode)
|
||||
|
||||
llm_name: str = "qwen-plus"
|
||||
"""name of llm"""
|
||||
|
||||
llm_provider:str = "openai"
|
||||
"""provider of the llm"""
|
||||
|
||||
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
"""base url; could be used to overwrite the baseurl in llm provider"""
|
||||
|
||||
chatty_sys_prompt_f:str = osp.join(osp.dirname(osp.dirname(osp.dirname(__file__))), "configs", "route_sys_prompts", "chatty_prompt.txt")
|
||||
"""path to chatty system prompt"""
|
||||
|
||||
# pipeline_id and prompt_set_id are inherited from ToolNodeConfig
|
||||
|
||||
tool_node_conf:ToolNodeConfig = field(default_factory=ToolNodeConfig)
|
||||
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ import base64
|
||||
import json
|
||||
from loguru import logger
|
||||
|
||||
from lang_agent.config import LLMKeyConfig
|
||||
from lang_agent.config import LLMNodeConfig
|
||||
from lang_agent.components.tool_manager import ToolManager, ToolManagerConfig
|
||||
from lang_agent.components.prompt_store import build_prompt_store
|
||||
from lang_agent.base import GraphBase, ToolNodeBase
|
||||
@@ -90,7 +90,7 @@ class VisionRoutingState(TypedDict):
|
||||
|
||||
@tyro.conf.configure(tyro.conf.SuppressFixed)
|
||||
@dataclass
|
||||
class VisionRoutingConfig(LLMKeyConfig):
|
||||
class VisionRoutingConfig(LLMNodeConfig):
|
||||
_target: Type = field(default_factory=lambda: VisionRoutingGraph)
|
||||
|
||||
tool_llm_name: str = "qwen-flash"
|
||||
@@ -99,18 +99,6 @@ class VisionRoutingConfig(LLMKeyConfig):
|
||||
vision_llm_name: str = "qwen-vl-max"
|
||||
"""LLM for vision/image analysis"""
|
||||
|
||||
llm_provider: str = "openai"
|
||||
"""provider of the llm"""
|
||||
|
||||
base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
"""base url for API"""
|
||||
|
||||
pipeline_id: Optional[str] = None
|
||||
"""If set, load prompts from database (with hardcoded fallback)"""
|
||||
|
||||
prompt_set_id: Optional[str] = None
|
||||
"""If set, load from this specific prompt set instead of the active one"""
|
||||
|
||||
tool_manager_config: ToolManagerConfig = field(default_factory=ClientToolManagerConfig)
|
||||
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ from langchain_core.messages import SystemMessage, HumanMessage, BaseMessage
|
||||
from langchain.agents import create_agent
|
||||
from langgraph.checkpoint.memory import MemorySaver
|
||||
|
||||
from lang_agent.config import InstantiateConfig, KeyConfig
|
||||
from lang_agent.config import LLMNodeConfig
|
||||
from lang_agent.graphs import AnnotatedGraph, ReactGraphConfig, RoutingConfig
|
||||
from lang_agent.base import GraphBase
|
||||
from lang_agent.components import conv_store
|
||||
@@ -52,33 +52,18 @@ DEFAULT_PROMPT="""你是半盏新青年茶馆的服务员,擅长倾听、共
|
||||
|
||||
@tyro.conf.configure(tyro.conf.SuppressFixed)
|
||||
@dataclass
|
||||
class PipelineConfig(KeyConfig):
|
||||
class PipelineConfig(LLMNodeConfig):
|
||||
_target: Type = field(default_factory=lambda: Pipeline)
|
||||
|
||||
config_f: str = None
|
||||
"""path to config file"""
|
||||
|
||||
llm_name: str = "qwen-plus"
|
||||
"""name of llm; use default for qwen-plus"""
|
||||
|
||||
llm_provider:str = "openai"
|
||||
"""provider of the llm; use default for openai"""
|
||||
|
||||
base_url:str = "https://dashscope.aliyuncs.com/compatible-mode/v1"
|
||||
"""base url; could be used to overwrite the baseurl in llm provider"""
|
||||
|
||||
host:str = "0.0.0.0"
|
||||
"""where am I hosted"""
|
||||
|
||||
port:int = 8588
|
||||
"""what is my port"""
|
||||
|
||||
pipeline_id: str = None
|
||||
"""If set, load prompts from database (with file fallback)"""
|
||||
|
||||
prompt_set_id: str = None
|
||||
"""If set, load from this specific prompt set instead of the active one"""
|
||||
|
||||
# graph_config: AnnotatedGraph = field(default_factory=ReactGraphConfig)
|
||||
graph_config: AnnotatedGraph = field(default_factory=RoutingConfig)
|
||||
|
||||
|
||||
@@ -9,13 +9,13 @@ from langchain_community.vectorstores import FAISS
|
||||
from langchain_core.documents.base import Document
|
||||
|
||||
from lang_agent.rag.emb import QwenEmbeddings
|
||||
from lang_agent.config import ToolConfig, KeyConfig
|
||||
from lang_agent.config import ToolConfig, LLMKeyConfig
|
||||
from lang_agent.base import LangToolBase
|
||||
|
||||
|
||||
@tyro.conf.configure(tyro.conf.SuppressFixed)
|
||||
@dataclass
|
||||
class SimpleRagConfig(ToolConfig, KeyConfig):
|
||||
class SimpleRagConfig(ToolConfig, LLMKeyConfig):
|
||||
_target: Type = field(default_factory=lambda: SimpleRag)
|
||||
|
||||
model_name:str = "text-embedding-v4"
|
||||
|
||||
@@ -77,10 +77,12 @@ def main():
|
||||
# 避免偶发回退
|
||||
delta = current_text
|
||||
if delta:
|
||||
u = delta
|
||||
u = delta
|
||||
last_text = current_text
|
||||
|
||||
logger.info(f"from stream: {u}")
|
||||
# For streaming responses, print incrementally to stdout and flush
|
||||
# so the user can see tokens as they arrive.
|
||||
print(u, end="", flush=True)
|
||||
except TypeError:
|
||||
# 非流式回落(一次性返回)
|
||||
if responses.status_code != HTTPStatus.OK:
|
||||
|
||||
Reference in New Issue
Block a user