Use qwen-flash instead of qwen-turbo as the default routing LLM

This commit is contained in:
2025-10-30 21:55:04 +08:00
parent bbe7d47e83
commit 394db8eb7b

View File

@@ -28,7 +28,7 @@ from langgraph.checkpoint.memory import MemorySaver
class RoutingConfig(KeyConfig):
_target: Type = field(default_factory=lambda: RoutingGraph)
llm_name: str = "qwen-turbo"
llm_name: str = "qwen-flash"
"""name of llm"""
llm_provider:str = "openai"