use qwen flash instead
This commit is contained in:
@@ -28,7 +28,7 @@ from langgraph.checkpoint.memory import MemorySaver

 class RoutingConfig(KeyConfig):

     _target: Type = field(default_factory=lambda: RoutingGraph)

-    llm_name: str = "qwen-turbo"
+    llm_name: str = "qwen-flash"
     """name of llm"""

     llm_provider:str = "openai"
Reference in New Issue
Block a user