realtimex_deeptutor-0.5.0.post1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- realtimex_deeptutor/__init__.py +67 -0
- realtimex_deeptutor-0.5.0.post1.dist-info/METADATA +1612 -0
- realtimex_deeptutor-0.5.0.post1.dist-info/RECORD +276 -0
- realtimex_deeptutor-0.5.0.post1.dist-info/WHEEL +5 -0
- realtimex_deeptutor-0.5.0.post1.dist-info/entry_points.txt +2 -0
- realtimex_deeptutor-0.5.0.post1.dist-info/licenses/LICENSE +661 -0
- realtimex_deeptutor-0.5.0.post1.dist-info/top_level.txt +2 -0
- src/__init__.py +40 -0
- src/agents/__init__.py +24 -0
- src/agents/base_agent.py +657 -0
- src/agents/chat/__init__.py +24 -0
- src/agents/chat/chat_agent.py +435 -0
- src/agents/chat/prompts/en/chat_agent.yaml +35 -0
- src/agents/chat/prompts/zh/chat_agent.yaml +35 -0
- src/agents/chat/session_manager.py +311 -0
- src/agents/co_writer/__init__.py +0 -0
- src/agents/co_writer/edit_agent.py +260 -0
- src/agents/co_writer/narrator_agent.py +423 -0
- src/agents/co_writer/prompts/en/edit_agent.yaml +113 -0
- src/agents/co_writer/prompts/en/narrator_agent.yaml +88 -0
- src/agents/co_writer/prompts/zh/edit_agent.yaml +113 -0
- src/agents/co_writer/prompts/zh/narrator_agent.yaml +88 -0
- src/agents/guide/__init__.py +16 -0
- src/agents/guide/agents/__init__.py +11 -0
- src/agents/guide/agents/chat_agent.py +104 -0
- src/agents/guide/agents/interactive_agent.py +223 -0
- src/agents/guide/agents/locate_agent.py +149 -0
- src/agents/guide/agents/summary_agent.py +150 -0
- src/agents/guide/guide_manager.py +500 -0
- src/agents/guide/prompts/en/chat_agent.yaml +41 -0
- src/agents/guide/prompts/en/interactive_agent.yaml +202 -0
- src/agents/guide/prompts/en/locate_agent.yaml +68 -0
- src/agents/guide/prompts/en/summary_agent.yaml +157 -0
- src/agents/guide/prompts/zh/chat_agent.yaml +41 -0
- src/agents/guide/prompts/zh/interactive_agent.yaml +626 -0
- src/agents/guide/prompts/zh/locate_agent.yaml +68 -0
- src/agents/guide/prompts/zh/summary_agent.yaml +157 -0
- src/agents/ideagen/__init__.py +12 -0
- src/agents/ideagen/idea_generation_workflow.py +426 -0
- src/agents/ideagen/material_organizer_agent.py +173 -0
- src/agents/ideagen/prompts/en/idea_generation.yaml +187 -0
- src/agents/ideagen/prompts/en/material_organizer.yaml +69 -0
- src/agents/ideagen/prompts/zh/idea_generation.yaml +187 -0
- src/agents/ideagen/prompts/zh/material_organizer.yaml +69 -0
- src/agents/question/__init__.py +24 -0
- src/agents/question/agents/__init__.py +18 -0
- src/agents/question/agents/generate_agent.py +381 -0
- src/agents/question/agents/relevance_analyzer.py +207 -0
- src/agents/question/agents/retrieve_agent.py +239 -0
- src/agents/question/coordinator.py +718 -0
- src/agents/question/example.py +109 -0
- src/agents/question/prompts/en/coordinator.yaml +75 -0
- src/agents/question/prompts/en/generate_agent.yaml +77 -0
- src/agents/question/prompts/en/relevance_analyzer.yaml +41 -0
- src/agents/question/prompts/en/retrieve_agent.yaml +32 -0
- src/agents/question/prompts/zh/coordinator.yaml +75 -0
- src/agents/question/prompts/zh/generate_agent.yaml +77 -0
- src/agents/question/prompts/zh/relevance_analyzer.yaml +39 -0
- src/agents/question/prompts/zh/retrieve_agent.yaml +30 -0
- src/agents/research/agents/__init__.py +23 -0
- src/agents/research/agents/decompose_agent.py +507 -0
- src/agents/research/agents/manager_agent.py +228 -0
- src/agents/research/agents/note_agent.py +180 -0
- src/agents/research/agents/rephrase_agent.py +263 -0
- src/agents/research/agents/reporting_agent.py +1333 -0
- src/agents/research/agents/research_agent.py +714 -0
- src/agents/research/data_structures.py +451 -0
- src/agents/research/main.py +188 -0
- src/agents/research/prompts/en/decompose_agent.yaml +89 -0
- src/agents/research/prompts/en/manager_agent.yaml +24 -0
- src/agents/research/prompts/en/note_agent.yaml +121 -0
- src/agents/research/prompts/en/rephrase_agent.yaml +58 -0
- src/agents/research/prompts/en/reporting_agent.yaml +380 -0
- src/agents/research/prompts/en/research_agent.yaml +173 -0
- src/agents/research/prompts/zh/decompose_agent.yaml +89 -0
- src/agents/research/prompts/zh/manager_agent.yaml +24 -0
- src/agents/research/prompts/zh/note_agent.yaml +121 -0
- src/agents/research/prompts/zh/rephrase_agent.yaml +58 -0
- src/agents/research/prompts/zh/reporting_agent.yaml +380 -0
- src/agents/research/prompts/zh/research_agent.yaml +173 -0
- src/agents/research/research_pipeline.py +1309 -0
- src/agents/research/utils/__init__.py +60 -0
- src/agents/research/utils/citation_manager.py +799 -0
- src/agents/research/utils/json_utils.py +98 -0
- src/agents/research/utils/token_tracker.py +297 -0
- src/agents/solve/__init__.py +80 -0
- src/agents/solve/analysis_loop/__init__.py +14 -0
- src/agents/solve/analysis_loop/investigate_agent.py +414 -0
- src/agents/solve/analysis_loop/note_agent.py +190 -0
- src/agents/solve/main_solver.py +862 -0
- src/agents/solve/memory/__init__.py +34 -0
- src/agents/solve/memory/citation_memory.py +353 -0
- src/agents/solve/memory/investigate_memory.py +226 -0
- src/agents/solve/memory/solve_memory.py +340 -0
- src/agents/solve/prompts/en/analysis_loop/investigate_agent.yaml +55 -0
- src/agents/solve/prompts/en/analysis_loop/note_agent.yaml +54 -0
- src/agents/solve/prompts/en/solve_loop/manager_agent.yaml +67 -0
- src/agents/solve/prompts/en/solve_loop/precision_answer_agent.yaml +62 -0
- src/agents/solve/prompts/en/solve_loop/response_agent.yaml +90 -0
- src/agents/solve/prompts/en/solve_loop/solve_agent.yaml +75 -0
- src/agents/solve/prompts/en/solve_loop/tool_agent.yaml +38 -0
- src/agents/solve/prompts/zh/analysis_loop/investigate_agent.yaml +53 -0
- src/agents/solve/prompts/zh/analysis_loop/note_agent.yaml +54 -0
- src/agents/solve/prompts/zh/solve_loop/manager_agent.yaml +66 -0
- src/agents/solve/prompts/zh/solve_loop/precision_answer_agent.yaml +62 -0
- src/agents/solve/prompts/zh/solve_loop/response_agent.yaml +90 -0
- src/agents/solve/prompts/zh/solve_loop/solve_agent.yaml +76 -0
- src/agents/solve/prompts/zh/solve_loop/tool_agent.yaml +41 -0
- src/agents/solve/solve_loop/__init__.py +22 -0
- src/agents/solve/solve_loop/citation_manager.py +74 -0
- src/agents/solve/solve_loop/manager_agent.py +274 -0
- src/agents/solve/solve_loop/precision_answer_agent.py +96 -0
- src/agents/solve/solve_loop/response_agent.py +301 -0
- src/agents/solve/solve_loop/solve_agent.py +325 -0
- src/agents/solve/solve_loop/tool_agent.py +470 -0
- src/agents/solve/utils/__init__.py +64 -0
- src/agents/solve/utils/config_validator.py +313 -0
- src/agents/solve/utils/display_manager.py +223 -0
- src/agents/solve/utils/error_handler.py +363 -0
- src/agents/solve/utils/json_utils.py +98 -0
- src/agents/solve/utils/performance_monitor.py +407 -0
- src/agents/solve/utils/token_tracker.py +541 -0
- src/api/__init__.py +0 -0
- src/api/main.py +240 -0
- src/api/routers/__init__.py +1 -0
- src/api/routers/agent_config.py +69 -0
- src/api/routers/chat.py +296 -0
- src/api/routers/co_writer.py +337 -0
- src/api/routers/config.py +627 -0
- src/api/routers/dashboard.py +18 -0
- src/api/routers/guide.py +337 -0
- src/api/routers/ideagen.py +436 -0
- src/api/routers/knowledge.py +821 -0
- src/api/routers/notebook.py +247 -0
- src/api/routers/question.py +537 -0
- src/api/routers/research.py +394 -0
- src/api/routers/settings.py +164 -0
- src/api/routers/solve.py +305 -0
- src/api/routers/system.py +252 -0
- src/api/run_server.py +61 -0
- src/api/utils/history.py +172 -0
- src/api/utils/log_interceptor.py +21 -0
- src/api/utils/notebook_manager.py +415 -0
- src/api/utils/progress_broadcaster.py +72 -0
- src/api/utils/task_id_manager.py +100 -0
- src/config/__init__.py +0 -0
- src/config/accessors.py +18 -0
- src/config/constants.py +34 -0
- src/config/defaults.py +18 -0
- src/config/schema.py +38 -0
- src/config/settings.py +50 -0
- src/core/errors.py +62 -0
- src/knowledge/__init__.py +23 -0
- src/knowledge/add_documents.py +606 -0
- src/knowledge/config.py +65 -0
- src/knowledge/example_add_documents.py +236 -0
- src/knowledge/extract_numbered_items.py +1039 -0
- src/knowledge/initializer.py +621 -0
- src/knowledge/kb.py +22 -0
- src/knowledge/manager.py +782 -0
- src/knowledge/progress_tracker.py +182 -0
- src/knowledge/start_kb.py +535 -0
- src/logging/__init__.py +103 -0
- src/logging/adapters/__init__.py +17 -0
- src/logging/adapters/lightrag.py +184 -0
- src/logging/adapters/llamaindex.py +141 -0
- src/logging/config.py +80 -0
- src/logging/handlers/__init__.py +20 -0
- src/logging/handlers/console.py +75 -0
- src/logging/handlers/file.py +201 -0
- src/logging/handlers/websocket.py +127 -0
- src/logging/logger.py +709 -0
- src/logging/stats/__init__.py +16 -0
- src/logging/stats/llm_stats.py +179 -0
- src/services/__init__.py +56 -0
- src/services/config/__init__.py +61 -0
- src/services/config/knowledge_base_config.py +210 -0
- src/services/config/loader.py +260 -0
- src/services/config/unified_config.py +603 -0
- src/services/embedding/__init__.py +45 -0
- src/services/embedding/adapters/__init__.py +22 -0
- src/services/embedding/adapters/base.py +106 -0
- src/services/embedding/adapters/cohere.py +127 -0
- src/services/embedding/adapters/jina.py +99 -0
- src/services/embedding/adapters/ollama.py +116 -0
- src/services/embedding/adapters/openai_compatible.py +96 -0
- src/services/embedding/client.py +159 -0
- src/services/embedding/config.py +156 -0
- src/services/embedding/provider.py +119 -0
- src/services/llm/__init__.py +152 -0
- src/services/llm/capabilities.py +313 -0
- src/services/llm/client.py +302 -0
- src/services/llm/cloud_provider.py +530 -0
- src/services/llm/config.py +200 -0
- src/services/llm/error_mapping.py +103 -0
- src/services/llm/exceptions.py +152 -0
- src/services/llm/factory.py +450 -0
- src/services/llm/local_provider.py +347 -0
- src/services/llm/providers/anthropic.py +95 -0
- src/services/llm/providers/base_provider.py +93 -0
- src/services/llm/providers/open_ai.py +83 -0
- src/services/llm/registry.py +71 -0
- src/services/llm/telemetry.py +40 -0
- src/services/llm/types.py +27 -0
- src/services/llm/utils.py +333 -0
- src/services/prompt/__init__.py +25 -0
- src/services/prompt/manager.py +206 -0
- src/services/rag/__init__.py +64 -0
- src/services/rag/components/__init__.py +29 -0
- src/services/rag/components/base.py +59 -0
- src/services/rag/components/chunkers/__init__.py +18 -0
- src/services/rag/components/chunkers/base.py +34 -0
- src/services/rag/components/chunkers/fixed.py +71 -0
- src/services/rag/components/chunkers/numbered_item.py +94 -0
- src/services/rag/components/chunkers/semantic.py +97 -0
- src/services/rag/components/embedders/__init__.py +14 -0
- src/services/rag/components/embedders/base.py +32 -0
- src/services/rag/components/embedders/openai.py +63 -0
- src/services/rag/components/indexers/__init__.py +18 -0
- src/services/rag/components/indexers/base.py +35 -0
- src/services/rag/components/indexers/graph.py +172 -0
- src/services/rag/components/indexers/lightrag.py +156 -0
- src/services/rag/components/indexers/vector.py +146 -0
- src/services/rag/components/parsers/__init__.py +18 -0
- src/services/rag/components/parsers/base.py +35 -0
- src/services/rag/components/parsers/markdown.py +52 -0
- src/services/rag/components/parsers/pdf.py +115 -0
- src/services/rag/components/parsers/text.py +86 -0
- src/services/rag/components/retrievers/__init__.py +18 -0
- src/services/rag/components/retrievers/base.py +34 -0
- src/services/rag/components/retrievers/dense.py +200 -0
- src/services/rag/components/retrievers/hybrid.py +164 -0
- src/services/rag/components/retrievers/lightrag.py +169 -0
- src/services/rag/components/routing.py +286 -0
- src/services/rag/factory.py +234 -0
- src/services/rag/pipeline.py +215 -0
- src/services/rag/pipelines/__init__.py +32 -0
- src/services/rag/pipelines/academic.py +44 -0
- src/services/rag/pipelines/lightrag.py +43 -0
- src/services/rag/pipelines/llamaindex.py +313 -0
- src/services/rag/pipelines/raganything.py +384 -0
- src/services/rag/service.py +244 -0
- src/services/rag/types.py +73 -0
- src/services/search/__init__.py +284 -0
- src/services/search/base.py +87 -0
- src/services/search/consolidation.py +398 -0
- src/services/search/providers/__init__.py +128 -0
- src/services/search/providers/baidu.py +188 -0
- src/services/search/providers/exa.py +194 -0
- src/services/search/providers/jina.py +161 -0
- src/services/search/providers/perplexity.py +153 -0
- src/services/search/providers/serper.py +209 -0
- src/services/search/providers/tavily.py +161 -0
- src/services/search/types.py +114 -0
- src/services/setup/__init__.py +34 -0
- src/services/setup/init.py +285 -0
- src/services/tts/__init__.py +16 -0
- src/services/tts/config.py +99 -0
- src/tools/__init__.py +91 -0
- src/tools/code_executor.py +536 -0
- src/tools/paper_search_tool.py +171 -0
- src/tools/query_item_tool.py +310 -0
- src/tools/question/__init__.py +15 -0
- src/tools/question/exam_mimic.py +616 -0
- src/tools/question/pdf_parser.py +211 -0
- src/tools/question/question_extractor.py +397 -0
- src/tools/rag_tool.py +173 -0
- src/tools/tex_chunker.py +339 -0
- src/tools/tex_downloader.py +253 -0
- src/tools/web_search.py +71 -0
- src/utils/config_manager.py +206 -0
- src/utils/document_validator.py +168 -0
- src/utils/error_rate_tracker.py +111 -0
- src/utils/error_utils.py +82 -0
- src/utils/json_parser.py +110 -0
- src/utils/network/circuit_breaker.py +79 -0
src/agents/base_agent.py
ADDED
@@ -0,0 +1,657 @@
+#!/usr/bin/env python
+"""
+Unified BaseAgent - Base class for all module agents.
+
+This is the single source of truth for agent base functionality across:
+- solve module
+- research module
+- guide module
+- ideagen module
+- co_writer module
+- question module (unified in Jan 2026 refactor)
+"""
+
+from abc import ABC, abstractmethod
+import os
+from pathlib import Path
+import sys
+import time
+from typing import Any, AsyncGenerator
+
+# Add project root to path
+_project_root = Path(__file__).parent.parent.parent
+if str(_project_root) not in sys.path:
+    sys.path.insert(0, str(_project_root))
+
+from src.config.settings import settings
+from src.logging import LLMStats, get_logger
+from src.services.config import get_agent_params
+from src.services.llm import complete as llm_complete
+from src.services.llm import get_llm_config, get_token_limit_kwargs, supports_response_format
+from src.services.llm import stream as llm_stream
+from src.services.prompt import get_prompt_manager
+
+
+class BaseAgent(ABC):
+    """
+    Unified base class for all module agents.
+
+    This class provides:
+    - LLM configuration management (api_key, base_url, model)
+    - Agent parameters (temperature, max_tokens) from agents.yaml
+    - Prompt loading via PromptManager
+    - Unified LLM call interface
+    - Token tracking (supports TokenTracker, LLMStats, or singleton tracker)
+    - Logging
+
+    Subclasses must implement the `process()` method.
+    """
+
+    # Shared LLMStats tracker for each module (class-level)
+    _shared_stats: dict[str, LLMStats] = {}
+
+    def __init__(
+        self,
+        module_name: str,
+        agent_name: str,
+        api_key: str | None = None,
+        base_url: str | None = None,
+        model: str | None = None,
+        api_version: str | None = None,
+        language: str = "zh",
+        binding: str = "openai",
+        config: dict[str, Any] | None = None,
+        token_tracker: Any | None = None,
+        log_dir: str | None = None,
+    ):
+        """
+        Initialize base Agent.
+
+        Args:
+            module_name: Module name (solve/research/guide/ideagen/co_writer)
+            agent_name: Agent name (e.g., "solve_agent", "note_agent")
+            api_key: API key (optional, defaults to environment variable)
+            base_url: API endpoint (optional, defaults to environment variable)
+            model: Model name (optional, defaults to environment variable)
+            api_version: API version for Azure OpenAI (optional)
+            language: Language setting ('zh' | 'en'), default 'zh'
+            binding: Provider binding type (optional, defaults to 'openai')
+            config: Optional configuration dictionary
+            token_tracker: Optional external TokenTracker instance
+            log_dir: Optional log directory path
+        """
+        self.module_name = module_name
+        self.agent_name = agent_name
+        self.language = language
+        # Ensure config is always a dict (not a dataclass like LLMConfig)
+        if config is None:
+            self.config = {}
+        elif isinstance(config, dict):
+            self.config = config
+        else:
+            # If config is a dataclass (like LLMConfig), convert to empty dict
+            # The actual LLM config should be loaded via get_llm_config()
+            self.config = {}
+
+        # Load agent parameters from unified config (agents.yaml)
+        self._agent_params = get_agent_params(module_name)
+
+        # Load LLM configuration
+        try:
+            env_llm = get_llm_config()
+            self.api_key = api_key or env_llm.api_key
+            self.base_url = base_url or env_llm.base_url
+            self.model = model or env_llm.model
+            self.api_version = api_version or getattr(env_llm, "api_version", None)
+            self.binding = binding or getattr(env_llm, "binding", "openai")
+        except ValueError:
+            # Fallback if env config not available
+            self.api_key = api_key or os.getenv("LLM_API_KEY")
+            self.base_url = base_url or os.getenv("LLM_HOST")
+            self.model = model or os.getenv("LLM_MODEL")
+            self.api_version = api_version or os.getenv("LLM_API_VERSION")
+            self.binding = binding
+
+        # Get Agent-specific configuration (if config provided)
+        self.agent_config = self.config.get("agents", {}).get(agent_name, {})
+        llm_cfg = self.config.get("llm", {})
+        # Ensure llm_config is always a dict (handle case where LLMConfig object is passed)
+        if hasattr(llm_cfg, "__dataclass_fields__"):
+            from dataclasses import asdict
+
+            self.llm_config = asdict(llm_cfg)
+        else:
+            self.llm_config = llm_cfg if isinstance(llm_cfg, dict) else {}
+
+        # Agent status
+        self.enabled = self.agent_config.get("enabled", True)
+
+        # Token tracker (external instance, optional)
+        self.token_tracker = token_tracker
+
+        # Initialize logger
+        logger_name = f"{module_name.capitalize()}.{agent_name}"
+        self.logger = get_logger(logger_name, log_dir=log_dir)
+
+        # Load prompts using unified PromptManager
+        try:
+            self.prompts = get_prompt_manager().load_prompts(
+                module_name=module_name,
+                agent_name=agent_name,
+                language=language,
+            )
+            if self.prompts:
+                self.logger.debug(f"Prompts loaded: {agent_name} ({language})")
+        except Exception as e:
+            self.prompts = None
+            self.logger.warning(f"Failed to load prompts for {agent_name}: {e}")
+
+    # -------------------------------------------------------------------------
+    # Model and Parameter Getters
+    # -------------------------------------------------------------------------
+
+    def get_model(self) -> str:
+        """
+        Get model name.
+
+        Priority: agent_config > llm_config > self.model > environment variable
+
+        Returns:
+            Model name
+
+        Raises:
+            ValueError: If model is not configured
+        """
+        # 1. Try agent-specific config
+        if self.agent_config.get("model"):
+            return self.agent_config["model"]
+
+        # 2. Try general LLM config
+        if self.llm_config.get("model"):
+            return self.llm_config["model"]
+
+        # 3. Use instance model
+        if self.model:
+            return self.model
+
+        # 4. Fallback to environment variable
+        env_model = os.getenv("LLM_MODEL")
+        if env_model:
+            return env_model
+
+        raise ValueError(
+            f"Model not configured for agent {self.agent_name}. "
+            "Please set LLM_MODEL in .env or activate a provider."
+        )
+
+    def get_temperature(self) -> float:
+        """
+        Get temperature parameter from unified config (agents.yaml).
+
+        Returns:
+            Temperature value
+        """
+        return self._agent_params["temperature"]
+
+    def get_max_tokens(self) -> int:
+        """
+        Get maximum token count from unified config (agents.yaml).
+
+        Returns:
+            Maximum token count
+        """
+        return self._agent_params["max_tokens"]
+
+    def get_max_retries(self) -> int:
+        """
+        Get maximum retry count.
+
+        Returns:
+            Retry count
+        """
+        return self.agent_config.get("max_retries", settings.retry.max_retries)
+
+    def refresh_config(self) -> None:
+        """
+        Refresh LLM configuration from the current active settings.
+
+        This method reloads the LLM configuration from the unified config service,
+        allowing agents to pick up configuration changes made by users in Settings
+        without needing to restart the server or recreate the agent instance.
+
+        Call this method before processing requests if you want to ensure
+        the agent uses the latest user-configured LLM settings.
+        """
+        try:
+            llm_config = get_llm_config()
+            self.api_key = llm_config.api_key
+            self.base_url = llm_config.base_url
+            self.model = llm_config.model
+            self.api_version = getattr(llm_config, "api_version", None)
+            self.binding = getattr(llm_config, "binding", "openai")
+            self.logger.debug(
+                f"Config refreshed: model={self.model}, base_url={self.base_url[:30]}..."
+                if self.base_url
+                else f"Config refreshed: model={self.model}"
+            )
+        except Exception as e:
+            self.logger.warning(f"Failed to refresh config: {e}")
+
+    # -------------------------------------------------------------------------
+    # Token Tracking
+    # -------------------------------------------------------------------------
+
+    @classmethod
+    def get_stats(cls, module_name: str) -> LLMStats:
+        """
+        Get or create shared LLMStats tracker for a module.
+
+        Args:
+            module_name: Module name
+
+        Returns:
+            LLMStats instance
+        """
+        if module_name not in cls._shared_stats:
+            cls._shared_stats[module_name] = LLMStats(module_name=module_name.capitalize())
+        return cls._shared_stats[module_name]
+
+    @classmethod
+    def reset_stats(cls, module_name: str | None = None):
+        """
+        Reset shared stats.
+
+        Args:
+            module_name: Module name (if None, reset all)
+        """
+        if module_name:
+            if module_name in cls._shared_stats:
+                cls._shared_stats[module_name].reset()
+        else:
+            for stats in cls._shared_stats.values():
+                stats.reset()
+
+    @classmethod
+    def print_stats(cls, module_name: str | None = None):
+        """
+        Print stats summary.
+
+        Args:
+            module_name: Module name (if None, print all)
+        """
+        if module_name:
+            if module_name in cls._shared_stats:
+                cls._shared_stats[module_name].print_summary()
+        else:
+            for stats in cls._shared_stats.values():
+                stats.print_summary()
+
+    def _track_tokens(
+        self,
+        model: str,
+        system_prompt: str,
+        user_prompt: str,
+        response: str,
+        stage: str | None = None,
+    ):
+        """
+        Track token usage using available tracker.
+
+        Supports:
+        1. External TokenTracker (if self.token_tracker is set)
+        2. Shared LLMStats (always available)
+
+        Args:
+            model: Model name
+            system_prompt: System prompt
+            user_prompt: User prompt
+            response: LLM response
+            stage: Stage name (optional)
+        """
+        stage_label = stage or self.agent_name
+
+        # 1. Use external TokenTracker if provided
+        if self.token_tracker:
+            try:
+                self.token_tracker.add_usage(
+                    agent_name=self.agent_name,
+                    stage=stage_label,
+                    model=model,
+                    system_prompt=system_prompt,
+                    user_prompt=user_prompt,
+                    response_text=response,
+                )
+            except Exception:
+                pass  # Don't let tracking errors affect main flow
+
+        # 2. Always use shared LLMStats
+        stats = self.get_stats(self.module_name)
+        stats.add_call(
+            model=model,
+            system_prompt=system_prompt,
+            user_prompt=user_prompt,
+            response=response,
+        )
+
+    # -------------------------------------------------------------------------
+    # LLM Call Interface
+    # -------------------------------------------------------------------------
+
+    async def call_llm(
+        self,
+        user_prompt: str,
+        system_prompt: str,
+        messages: list[dict[str, str]] | None = None,
+        response_format: dict[str, str] | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        model: str | None = None,
+        verbose: bool = True,
+        stage: str | None = None,
+    ) -> str:
+        """
+        Unified interface for calling LLM (non-streaming).
+
+        Uses the LLM factory to route calls to the appropriate provider
+        (cloud or local) based on configuration.
+
+        Args:
+            user_prompt: User prompt (ignored if messages provided)
+            system_prompt: System prompt (ignored if messages provided)
+            messages: Pre-built messages array (optional, overrides prompt/system_prompt)
+            response_format: Response format (e.g., {"type": "json_object"})
+            temperature: Temperature parameter (optional, uses config by default)
+            max_tokens: Maximum tokens (optional, uses config by default)
+            model: Model name (optional, uses config by default)
+            verbose: Whether to print raw LLM output (default True)
+            stage: Stage marker for logging and tracking
+
+        Returns:
+            LLM response text
+        """
+        model = model or self.get_model()
+        temperature = temperature if temperature is not None else self.get_temperature()
+        max_tokens = max_tokens if max_tokens is not None else self.get_max_tokens()
+        max_retries = self.get_max_retries()
+
+        # Record call start time
+        start_time = time.time()
+
+        # Build kwargs for LLM factory
+        kwargs = {
+            "temperature": temperature,
+        }
+
+        # Handle token limit for newer OpenAI models
+        if max_tokens:
+            kwargs.update(get_token_limit_kwargs(model, max_tokens))
+
+        # Handle response_format with capability check
+        if response_format:
+            try:
+                config = get_llm_config()
+                binding = getattr(config, "binding", None) or "openai"
+            except Exception:
+                binding = "openai"
+
+            if supports_response_format(binding, model):
+                kwargs["response_format"] = response_format
+            else:
+                self.logger.debug(f"response_format not supported for {binding}/{model}, skipping")
+
+        if messages:
+            kwargs["messages"] = messages
+
+        # Log input
+        stage_label = stage or self.agent_name
+        if hasattr(self.logger, "log_llm_input"):
+            self.logger.log_llm_input(
+                agent_name=self.agent_name,
+                stage=stage_label,
+                system_prompt=system_prompt,
+                user_prompt=user_prompt,
+                metadata={"model": model, "temperature": temperature, "max_tokens": max_tokens},
+            )
+
+        # Call LLM via factory (routes to cloud or local provider)
+        response = None
+        try:
+            response = await llm_complete(
+                prompt=user_prompt,
+                system_prompt=system_prompt,
+                model=model,
+                api_key=self.api_key,
+                base_url=self.base_url,
+                api_version=self.api_version,
+                max_retries=max_retries,
+                **kwargs,
+            )
+        except Exception as e:
+            self.logger.error(f"LLM call failed: {e}")
+            raise
+
+        # Calculate duration
+        call_duration = time.time() - start_time
+
+        # Track token usage
+        self._track_tokens(
+            model=model,
+            system_prompt=system_prompt,
+            user_prompt=user_prompt,
+            response=response,
+            stage=stage_label,
+        )
+
+        # Log output
+        if hasattr(self.logger, "log_llm_output"):
+            self.logger.log_llm_output(
+                agent_name=self.agent_name,
+                stage=stage_label,
+                response=response,
+                metadata={"length": len(response), "duration": call_duration},
+            )
+
+        # Verbose output
+        if verbose:
+            self.logger.debug(f"LLM response: model={model}, duration={call_duration:.2f}s")
+
+        return response
+
+    async def stream_llm(
+        self,
+        user_prompt: str,
+        system_prompt: str,
+        messages: list[dict[str, str]] | None = None,
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        model: str | None = None,
+        stage: str | None = None,
+    ) -> AsyncGenerator[str, None]:
+        """
+        Unified interface for streaming LLM responses.
+
+        Uses the LLM factory to route calls to the appropriate provider
+        (cloud or local) based on configuration.
+
+        Args:
+            user_prompt: User prompt (ignored if messages provided)
+            system_prompt: System prompt (ignored if messages provided)
+            messages: Pre-built messages array (optional, overrides prompt/system_prompt)
+            temperature: Temperature parameter (optional, uses config by default)
+            max_tokens: Maximum tokens (optional, uses config by default)
+            model: Model name (optional, uses config by default)
+            stage: Stage marker for logging
+
+        Yields:
+            Response chunks as strings
+        """
+        model = model or self.get_model()
+        temperature = temperature if temperature is not None else self.get_temperature()
+        max_tokens = max_tokens if max_tokens is not None else self.get_max_tokens()
+
+        # Build kwargs
+        kwargs = {
+            "temperature": temperature,
+        }
+
+        # Handle token limit for newer OpenAI models
+        if max_tokens:
+            kwargs.update(get_token_limit_kwargs(model, max_tokens))
+
+        # Log input
+        stage_label = stage or self.agent_name
+        if hasattr(self.logger, "log_llm_input"):
+            self.logger.log_llm_input(
+                agent_name=self.agent_name,
+                stage=stage_label,
+                system_prompt=system_prompt,
+                user_prompt=user_prompt,
+                metadata={"model": model, "temperature": temperature, "streaming": True},
+            )
+
+        # Track start time
+        start_time = time.time()
+        full_response = ""
+
+        try:
+            # Stream via factory (routes to cloud or local provider)
+            async for chunk in llm_stream(
+                prompt=user_prompt,
+                system_prompt=system_prompt,
+                model=model,
+                api_key=self.api_key,
+                base_url=self.base_url,
+                api_version=self.api_version,
+                messages=messages,
+                **kwargs,
+            ):
+                full_response += chunk
+                yield chunk
+
+            # Track token usage after streaming completes
+            self._track_tokens(
+                model=model,
+                system_prompt=system_prompt,
+                user_prompt=user_prompt,
+                response=full_response,
+                stage=stage_label,
+            )
+
+            # Log output
+            call_duration = time.time() - start_time
+            if hasattr(self.logger, "log_llm_output"):
+                self.logger.log_llm_output(
+                    agent_name=self.agent_name,
+                    stage=stage_label,
+                    response=full_response[:200] + "..."
+                    if len(full_response) > 200
+                    else full_response,
+                    metadata={
+                        "length": len(full_response),
+                        "duration": call_duration,
+                        "streaming": True,
+                    },
+                )
+
+        except Exception as e:
+            self.logger.error(f"LLM streaming failed: {e}")
+            raise
+
+    # -------------------------------------------------------------------------
+    # Prompt Helpers
+    # -------------------------------------------------------------------------
+
+    def get_prompt(
+        self,
+        section_or_type: str = "system",
+        field_or_fallback: str | None = None,
+        fallback: str = "",
+    ) -> str | None:
+        """
+        Get prompt by type or section/field.
+
+        Supports two calling patterns:
+        1. get_prompt("system") - simple key lookup
+        2. get_prompt("section", "field", "fallback") - nested lookup (for research module)
+
+        Args:
+            section_or_type: Prompt type key or section name
+            field_or_fallback: Field name (if nested) or fallback value (if simple)
+            fallback: Fallback value if prompt not found (only used in nested mode)
+
+        Returns:
+            Prompt string or fallback
+        """
+        if not self.prompts:
+            return (
+                fallback
+                if fallback
+                else (
+                    field_or_fallback
+                    if isinstance(field_or_fallback, str) and field_or_fallback
+                    else None
+                )
+            )
+
+        # Check if this is a nested lookup (section.field pattern)
+        # If field_or_fallback is provided and section_or_type points to a dict, use nested lookup
+        section_value = self.prompts.get(section_or_type)
+
+        if isinstance(section_value, dict) and field_or_fallback is not None:
+            # Nested lookup: get_prompt("section", "field", "fallback")
+            result = section_value.get(field_or_fallback)
+            if result is not None:
+                return result
+            return fallback if fallback else None
+        else:
+            # Simple lookup: get_prompt("key") or get_prompt("key", "fallback")
+            if section_value is not None:
+                return section_value
+            # field_or_fallback acts as fallback in simple mode
+            return field_or_fallback if field_or_fallback else (fallback if fallback else None)
+
+    def has_prompts(self) -> bool:
+        """Check if prompts have been loaded."""
+        return self.prompts is not None
+
+    # -------------------------------------------------------------------------
+    # Status
+    # -------------------------------------------------------------------------
+
+    def is_enabled(self) -> bool:
+        """
+        Check if Agent is enabled.
+
+        Returns:
+            Whether enabled
+        """
+        return self.enabled
+
+    # -------------------------------------------------------------------------
+    # Abstract Method
+    # -------------------------------------------------------------------------
+
+    @abstractmethod
+    async def process(self, *args, **kwargs) -> Any:
+        """
+        Main processing logic of Agent (must be implemented by subclasses).
+
+        Returns:
+            Processing result
+        """
+
+    # -------------------------------------------------------------------------
+    # String Representation
+    # -------------------------------------------------------------------------
+
+    def __repr__(self) -> str:
+        """String representation of Agent."""
+        return (
+            f"{self.__class__.__name__}("
+            f"module={self.module_name}, "
+            f"name={self.agent_name}, "
+            f"enabled={self.enabled})"
+        )
+
+
+__all__ = ["BaseAgent"]
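For orientation, the following is a minimal sketch (not part of the package) of how a downstream module might subclass BaseAgent, based only on the signatures visible in the diff above. The agent name, prompt key, and fallback prompt are illustrative placeholders, not names taken from the wheel.

from src.agents.base_agent import BaseAgent


class EchoAgent(BaseAgent):
    """Hypothetical example agent; illustrates the BaseAgent API only."""

    def __init__(self, language: str = "en"):
        # "solve" is one of the module names listed in the class docstring;
        # "echo_agent" is a made-up agent name for this sketch.
        super().__init__(module_name="solve", agent_name="echo_agent", language=language)

    async def process(self, question: str) -> str:
        # In simple mode, get_prompt("system", fallback) returns the second
        # argument when no prompt file was loaded for this agent.
        system_prompt = self.get_prompt("system", "You are a helpful tutor.")
        return await self.call_llm(
            user_prompt=question,
            system_prompt=system_prompt,
            stage="example",
        )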
src/agents/chat/__init__.py
ADDED
@@ -0,0 +1,24 @@
+"""
+Chat Module - Lightweight conversational AI with session management.
+
+This module provides:
+- ChatAgent: Multi-turn conversational agent with RAG/Web Search support
+- SessionManager: Chat session persistence and management
+
+Usage:
+    from src.agents.chat import ChatAgent, SessionManager
+
+    agent = ChatAgent(language="en")
+    response = await agent.process(
+        message="What is machine learning?",
+        history=[],
+        kb_name="ai_textbook",
+        enable_rag=True,
+        enable_web_search=False
+    )
+"""
+
+from .chat_agent import ChatAgent
+from .session_manager import SessionManager
+
+__all__ = ["ChatAgent", "SessionManager"]
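Since agent.process(...) in the module docstring is a coroutine, a standalone script would need to run it inside an event loop. A minimal sketch, assuming the constructor and keyword arguments behave exactly as the docstring above shows ("ai_textbook" is just the example knowledge-base name used there):

import asyncio

from src.agents.chat import ChatAgent


async def main() -> None:
    agent = ChatAgent(language="en")
    response = await agent.process(
        message="What is machine learning?",
        history=[],
        kb_name="ai_textbook",
        enable_rag=True,
        enable_web_search=False,
    )
    print(response)


if __name__ == "__main__":
    asyncio.run(main())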