ag2 0.9.7__py3-none-any.whl → 0.9.8.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 might be problematic. Click here for more details.
- {ag2-0.9.7.dist-info → ag2-0.9.8.post1.dist-info}/METADATA +102 -75
- ag2-0.9.8.post1.dist-info/RECORD +387 -0
- autogen/__init__.py +1 -2
- autogen/_website/generate_api_references.py +4 -5
- autogen/_website/generate_mkdocs.py +9 -15
- autogen/_website/notebook_processor.py +13 -14
- autogen/_website/process_notebooks.py +10 -10
- autogen/_website/utils.py +5 -4
- autogen/agentchat/agent.py +13 -13
- autogen/agentchat/assistant_agent.py +7 -6
- autogen/agentchat/contrib/agent_eval/agent_eval.py +3 -3
- autogen/agentchat/contrib/agent_eval/critic_agent.py +3 -3
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +3 -3
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +3 -3
- autogen/agentchat/contrib/agent_optimizer.py +3 -3
- autogen/agentchat/contrib/capabilities/generate_images.py +11 -11
- autogen/agentchat/contrib/capabilities/teachability.py +15 -15
- autogen/agentchat/contrib/capabilities/transforms.py +17 -18
- autogen/agentchat/contrib/capabilities/transforms_util.py +5 -5
- autogen/agentchat/contrib/capabilities/vision_capability.py +4 -3
- autogen/agentchat/contrib/captainagent/agent_builder.py +30 -30
- autogen/agentchat/contrib/captainagent/captainagent.py +22 -21
- autogen/agentchat/contrib/captainagent/tool_retriever.py +2 -3
- autogen/agentchat/contrib/gpt_assistant_agent.py +9 -9
- autogen/agentchat/contrib/graph_rag/document.py +3 -3
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +3 -3
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +6 -6
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +3 -3
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +5 -11
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +6 -6
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +7 -7
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +6 -6
- autogen/agentchat/contrib/img_utils.py +1 -1
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +11 -11
- autogen/agentchat/contrib/llava_agent.py +18 -4
- autogen/agentchat/contrib/math_user_proxy_agent.py +11 -11
- autogen/agentchat/contrib/multimodal_conversable_agent.py +8 -8
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +6 -5
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +22 -26
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +14 -17
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +27 -37
- autogen/agentchat/contrib/rag/query_engine.py +7 -5
- autogen/agentchat/contrib/retrieve_assistant_agent.py +5 -5
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +8 -7
- autogen/agentchat/contrib/society_of_mind_agent.py +15 -14
- autogen/agentchat/contrib/swarm_agent.py +76 -98
- autogen/agentchat/contrib/text_analyzer_agent.py +7 -7
- autogen/agentchat/contrib/vectordb/base.py +10 -18
- autogen/agentchat/contrib/vectordb/chromadb.py +2 -1
- autogen/agentchat/contrib/vectordb/couchbase.py +18 -20
- autogen/agentchat/contrib/vectordb/mongodb.py +6 -5
- autogen/agentchat/contrib/vectordb/pgvectordb.py +40 -41
- autogen/agentchat/contrib/vectordb/qdrant.py +5 -5
- autogen/agentchat/contrib/web_surfer.py +20 -19
- autogen/agentchat/conversable_agent.py +292 -290
- autogen/agentchat/group/context_str.py +1 -3
- autogen/agentchat/group/context_variables.py +15 -25
- autogen/agentchat/group/group_tool_executor.py +10 -10
- autogen/agentchat/group/group_utils.py +15 -15
- autogen/agentchat/group/guardrails.py +7 -7
- autogen/agentchat/group/handoffs.py +19 -36
- autogen/agentchat/group/multi_agent_chat.py +7 -7
- autogen/agentchat/group/on_condition.py +4 -7
- autogen/agentchat/group/on_context_condition.py +4 -7
- autogen/agentchat/group/patterns/auto.py +8 -7
- autogen/agentchat/group/patterns/manual.py +7 -6
- autogen/agentchat/group/patterns/pattern.py +13 -12
- autogen/agentchat/group/patterns/random.py +3 -3
- autogen/agentchat/group/patterns/round_robin.py +3 -3
- autogen/agentchat/group/reply_result.py +2 -4
- autogen/agentchat/group/speaker_selection_result.py +5 -5
- autogen/agentchat/group/targets/group_chat_target.py +7 -6
- autogen/agentchat/group/targets/group_manager_target.py +4 -4
- autogen/agentchat/group/targets/transition_target.py +2 -1
- autogen/agentchat/groupchat.py +58 -61
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +4 -4
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +4 -4
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +7 -7
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +8 -8
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +6 -6
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +10 -9
- autogen/agentchat/realtime/experimental/realtime_agent.py +10 -9
- autogen/agentchat/realtime/experimental/realtime_observer.py +3 -3
- autogen/agentchat/realtime/experimental/realtime_swarm.py +44 -44
- autogen/agentchat/user_proxy_agent.py +10 -9
- autogen/agentchat/utils.py +3 -3
- autogen/agents/contrib/time/time_reply_agent.py +6 -5
- autogen/agents/contrib/time/time_tool_agent.py +2 -1
- autogen/agents/experimental/deep_research/deep_research.py +3 -3
- autogen/agents/experimental/discord/discord.py +2 -2
- autogen/agents/experimental/document_agent/chroma_query_engine.py +29 -44
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +9 -14
- autogen/agents/experimental/document_agent/document_agent.py +15 -16
- autogen/agents/experimental/document_agent/document_conditions.py +3 -3
- autogen/agents/experimental/document_agent/document_utils.py +5 -9
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +14 -20
- autogen/agents/experimental/document_agent/parser_utils.py +4 -4
- autogen/agents/experimental/document_agent/url_utils.py +14 -23
- autogen/agents/experimental/reasoning/reasoning_agent.py +33 -33
- autogen/agents/experimental/slack/slack.py +2 -2
- autogen/agents/experimental/telegram/telegram.py +2 -3
- autogen/agents/experimental/websurfer/websurfer.py +4 -4
- autogen/agents/experimental/wikipedia/wikipedia.py +5 -7
- autogen/browser_utils.py +8 -8
- autogen/cache/abstract_cache_base.py +5 -5
- autogen/cache/cache.py +12 -12
- autogen/cache/cache_factory.py +4 -4
- autogen/cache/cosmos_db_cache.py +9 -9
- autogen/cache/disk_cache.py +6 -6
- autogen/cache/in_memory_cache.py +4 -4
- autogen/cache/redis_cache.py +4 -4
- autogen/code_utils.py +18 -18
- autogen/coding/base.py +6 -6
- autogen/coding/docker_commandline_code_executor.py +9 -9
- autogen/coding/func_with_reqs.py +7 -6
- autogen/coding/jupyter/base.py +3 -3
- autogen/coding/jupyter/docker_jupyter_server.py +3 -4
- autogen/coding/jupyter/import_utils.py +3 -3
- autogen/coding/jupyter/jupyter_client.py +5 -5
- autogen/coding/jupyter/jupyter_code_executor.py +3 -4
- autogen/coding/jupyter/local_jupyter_server.py +2 -6
- autogen/coding/local_commandline_code_executor.py +8 -7
- autogen/coding/markdown_code_extractor.py +1 -2
- autogen/coding/utils.py +1 -2
- autogen/doc_utils.py +3 -2
- autogen/environments/docker_python_environment.py +19 -29
- autogen/environments/python_environment.py +8 -17
- autogen/environments/system_python_environment.py +3 -4
- autogen/environments/venv_python_environment.py +8 -12
- autogen/environments/working_directory.py +1 -2
- autogen/events/agent_events.py +106 -109
- autogen/events/base_event.py +6 -5
- autogen/events/client_events.py +15 -14
- autogen/events/helpers.py +1 -1
- autogen/events/print_event.py +4 -5
- autogen/fast_depends/_compat.py +10 -15
- autogen/fast_depends/core/build.py +17 -36
- autogen/fast_depends/core/model.py +64 -113
- autogen/fast_depends/dependencies/model.py +2 -1
- autogen/fast_depends/dependencies/provider.py +3 -2
- autogen/fast_depends/library/model.py +4 -4
- autogen/fast_depends/schema.py +7 -7
- autogen/fast_depends/use.py +17 -25
- autogen/fast_depends/utils.py +10 -30
- autogen/formatting_utils.py +6 -6
- autogen/graph_utils.py +1 -4
- autogen/import_utils.py +13 -13
- autogen/interop/crewai/crewai.py +2 -2
- autogen/interop/interoperable.py +2 -2
- autogen/interop/langchain/langchain_chat_model_factory.py +3 -2
- autogen/interop/langchain/langchain_tool.py +2 -6
- autogen/interop/litellm/litellm_config_factory.py +6 -7
- autogen/interop/pydantic_ai/pydantic_ai.py +4 -7
- autogen/interop/registry.py +2 -1
- autogen/io/base.py +5 -5
- autogen/io/run_response.py +33 -32
- autogen/io/websockets.py +6 -5
- autogen/json_utils.py +1 -2
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +58 -0
- autogen/llm_config/config.py +384 -0
- autogen/llm_config/entry.py +154 -0
- autogen/logger/base_logger.py +4 -3
- autogen/logger/file_logger.py +2 -1
- autogen/logger/logger_factory.py +2 -2
- autogen/logger/logger_utils.py +2 -2
- autogen/logger/sqlite_logger.py +2 -1
- autogen/math_utils.py +4 -5
- autogen/mcp/__main__.py +6 -6
- autogen/mcp/helpers.py +4 -4
- autogen/mcp/mcp_client.py +170 -29
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +3 -4
- autogen/mcp/mcp_proxy/mcp_proxy.py +23 -26
- autogen/mcp/mcp_proxy/operation_grouping.py +4 -5
- autogen/mcp/mcp_proxy/operation_renaming.py +6 -10
- autogen/mcp/mcp_proxy/security.py +2 -3
- autogen/messages/agent_messages.py +96 -98
- autogen/messages/base_message.py +6 -5
- autogen/messages/client_messages.py +15 -14
- autogen/messages/print_message.py +4 -5
- autogen/oai/__init__.py +1 -2
- autogen/oai/anthropic.py +42 -41
- autogen/oai/bedrock.py +68 -57
- autogen/oai/cerebras.py +26 -25
- autogen/oai/client.py +113 -139
- autogen/oai/client_utils.py +3 -3
- autogen/oai/cohere.py +34 -11
- autogen/oai/gemini.py +39 -17
- autogen/oai/gemini_types.py +11 -12
- autogen/oai/groq.py +22 -10
- autogen/oai/mistral.py +17 -11
- autogen/oai/oai_models/__init__.py +14 -2
- autogen/oai/oai_models/_models.py +2 -2
- autogen/oai/oai_models/chat_completion.py +13 -14
- autogen/oai/oai_models/chat_completion_message.py +11 -9
- autogen/oai/oai_models/chat_completion_message_tool_call.py +26 -3
- autogen/oai/oai_models/chat_completion_token_logprob.py +3 -4
- autogen/oai/oai_models/completion_usage.py +8 -9
- autogen/oai/ollama.py +19 -9
- autogen/oai/openai_responses.py +40 -17
- autogen/oai/openai_utils.py +48 -38
- autogen/oai/together.py +29 -14
- autogen/retrieve_utils.py +6 -7
- autogen/runtime_logging.py +5 -4
- autogen/token_count_utils.py +7 -4
- autogen/tools/contrib/time/time.py +0 -1
- autogen/tools/dependency_injection.py +5 -6
- autogen/tools/experimental/browser_use/browser_use.py +10 -10
- autogen/tools/experimental/code_execution/python_code_execution.py +5 -7
- autogen/tools/experimental/crawl4ai/crawl4ai.py +12 -15
- autogen/tools/experimental/deep_research/deep_research.py +9 -8
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +5 -11
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +98 -115
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +1 -1
- autogen/tools/experimental/google/drive/drive_functions.py +4 -4
- autogen/tools/experimental/google/drive/toolkit.py +5 -5
- autogen/tools/experimental/google_search/google_search.py +5 -5
- autogen/tools/experimental/google_search/youtube_search.py +5 -5
- autogen/tools/experimental/messageplatform/discord/discord.py +8 -12
- autogen/tools/experimental/messageplatform/slack/slack.py +14 -20
- autogen/tools/experimental/messageplatform/telegram/telegram.py +8 -12
- autogen/tools/experimental/perplexity/perplexity_search.py +18 -29
- autogen/tools/experimental/reliable/reliable.py +68 -74
- autogen/tools/experimental/searxng/searxng_search.py +20 -19
- autogen/tools/experimental/tavily/tavily_search.py +12 -19
- autogen/tools/experimental/web_search_preview/web_search_preview.py +13 -7
- autogen/tools/experimental/wikipedia/wikipedia.py +7 -10
- autogen/tools/function_utils.py +7 -7
- autogen/tools/tool.py +8 -6
- autogen/types.py +2 -2
- autogen/version.py +1 -1
- ag2-0.9.7.dist-info/RECORD +0 -421
- autogen/llm_config.py +0 -385
- {ag2-0.9.7.dist-info → ag2-0.9.8.post1.dist-info}/WHEEL +0 -0
- {ag2-0.9.7.dist-info → ag2-0.9.8.post1.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.7.dist-info → ag2-0.9.8.post1.dist-info}/licenses/NOTICE.md +0 -0
autogen/io/run_response.py
CHANGED
|
@@ -7,7 +7,8 @@
|
|
|
7
7
|
|
|
8
8
|
import queue
|
|
9
9
|
from asyncio import Queue as AsyncQueue
|
|
10
|
-
from
|
|
10
|
+
from collections.abc import AsyncIterable, Iterable, Sequence
|
|
11
|
+
from typing import Any, Optional, Protocol
|
|
11
12
|
from uuid import UUID, uuid4
|
|
12
13
|
|
|
13
14
|
from pydantic import BaseModel, Field
|
|
@@ -46,7 +47,7 @@ class Usage(BaseModel):
|
|
|
46
47
|
|
|
47
48
|
class CostBreakdown(BaseModel):
|
|
48
49
|
total_cost: float
|
|
49
|
-
models:
|
|
50
|
+
models: dict[str, Usage] = Field(default_factory=dict)
|
|
50
51
|
|
|
51
52
|
@classmethod
|
|
52
53
|
def from_raw(cls, data: dict[str, Any]) -> "CostBreakdown":
|
|
@@ -79,18 +80,18 @@ class RunResponseProtocol(RunInfoProtocol, Protocol):
|
|
|
79
80
|
def messages(self) -> Iterable[Message]: ...
|
|
80
81
|
|
|
81
82
|
@property
|
|
82
|
-
def summary(self) ->
|
|
83
|
+
def summary(self) -> str | None: ...
|
|
83
84
|
|
|
84
85
|
@property
|
|
85
|
-
def context_variables(self) ->
|
|
86
|
+
def context_variables(self) -> ContextVariables | None: ...
|
|
86
87
|
|
|
87
88
|
@property
|
|
88
|
-
def last_speaker(self) ->
|
|
89
|
+
def last_speaker(self) -> str | None: ...
|
|
89
90
|
|
|
90
91
|
@property
|
|
91
|
-
def cost(self) ->
|
|
92
|
+
def cost(self) -> Cost | None: ...
|
|
92
93
|
|
|
93
|
-
def process(self, processor:
|
|
94
|
+
def process(self, processor: EventProcessorProtocol | None = None) -> None: ...
|
|
94
95
|
|
|
95
96
|
def set_ui_tools(self, tools: list[Tool]) -> None: ...
|
|
96
97
|
|
|
@@ -103,18 +104,18 @@ class AsyncRunResponseProtocol(RunInfoProtocol, Protocol):
|
|
|
103
104
|
async def messages(self) -> Iterable[Message]: ...
|
|
104
105
|
|
|
105
106
|
@property
|
|
106
|
-
async def summary(self) ->
|
|
107
|
+
async def summary(self) -> str | None: ...
|
|
107
108
|
|
|
108
109
|
@property
|
|
109
|
-
async def context_variables(self) ->
|
|
110
|
+
async def context_variables(self) -> ContextVariables | None: ...
|
|
110
111
|
|
|
111
112
|
@property
|
|
112
|
-
async def last_speaker(self) ->
|
|
113
|
+
async def last_speaker(self) -> str | None: ...
|
|
113
114
|
|
|
114
115
|
@property
|
|
115
|
-
async def cost(self) ->
|
|
116
|
+
async def cost(self) -> Cost | None: ...
|
|
116
117
|
|
|
117
|
-
async def process(self, processor:
|
|
118
|
+
async def process(self, processor: AsyncEventProcessorProtocol | None = None) -> None: ...
|
|
118
119
|
|
|
119
120
|
def set_ui_tools(self, tools: list[Tool]) -> None: ...
|
|
120
121
|
|
|
@@ -123,12 +124,12 @@ class RunResponse:
|
|
|
123
124
|
def __init__(self, iostream: ThreadIOStream, agents: list[Agent]):
|
|
124
125
|
self.iostream = iostream
|
|
125
126
|
self.agents = agents
|
|
126
|
-
self._summary:
|
|
127
|
+
self._summary: str | None = None
|
|
127
128
|
self._messages: Sequence[LLMMessageType] = []
|
|
128
129
|
self._uuid = uuid4()
|
|
129
|
-
self._context_variables:
|
|
130
|
-
self._last_speaker:
|
|
131
|
-
self._cost:
|
|
130
|
+
self._context_variables: ContextVariables | None = None
|
|
131
|
+
self._last_speaker: str | None = None
|
|
132
|
+
self._cost: Cost | None = None
|
|
132
133
|
|
|
133
134
|
def _queue_generator(self, q: queue.Queue) -> Iterable[BaseEvent]: # type: ignore[type-arg]
|
|
134
135
|
"""A generator to yield items from the queue until the termination message is found."""
|
|
@@ -164,7 +165,7 @@ class RunResponse:
|
|
|
164
165
|
return self._messages
|
|
165
166
|
|
|
166
167
|
@property
|
|
167
|
-
def summary(self) ->
|
|
168
|
+
def summary(self) -> str | None:
|
|
168
169
|
return self._summary
|
|
169
170
|
|
|
170
171
|
@property
|
|
@@ -176,25 +177,25 @@ class RunResponse:
|
|
|
176
177
|
return self._uuid
|
|
177
178
|
|
|
178
179
|
@property
|
|
179
|
-
def context_variables(self) ->
|
|
180
|
+
def context_variables(self) -> ContextVariables | None:
|
|
180
181
|
return self._context_variables
|
|
181
182
|
|
|
182
183
|
@property
|
|
183
|
-
def last_speaker(self) ->
|
|
184
|
+
def last_speaker(self) -> str | None:
|
|
184
185
|
return self._last_speaker
|
|
185
186
|
|
|
186
187
|
@property
|
|
187
|
-
def cost(self) ->
|
|
188
|
+
def cost(self) -> Cost | None:
|
|
188
189
|
return self._cost
|
|
189
190
|
|
|
190
191
|
@cost.setter
|
|
191
|
-
def cost(self, value:
|
|
192
|
+
def cost(self, value: Cost | dict[str, Any]) -> None:
|
|
192
193
|
if isinstance(value, dict):
|
|
193
194
|
self._cost = Cost.from_raw(value)
|
|
194
195
|
else:
|
|
195
196
|
self._cost = value
|
|
196
197
|
|
|
197
|
-
def process(self, processor:
|
|
198
|
+
def process(self, processor: EventProcessorProtocol | None = None) -> None:
|
|
198
199
|
processor = processor or ConsoleEventProcessor()
|
|
199
200
|
processor.process(self)
|
|
200
201
|
|
|
@@ -208,12 +209,12 @@ class AsyncRunResponse:
|
|
|
208
209
|
def __init__(self, iostream: AsyncThreadIOStream, agents: list[Agent]):
|
|
209
210
|
self.iostream = iostream
|
|
210
211
|
self.agents = agents
|
|
211
|
-
self._summary:
|
|
212
|
+
self._summary: str | None = None
|
|
212
213
|
self._messages: Sequence[LLMMessageType] = []
|
|
213
214
|
self._uuid = uuid4()
|
|
214
|
-
self._context_variables:
|
|
215
|
-
self._last_speaker:
|
|
216
|
-
self._cost:
|
|
215
|
+
self._context_variables: ContextVariables | None = None
|
|
216
|
+
self._last_speaker: str | None = None
|
|
217
|
+
self._cost: Cost | None = None
|
|
217
218
|
|
|
218
219
|
async def _queue_generator(self, q: AsyncQueue[Any]) -> AsyncIterable[BaseEvent]: # type: ignore[type-arg]
|
|
219
220
|
"""A generator to yield items from the queue until the termination message is found."""
|
|
@@ -253,7 +254,7 @@ class AsyncRunResponse:
|
|
|
253
254
|
return self._messages
|
|
254
255
|
|
|
255
256
|
@property
|
|
256
|
-
async def summary(self) ->
|
|
257
|
+
async def summary(self) -> str | None:
|
|
257
258
|
return self._summary
|
|
258
259
|
|
|
259
260
|
@property
|
|
@@ -265,25 +266,25 @@ class AsyncRunResponse:
|
|
|
265
266
|
return self._uuid
|
|
266
267
|
|
|
267
268
|
@property
|
|
268
|
-
async def context_variables(self) ->
|
|
269
|
+
async def context_variables(self) -> ContextVariables | None:
|
|
269
270
|
return self._context_variables
|
|
270
271
|
|
|
271
272
|
@property
|
|
272
|
-
async def last_speaker(self) ->
|
|
273
|
+
async def last_speaker(self) -> str | None:
|
|
273
274
|
return self._last_speaker
|
|
274
275
|
|
|
275
276
|
@property
|
|
276
|
-
async def cost(self) ->
|
|
277
|
+
async def cost(self) -> Cost | None:
|
|
277
278
|
return self._cost
|
|
278
279
|
|
|
279
280
|
@cost.setter
|
|
280
|
-
def cost(self, value:
|
|
281
|
+
def cost(self, value: Cost | dict[str, Any]) -> None:
|
|
281
282
|
if isinstance(value, dict):
|
|
282
283
|
self._cost = Cost.from_raw(value)
|
|
283
284
|
else:
|
|
284
285
|
self._cost = value
|
|
285
286
|
|
|
286
|
-
async def process(self, processor:
|
|
287
|
+
async def process(self, processor: AsyncEventProcessorProtocol | None = None) -> None:
|
|
287
288
|
processor = processor or AsyncConsoleEventProcessor()
|
|
288
289
|
await processor.process(self)
|
|
289
290
|
|
autogen/io/websockets.py
CHANGED
|
@@ -7,10 +7,11 @@
|
|
|
7
7
|
import logging
|
|
8
8
|
import ssl
|
|
9
9
|
import threading
|
|
10
|
+
from collections.abc import Callable, Iterable, Iterator
|
|
10
11
|
from contextlib import contextmanager
|
|
11
12
|
from functools import partial
|
|
12
13
|
from time import sleep
|
|
13
|
-
from typing import Any,
|
|
14
|
+
from typing import Any, Protocol
|
|
14
15
|
|
|
15
16
|
from ..doc_utils import export_module
|
|
16
17
|
from ..events.base_event import BaseEvent
|
|
@@ -30,11 +31,11 @@ logger.setLevel(logging.INFO)
|
|
|
30
31
|
|
|
31
32
|
# The following type and protocols are used to define the ServerConnection and WebSocketServer classes
|
|
32
33
|
# if websockets is not installed, they would be untyped
|
|
33
|
-
Data =
|
|
34
|
+
Data = str | bytes
|
|
34
35
|
|
|
35
36
|
|
|
36
37
|
class ServerConnection(Protocol):
|
|
37
|
-
def send(self, message:
|
|
38
|
+
def send(self, message: Data | Iterable[Data]) -> None:
|
|
38
39
|
"""Send a message to the client.
|
|
39
40
|
|
|
40
41
|
Args:
|
|
@@ -43,7 +44,7 @@ class ServerConnection(Protocol):
|
|
|
43
44
|
"""
|
|
44
45
|
... # pragma: no cover
|
|
45
46
|
|
|
46
|
-
def recv(self, timeout:
|
|
47
|
+
def recv(self, timeout: float | None = None) -> Data:
|
|
47
48
|
"""Receive a message from the client.
|
|
48
49
|
|
|
49
50
|
Args:
|
|
@@ -114,7 +115,7 @@ class IOWebsockets(IOStream):
|
|
|
114
115
|
host: str = "127.0.0.1",
|
|
115
116
|
port: int = 8765,
|
|
116
117
|
on_connect: Callable[["IOWebsockets"], None],
|
|
117
|
-
ssl_context:
|
|
118
|
+
ssl_context: ssl.SSLContext | None = None,
|
|
118
119
|
**kwargs: Any,
|
|
119
120
|
) -> Iterator[str]:
|
|
120
121
|
"""Factory function to create a websocket input/output stream.
|
autogen/json_utils.py
CHANGED
|
@@ -14,8 +14,7 @@ __all__ = ["resolve_json_references"]
|
|
|
14
14
|
|
|
15
15
|
@require_optional_import("jsonschema", "gemini")
|
|
16
16
|
def resolve_json_references(schema: dict[str, Any]) -> dict[str, Any]:
|
|
17
|
-
"""
|
|
18
|
-
Resolve JSON references in the given schema.
|
|
17
|
+
"""Resolve JSON references in the given schema.
|
|
19
18
|
|
|
20
19
|
Args:
|
|
21
20
|
schema (dict): The JSON schema with references.
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
from .client import ModelClient
|
|
6
|
+
from .config import LLMConfig
|
|
7
|
+
|
|
8
|
+
__all__ = (
|
|
9
|
+
"LLMConfig",
|
|
10
|
+
"ModelClient",
|
|
11
|
+
)
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
#
|
|
5
|
+
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
+
# SPDX-License-Identifier: MIT
|
|
7
|
+
from typing import Any, Protocol
|
|
8
|
+
|
|
9
|
+
from ..doc_utils import export_module
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@export_module("autogen")
|
|
13
|
+
class ModelClient(Protocol):
|
|
14
|
+
"""A client class must implement the following methods:
|
|
15
|
+
- create must return a response object that implements the ModelClientResponseProtocol
|
|
16
|
+
- cost must return the cost of the response
|
|
17
|
+
- get_usage must return a dict with the following keys:
|
|
18
|
+
- prompt_tokens
|
|
19
|
+
- completion_tokens
|
|
20
|
+
- total_tokens
|
|
21
|
+
- cost
|
|
22
|
+
- model
|
|
23
|
+
|
|
24
|
+
This class is used to create a client that can be used by OpenAIWrapper.
|
|
25
|
+
The response returned from create must adhere to the ModelClientResponseProtocol but can be extended however needed.
|
|
26
|
+
The message_retrieval method must be implemented to return a list of str or a list of messages from the response.
|
|
27
|
+
"""
|
|
28
|
+
|
|
29
|
+
RESPONSE_USAGE_KEYS: list[str] = ["prompt_tokens", "completion_tokens", "total_tokens", "cost", "model"]
|
|
30
|
+
|
|
31
|
+
class ModelClientResponseProtocol(Protocol):
|
|
32
|
+
class Choice(Protocol):
|
|
33
|
+
class Message(Protocol):
|
|
34
|
+
content: str | dict[str, Any]
|
|
35
|
+
|
|
36
|
+
message: Message
|
|
37
|
+
|
|
38
|
+
choices: list[Choice]
|
|
39
|
+
model: str
|
|
40
|
+
|
|
41
|
+
def create(self, params: dict[str, Any]) -> ModelClientResponseProtocol: ... # pragma: no cover
|
|
42
|
+
|
|
43
|
+
def message_retrieval(
|
|
44
|
+
self, response: ModelClientResponseProtocol
|
|
45
|
+
) -> list[str] | list["ModelClient.ModelClientResponseProtocol.Choice.Message"]:
|
|
46
|
+
"""Retrieve and return a list of strings or a list of Choice.Message from the response.
|
|
47
|
+
|
|
48
|
+
NOTE: if a list of Choice.Message is returned, it currently needs to contain the fields of OpenAI's ChatCompletion Message object,
|
|
49
|
+
since that is expected for function or tool calling in the rest of the codebase at the moment, unless a custom agent is being used.
|
|
50
|
+
"""
|
|
51
|
+
... # pragma: no cover
|
|
52
|
+
|
|
53
|
+
def cost(self, response: ModelClientResponseProtocol) -> float: ... # pragma: no cover
|
|
54
|
+
|
|
55
|
+
@staticmethod
|
|
56
|
+
def get_usage(response: ModelClientResponseProtocol) -> dict[str, Any]:
|
|
57
|
+
"""Return usage summary of the response using RESPONSE_USAGE_KEYS."""
|
|
58
|
+
... # pragma: no cover
|
|
@@ -0,0 +1,384 @@
|
|
|
1
|
+
# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
|
|
2
|
+
#
|
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
+
|
|
5
|
+
import functools
|
|
6
|
+
import json
|
|
7
|
+
import re
|
|
8
|
+
from collections.abc import Iterable
|
|
9
|
+
from contextvars import ContextVar
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Annotated, Any, Literal, TypeAlias
|
|
12
|
+
|
|
13
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
14
|
+
|
|
15
|
+
from autogen.oai.anthropic import AnthropicEntryDict, AnthropicLLMConfigEntry
|
|
16
|
+
from autogen.oai.bedrock import BedrockEntryDict, BedrockLLMConfigEntry
|
|
17
|
+
from autogen.oai.cerebras import CerebrasEntryDict, CerebrasLLMConfigEntry
|
|
18
|
+
from autogen.oai.client import (
|
|
19
|
+
AzureOpenAIEntryDict,
|
|
20
|
+
AzureOpenAILLMConfigEntry,
|
|
21
|
+
DeepSeekEntyDict,
|
|
22
|
+
DeepSeekLLMConfigEntry,
|
|
23
|
+
OpenAIEntryDict,
|
|
24
|
+
OpenAILLMConfigEntry,
|
|
25
|
+
OpenAIResponsesLLMConfigEntry,
|
|
26
|
+
)
|
|
27
|
+
from autogen.oai.cohere import CohereEntryDict, CohereLLMConfigEntry
|
|
28
|
+
from autogen.oai.gemini import GeminiEntryDict, GeminiLLMConfigEntry
|
|
29
|
+
from autogen.oai.groq import GroqEntryDict, GroqLLMConfigEntry
|
|
30
|
+
from autogen.oai.mistral import MistralEntryDict, MistralLLMConfigEntry
|
|
31
|
+
from autogen.oai.ollama import OllamaEntryDict, OllamaLLMConfigEntry
|
|
32
|
+
from autogen.oai.together import TogetherEntryDict, TogetherLLMConfigEntry
|
|
33
|
+
|
|
34
|
+
from ..doc_utils import export_module
|
|
35
|
+
from .entry import ApplicationConfig, LLMConfigEntry
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# Meta class to allow LLMConfig.current and LLMConfig.default to be used as class properties
|
|
39
|
+
class MetaLLMConfig(type):
|
|
40
|
+
def __init__(cls, *args: Any, **kwargs: Any) -> None:
|
|
41
|
+
pass
|
|
42
|
+
|
|
43
|
+
@property
|
|
44
|
+
def current(cls) -> "LLMConfig":
|
|
45
|
+
current_llm_config = LLMConfig.get_current_llm_config(llm_config=None)
|
|
46
|
+
if current_llm_config is None:
|
|
47
|
+
raise ValueError("No current LLMConfig set. Are you inside a context block?")
|
|
48
|
+
return current_llm_config # type: ignore[return-value]
|
|
49
|
+
|
|
50
|
+
@property
|
|
51
|
+
def default(cls) -> "LLMConfig":
|
|
52
|
+
return cls.current
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
ConfigItem: TypeAlias = (
|
|
56
|
+
LLMConfigEntry
|
|
57
|
+
| AnthropicEntryDict
|
|
58
|
+
| BedrockEntryDict
|
|
59
|
+
| CerebrasEntryDict
|
|
60
|
+
| CohereEntryDict
|
|
61
|
+
| AzureOpenAIEntryDict
|
|
62
|
+
| OpenAIEntryDict
|
|
63
|
+
| DeepSeekEntyDict
|
|
64
|
+
| MistralEntryDict
|
|
65
|
+
| GroqEntryDict
|
|
66
|
+
| OllamaEntryDict
|
|
67
|
+
| GeminiEntryDict
|
|
68
|
+
| TogetherEntryDict
|
|
69
|
+
| dict[str, Any]
|
|
70
|
+
)
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
@export_module("autogen")
|
|
74
|
+
class LLMConfig(metaclass=MetaLLMConfig):
|
|
75
|
+
_current_llm_config: ContextVar["LLMConfig"] = ContextVar("current_llm_config")
|
|
76
|
+
|
|
77
|
+
def __init__(
|
|
78
|
+
self,
|
|
79
|
+
*,
|
|
80
|
+
top_p: float | None = None,
|
|
81
|
+
temperature: float | None = None,
|
|
82
|
+
max_tokens: int | None = None,
|
|
83
|
+
config_list: Iterable[ConfigItem] | dict[str, Any] = (),
|
|
84
|
+
check_every_ms: int | None = None,
|
|
85
|
+
allow_format_str_template: bool | None = None,
|
|
86
|
+
response_format: str | dict[str, Any] | BaseModel | type[BaseModel] | None = None,
|
|
87
|
+
timeout: int | None = None,
|
|
88
|
+
seed: int | None = None,
|
|
89
|
+
cache_seed: int | None = None,
|
|
90
|
+
parallel_tool_calls: bool | None = None,
|
|
91
|
+
tools: Iterable[Any] = (),
|
|
92
|
+
functions: Iterable[Any] = (),
|
|
93
|
+
routing_method: Literal["fixed_order", "round_robin"] | None = None,
|
|
94
|
+
**kwargs: Any,
|
|
95
|
+
) -> None:
|
|
96
|
+
"""Initializes the LLMConfig object.
|
|
97
|
+
|
|
98
|
+
Args:
|
|
99
|
+
config_list: A list of LLM configuration entries or dictionaries.
|
|
100
|
+
temperature: The sampling temperature for LLM generation.
|
|
101
|
+
check_every_ms: The interval (in milliseconds) to check for updates
|
|
102
|
+
allow_format_str_template: Whether to allow format string templates.
|
|
103
|
+
response_format: The format of the response (e.g., JSON, text).
|
|
104
|
+
timeout: The timeout for LLM requests in seconds.
|
|
105
|
+
seed: The random seed for reproducible results.
|
|
106
|
+
cache_seed: The seed for caching LLM responses.
|
|
107
|
+
parallel_tool_calls: Whether to enable parallel tool calls.
|
|
108
|
+
tools: A list of tools available for the LLM.
|
|
109
|
+
functions: A list of functions available for the LLM.
|
|
110
|
+
max_tokens: The maximum number of tokens to generate.
|
|
111
|
+
top_p: The nucleus sampling probability.
|
|
112
|
+
routing_method: The method used to route requests (e.g., fixed_order, round_robin).
|
|
113
|
+
**kwargs: Additional keyword arguments for future extensions.
|
|
114
|
+
|
|
115
|
+
Examples:
|
|
116
|
+
```python
|
|
117
|
+
# Example 1: create config from `kwargs` options
|
|
118
|
+
config = LLMConfig(
|
|
119
|
+
model="gpt-4o-mini",
|
|
120
|
+
api_key=os.environ["OPENAI_API_KEY"],
|
|
121
|
+
)
|
|
122
|
+
|
|
123
|
+
# Example 2: create config from `config_list` dictionary
|
|
124
|
+
config = LLMConfig(
|
|
125
|
+
config_list={
|
|
126
|
+
"model": "gpt-4o-mini",
|
|
127
|
+
"api_key": os.environ["OPENAI_API_KEY"],
|
|
128
|
+
}
|
|
129
|
+
)
|
|
130
|
+
|
|
131
|
+
# Example 3: create config from `config_list` list
|
|
132
|
+
config = LLMConfig(
|
|
133
|
+
config_list=[
|
|
134
|
+
{
|
|
135
|
+
"model": "gpt-4o-mini",
|
|
136
|
+
"api_key": os.environ["OPENAI_API_KEY"],
|
|
137
|
+
},
|
|
138
|
+
{
|
|
139
|
+
"model": "gpt-4",
|
|
140
|
+
"api_key": os.environ["OPENAI_API_KEY"],
|
|
141
|
+
},
|
|
142
|
+
]
|
|
143
|
+
)
|
|
144
|
+
```
|
|
145
|
+
"""
|
|
146
|
+
app_config = ApplicationConfig(
|
|
147
|
+
max_tokens=max_tokens,
|
|
148
|
+
top_p=top_p,
|
|
149
|
+
temperature=temperature,
|
|
150
|
+
)
|
|
151
|
+
|
|
152
|
+
application_level_options = app_config.model_dump(exclude_none=True)
|
|
153
|
+
|
|
154
|
+
final_config_list: list[LLMConfigEntry | dict[str, Any]] = []
|
|
155
|
+
|
|
156
|
+
if isinstance(config_list, dict):
|
|
157
|
+
config_list = [config_list]
|
|
158
|
+
|
|
159
|
+
for c in filter(bool, (*config_list, kwargs)):
|
|
160
|
+
if isinstance(c, LLMConfigEntry):
|
|
161
|
+
final_config_list.append(c.apply_application_config(app_config))
|
|
162
|
+
continue
|
|
163
|
+
|
|
164
|
+
else:
|
|
165
|
+
final_config_list.append({
|
|
166
|
+
"api_type": "openai", # default api_type
|
|
167
|
+
**application_level_options,
|
|
168
|
+
**c,
|
|
169
|
+
})
|
|
170
|
+
|
|
171
|
+
self._model = _LLMConfig(
|
|
172
|
+
**application_level_options,
|
|
173
|
+
config_list=final_config_list,
|
|
174
|
+
check_every_ms=check_every_ms,
|
|
175
|
+
seed=seed,
|
|
176
|
+
allow_format_str_template=allow_format_str_template,
|
|
177
|
+
response_format=response_format,
|
|
178
|
+
timeout=timeout,
|
|
179
|
+
cache_seed=cache_seed,
|
|
180
|
+
tools=tools or [],
|
|
181
|
+
functions=functions or [],
|
|
182
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
183
|
+
routing_method=routing_method,
|
|
184
|
+
)
|
|
185
|
+
|
|
186
|
+
# used by BaseModel to create instance variables
|
|
187
|
+
def __enter__(self) -> "LLMConfig":
    """Activate this config as the ambient LLM configuration for the ``with`` block."""
    # Keep the reset token so __exit__ can restore whatever was active before.
    self._token = LLMConfig._current_llm_config.set(self)
    return self
|
|
191
|
+
|
|
192
|
+
def __exit__(self, exc_type: type[Exception], exc_val: Exception, exc_tb: Any) -> None:
    """Restore the previously active config when the ``with`` block ends.

    Returning None lets any exception raised inside the block propagate.
    """
    LLMConfig._current_llm_config.reset(self._token)
|
|
194
|
+
|
|
195
|
+
@classmethod
def get_current_llm_config(cls, llm_config: "LLMConfig | None" = None) -> "LLMConfig | None":
    """Return *llm_config* if given, else a copy of the config activated via ``with``.

    Returns None when no explicit config is passed and none is active in the
    current context.
    """
    if llm_config is not None:
        return llm_config
    try:
        current = LLMConfig._current_llm_config.get()
    except LookupError:
        # No config has been activated in this context.
        return None
    return current.copy()
|
|
203
|
+
|
|
204
|
+
def _satisfies_criteria(self, value: Any, criteria_values: Any) -> bool:
|
|
205
|
+
if value is None:
|
|
206
|
+
return False
|
|
207
|
+
|
|
208
|
+
if isinstance(value, list):
|
|
209
|
+
return bool(set(value) & set(criteria_values)) # Non-empty intersection
|
|
210
|
+
else:
|
|
211
|
+
return value in criteria_values
|
|
212
|
+
|
|
213
|
+
@classmethod
def from_json(
    cls,
    *,
    env: str | None = None,
    path: str | Path | None = None,
    file_location: str | None = None,
    **kwargs: Any,
) -> "LLMConfig":
    """Build an LLMConfig from a JSON config list in an env var or a file.

    Exactly one of *env* or *path* must be supplied; extra *kwargs* are
    forwarded to the LLMConfig constructor.

    Raises:
        ValueError: if neither or both of *env* and *path* are given.
    """
    from autogen.oai.openai_utils import config_list_from_json

    if env is None and path is None:
        raise ValueError("Either 'env' or 'path' must be provided")
    if env is not None and path is not None:
        raise ValueError("Only one of 'env' or 'path' can be provided")

    source = env if env is not None else str(path)
    config_list = config_list_from_json(env_or_file=source, file_location=file_location)
    return LLMConfig(config_list=config_list, **kwargs)
|
|
233
|
+
|
|
234
|
+
def where(self, *, exclude: bool = False, **kwargs: Any) -> "LLMConfig":
    """Return a new LLMConfig containing only config entries matching the filter.

    With ``exclude=True`` the matching entries are dropped instead of kept.

    Raises:
        ValueError: if no entry survives the filter.
    """
    from autogen.oai.openai_utils import filter_config

    filtered = filter_config(config_list=self.config_list, filter_dict=kwargs, exclude=exclude)
    if not filtered:
        raise ValueError(f"No config found that satisfies the filter criteria: {kwargs}")

    # Rebuild from a dump so all other settings carry over unchanged.
    dumped = self.model_dump()
    dumped["config_list"] = filtered
    return LLMConfig(**dumped)
|
|
245
|
+
|
|
246
|
+
def model_dump(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> dict[str, Any]:
    """Dump the wrapped model as a dict, omitting None values and empty lists.

    Empty lists (e.g. unused ``tools``/``functions``) are stripped so the
    serialized form stays minimal.
    """
    dumped = self._model.model_dump(*args, exclude_none=exclude_none, **kwargs)
    return {key: val for key, val in dumped.items() if not (isinstance(val, list) and not val)}
|
|
250
|
+
|
|
251
|
+
def model_dump_json(self, *args: Any, exclude_none: bool = True, **kwargs: Any) -> str:
    """Serialize the config to JSON, applying the same filtering as model_dump()."""
    return json.dumps(self.model_dump(*args, exclude_none=exclude_none, **kwargs))
|
|
256
|
+
|
|
257
|
+
def model_validate(self, *args: Any, **kwargs: Any) -> Any:
    """Delegate validation to the wrapped pydantic model.

    NOTE(review): unlike pydantic's classmethod of the same name, this is an
    instance method on the wrapper — confirm callers expect that.
    """
    return self._model.model_validate(*args, **kwargs)
|
|
260
|
+
|
|
261
|
+
@functools.wraps(BaseModel.model_validate_json)
def model_validate_json(self, *args: Any, **kwargs: Any) -> Any:
    # Delegate JSON validation to the wrapped pydantic model (wraps() copies
    # pydantic's own docstring/signature metadata onto this method).
    return self._model.model_validate_json(*args, **kwargs)
|
|
264
|
+
|
|
265
|
+
@functools.wraps(BaseModel.model_validate_strings)
def model_validate_strings(self, *args: Any, **kwargs: Any) -> Any:
    # Delegate string validation to the wrapped pydantic model.
    return self._model.model_validate_strings(*args, **kwargs)
|
|
268
|
+
|
|
269
|
+
def __eq__(self, value: Any) -> bool:
    """Two LLMConfig instances compare equal when their wrapped models are equal."""
    if isinstance(value, LLMConfig):
        return self._model == value._model
    # Let the other operand attempt the comparison.
    return NotImplemented
|
|
273
|
+
|
|
274
|
+
def _getattr(self, o: object, name: str) -> Any:
|
|
275
|
+
val = getattr(o, name)
|
|
276
|
+
return val
|
|
277
|
+
|
|
278
|
+
def get(self, key: str, default: Any | None = None) -> Any:
    """Dict-style get: attribute *key* from the wrapped model, else *default*."""
    return getattr(self._model, key, default)
|
|
281
|
+
|
|
282
|
+
def __getitem__(self, key: str) -> Any:
    """Dict-style read access to the wrapped model's attributes.

    Raises:
        KeyError: if the wrapped model has no attribute *key*.
    """
    try:
        return self._getattr(self._model, key)
    except AttributeError:
        # Deliberate translation to mapping semantics; suppress the internal
        # AttributeError context so tracebacks show one clear error.
        raise KeyError(f"Key '{key}' not found in {self.__class__.__name__}") from None
|
|
287
|
+
|
|
288
|
+
def __setitem__(self, key: str, value: Any) -> None:
    """Dict-style assignment onto the wrapped model.

    Raises:
        ValueError: if the wrapped model rejects the assignment.
    """
    try:
        setattr(self._model, key, value)
    except ValueError as e:
        # Chain the original error: the failure may be a validation error on an
        # existing field, not only a missing one, and the cause shows which.
        raise ValueError(f"'{self.__class__.__name__}' object has no field '{key}'") from e
|
|
293
|
+
|
|
294
|
+
def __getattr__(self, name: Any) -> Any:
    """Forward unknown attribute reads to the wrapped model.

    Raises:
        AttributeError: if the wrapped model lacks *name*.
    """
    try:
        return self._getattr(self._model, name)
    except AttributeError:
        # Re-raise under this class's name; drop the internal context so the
        # traceback shows a single, caller-facing error.
        raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") from None
|
|
299
|
+
|
|
300
|
+
def __setattr__(self, name: str, value: Any) -> None:
    """Route attribute writes to the wrapped model, except the wrapper's own slot."""
    if name != "_model":
        setattr(self._model, name, value)
    else:
        # `_model` must live on the wrapper itself; routing it through the
        # model would recurse before the model exists.
        object.__setattr__(self, name, value)
|
|
305
|
+
|
|
306
|
+
def __contains__(self, key: str) -> bool:
    """Support ``key in config`` by probing the wrapped model's attributes."""
    return hasattr(self._model, key)
|
|
308
|
+
|
|
309
|
+
def __repr__(self) -> str:
    """Return a constructor-style repr with secret-looking values redacted."""
    d = self.model_dump()
    r = [f"{k}={repr(v)}" for k, v in d.items()]

    s = f"LLMConfig({', '.join(r)})"
    # Replace any keys ending with 'key' or 'token' values with stars for security
    # NOTE(review): the pattern requires a quoted key followed by ':' (dict-repr
    # form, e.g. {'api_key': '...'}), so it redacts secrets inside nested
    # config_list entries; top-level `k='v'` pairs don't have that shape — confirm
    # this is the intended coverage.
    s = re.sub(
        r"(['\"])(\w*(key|token))\1:\s*(['\"])([^'\"]*)(?:\4)", r"\1\2\1: \4**********\4", s, flags=re.IGNORECASE
    )
    return s
|
|
319
|
+
|
|
320
|
+
def __copy__(self) -> "LLMConfig":
    """Copy by rebuilding a fresh LLMConfig from the dumped configuration."""
    return LLMConfig(**self.model_dump())
|
|
322
|
+
|
|
323
|
+
def __deepcopy__(self, memo: dict[int, Any] | None = None) -> "LLMConfig":
    """Deep copy delegates to __copy__: the rebuild-from-dump makes the copy independent."""
    return self.__copy__()
|
|
325
|
+
|
|
326
|
+
def copy(self) -> "LLMConfig":
    """Public alias for __copy__()."""
    return self.__copy__()
|
|
328
|
+
|
|
329
|
+
def deepcopy(self, memo: dict[int, Any] | None = None) -> "LLMConfig":
    """Public alias for __deepcopy__()."""
    return self.__deepcopy__(memo)
|
|
331
|
+
|
|
332
|
+
def __str__(self) -> str:
    """The human-readable form matches the (redacted) repr."""
    return repr(self)
|
|
334
|
+
|
|
335
|
+
def items(self) -> Iterable[tuple[str, Any]]:
    """Dict-style iteration over dumped (field, value) pairs."""
    return self.model_dump().items()
|
|
338
|
+
|
|
339
|
+
def keys(self) -> Iterable[str]:
    """Dict-style iteration over dumped field names."""
    return self.model_dump().keys()
|
|
342
|
+
|
|
343
|
+
def values(self) -> Iterable[Any]:
    """Dict-style iteration over dumped field values."""
    return self.model_dump().values()
|
|
346
|
+
|
|
347
|
+
# Cache keyed by a tuple of LLMConfigEntry subclasses, mapping to a BaseModel
# class — presumably memoizes dynamically built pydantic models so identical
# entry-type combinations reuse one class; usage not visible in this chunk,
# TODO confirm against the rest of the module.
_base_model_classes: dict[tuple[type["LLMConfigEntry"], ...], type[BaseModel]] = {}
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
class _LLMConfig(ApplicationConfig):
    """Internal pydantic model that backs the LLMConfig wrapper.

    Holds the client/runtime options plus the list of per-provider config
    entries; extra fields are rejected (``extra="forbid"``).
    """

    # Client/runtime options; all optional at the model level.
    check_every_ms: int | None
    seed: int | None
    allow_format_str_template: bool | None
    response_format: str | dict[str, Any] | BaseModel | type[BaseModel] | None
    timeout: int | None
    cache_seed: int | None
    parallel_tool_calls: bool | None

    # Tool/function specs attached to the config (defaulted to [] by the wrapper's __init__).
    tools: list[Any]
    functions: list[Any]

    # Discriminated union over every supported provider entry type; pydantic
    # picks the concrete entry class from each item's "api_type" field.
    # At least one entry is required (min_length=1).
    config_list: list[  # type: ignore[valid-type]
        Annotated[
            AnthropicLLMConfigEntry
            | CerebrasLLMConfigEntry
            | BedrockLLMConfigEntry
            | AzureOpenAILLMConfigEntry
            | DeepSeekLLMConfigEntry
            | OpenAILLMConfigEntry
            | OpenAIResponsesLLMConfigEntry
            | CohereLLMConfigEntry
            | GeminiLLMConfigEntry
            | GroqLLMConfigEntry
            | MistralLLMConfigEntry
            | OllamaLLMConfigEntry
            | TogetherLLMConfigEntry,
            Field(discriminator="api_type"),
        ],
    ] = Field(..., min_length=1)

    # How to pick among config_list entries at call time.
    routing_method: Literal["fixed_order", "round_robin"] | None

    # Following field is configuration for pydantic to disallow extra fields
    model_config = ConfigDict(extra="forbid")
|