camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_types.py +6 -2
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +4014 -410
- camel/agents/mcp_agent.py +30 -27
- camel/agents/repo_agent.py +2 -1
- camel/benchmarks/browsecomp.py +6 -6
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/configs/vllm_config.py +2 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/base_generator.py +39 -10
- camel/environments/__init__.py +12 -0
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +4 -16
- camel/interpreters/docker_interpreter.py +3 -2
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/internal_python_interpreter.py +51 -2
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +9 -0
- camel/loaders/firecrawl_reader.py +4 -4
- camel/logger.py +1 -1
- camel/memories/agent_memories.py +84 -1
- camel/memories/base.py +34 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/blocks/vectordb_block.py +8 -1
- camel/memories/context_creators/score_based.py +29 -237
- camel/memories/records.py +88 -8
- camel/messages/base.py +166 -40
- camel/messages/func_message.py +32 -5
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +117 -18
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +205 -91
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +189 -24
- camel/models/cohere_model.py +5 -17
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +6 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +71 -20
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +49 -32
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/model_manager.py +24 -6
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +185 -19
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +171 -46
- camel/models/openai_model.py +205 -77
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/configs.py +11 -11
- camel/runtimes/daytona_runtime.py +15 -16
- camel/runtimes/docker_runtime.py +6 -6
- camel/runtimes/remote_http_runtime.py +5 -5
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +2 -0
- camel/societies/role_playing.py +26 -28
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +249 -38
- camel/societies/workforce/role_playing_worker.py +82 -20
- camel/societies/workforce/single_agent_worker.py +634 -34
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +169 -23
- camel/societies/workforce/utils.py +176 -9
- camel/societies/workforce/worker.py +77 -23
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +3168 -478
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +203 -175
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +4 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/__init__.py +6 -0
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/surreal.py +365 -0
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +244 -27
- camel/toolkits/__init__.py +46 -8
- camel/toolkits/aci_toolkit.py +64 -19
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/base.py +63 -5
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +10 -6
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +901 -67
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +30 -6
- camel/toolkits/github_toolkit.py +107 -20
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/human_toolkit.py +34 -10
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +3 -4
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +370 -45
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/pptx_toolkit.py +25 -12
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +437 -142
- camel/toolkits/slack_toolkit.py +104 -50
- camel/toolkits/sympy_toolkit.py +1 -1
- camel/toolkits/task_planning_toolkit.py +3 -3
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +1 -1
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +106 -26
- camel/toolkits/video_download_toolkit.py +17 -14
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +4 -1
- camel/types/enums.py +316 -40
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +31 -4
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/mcp_client.py +45 -1
- camel/utils/message_summarizer.py +148 -0
- camel/utils/token_counting.py +43 -20
- camel/utils/tool_result.py +44 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/dalle_toolkit.py +0 -175
- camel/toolkits/file_write_toolkit.py +0 -444
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/models/openai_compatible_model.py (+171 -46): custom client injection, an optional TraceRoot tracing hook, and streaming structured-output support.
```diff
@@ -17,6 +17,10 @@ from json import JSONDecodeError
 from typing import Any, Dict, List, Optional, Type, Union
 
 from openai import AsyncOpenAI, AsyncStream, BadRequestError, OpenAI, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel, ValidationError
 
 from camel.logger import get_logger
```
```diff
@@ -41,6 +45,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 
```
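The tracing decorator is resolved once, at import time, from two environment toggles: Langfuse takes precedence, then TraceRoot, with `camel.utils.observe` as the fallback in either branch. A minimal sketch of opting into the new TraceRoot path (assuming the optional `traceroot` package is installed):

```python
import os

# Set the toggle before importing camel model backends: `observe`
# is bound once, when the module is first imported.
os.environ["TRACEROOT_ENABLED"] = "true"

# camel now resolves `observe` to `traceroot.trace`, falling back to
# `camel.utils.observe` if the traceroot import fails.
from camel.models import OpenAICompatibleModel  # noqa: E402
```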
```diff
@@ -69,9 +78,21 @@ class OpenAICompatibleModel(BaseModelBackend):
             (default: :obj:`None`)
         max_retries (int, optional): Maximum number of retries for API calls.
             (default: :obj:`3`)
+        client (Optional[Any], optional): A custom synchronous
+            OpenAI-compatible client instance. If provided, this client will
+            be used instead of creating a new one. Useful for RL frameworks
+            like AReaL or rLLM that provide OpenAI-compatible clients (e.g.,
+            ArealOpenAI). The client should implement the OpenAI client
+            interface with `.chat.completions.create()` and `.beta.chat.
+            completions.parse()` methods. (default: :obj:`None`)
+        async_client (Optional[Any], optional): A custom asynchronous
+            OpenAI-compatible client instance. If provided, this client will
+            be used instead of creating a new one. The client should implement
+            the AsyncOpenAI client interface. (default: :obj:`None`)
         **kwargs (Any): Additional arguments to pass to the
             OpenAI client initialization. These can include parameters like
             'organization', 'default_headers', 'http_client', etc.
+            Ignored if custom clients are provided.
     """
 
     def __init__(
```
```diff
@@ -83,6 +104,8 @@ class OpenAICompatibleModel(BaseModelBackend):
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
         max_retries: int = 3,
+        client: Optional[Any] = None,
+        async_client: Optional[Any] = None,
         **kwargs: Any,
     ) -> None:
         api_key = api_key or os.environ.get("OPENAI_COMPATIBILITY_API_KEY")
```
```diff
@@ -98,39 +121,55 @@ class OpenAICompatibleModel(BaseModelBackend):
             timeout,
             max_retries,
         )
-
-        if is_langfuse_available():
-            from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
-            from langfuse.openai import OpenAI as LangfuseOpenAI
-            self._client = LangfuseOpenAI(
-                timeout=self._timeout,
-                max_retries=max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
-            self._async_client = LangfuseAsyncOpenAI(
-                timeout=self._timeout,
-                max_retries=max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
+
+        # Use custom clients if provided, otherwise create new ones
+        if client is not None:
+            # Use the provided custom sync client
+            self._client = client
         else:
-            self._client = OpenAI(
-                timeout=self._timeout,
-                max_retries=max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
-            self._async_client = AsyncOpenAI(
-                timeout=self._timeout,
-                max_retries=max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
+            # Create default sync client
+            if is_langfuse_available():
+                from langfuse.openai import OpenAI as LangfuseOpenAI
+
+                self._client = LangfuseOpenAI(
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
+            else:
+                self._client = OpenAI(
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
+
+        if async_client is not None:
+            # Use the provided custom async client
+            self._async_client = async_client
+        else:
+            # Create default async client
+            if is_langfuse_available():
+                from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
+
+                self._async_client = LangfuseAsyncOpenAI(
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
+            else:
+                self._async_client = AsyncOpenAI(
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
 
     @observe()
     def _run(
```
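The refactored constructor checks for injected clients before building defaults, so RL frameworks can route requests through their own OpenAI-compatible clients. A minimal usage sketch, with a stock `openai.OpenAI` client standing in for something like ArealOpenAI (the endpoint URL and model name below are hypothetical):

```python
from openai import OpenAI

from camel.models import OpenAICompatibleModel

# Any object exposing .chat.completions.create() and
# .beta.chat.completions.parse() satisfies the expected interface.
custom_client = OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="not-needed",
)

model = OpenAICompatibleModel(
    model_type="my-local-model",
    api_key="not-needed",
    url="http://localhost:8000/v1",
    client=custom_client,  # skips the default (Langfuse-aware) sync client
    # async_client=...,    # injected separately; a default is built otherwise
)
```

Note that, per the updated docstring, the `**kwargs` normally forwarded to client construction are ignored when custom clients are supplied.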
```diff
@@ -138,7 +177,11 @@ class OpenAICompatibleModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of OpenAI chat completion.
 
         Args:
```
```diff
@@ -153,6 +196,8 @@ class OpenAICompatibleModel(BaseModelBackend):
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
+                `ChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata
```
```diff
@@ -170,10 +215,19 @@ class OpenAICompatibleModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+
+        # Check if streaming is enabled
+        is_streaming = self.model_config_dict.get("stream", False)
+
         if response_format:
-            return self._request_parse(
-                messages, response_format, tools
-            )
+            if is_streaming:
+                # Use streaming parse for structured output
+                return self._request_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                # Use non-streaming parse for structured output
+                return self._request_parse(messages, response_format, tools)
         else:
             result = self._request_chat_completion(messages, tools)
 
```
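With this branching, `stream=True` combined with a `response_format` no longer yields a raw chunk stream: the request is routed to the new streaming-parse helper and a stream manager comes back. A minimal end-to-end sketch (model name and schema are hypothetical; `run()` is the public entry point wrapping `_run`):

```python
from pydantic import BaseModel

from camel.models import OpenAICompatibleModel


class CityInfo(BaseModel):
    city: str
    population: int


model = OpenAICompatibleModel(
    model_type="gpt-4o-mini",
    model_config_dict={"stream": True},  # + response_format => stream manager
)

# Returns ChatCompletionStreamManager[CityInfo] instead of
# Stream[ChatCompletionChunk].
manager = model.run(
    [{"role": "user", "content": "Name one large city."}],
    response_format=CityInfo,
)

with manager as stream:
    for event in stream:
        if event.type == "content.delta":
            print(event.delta, end="", flush=True)  # incremental JSON text
    final = stream.get_final_completion()

print(final.choices[0].message.parsed)  # a CityInfo instance (or None)
```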
```diff
@@ -185,7 +239,11 @@ class OpenAICompatibleModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of OpenAI chat completion in async mode.
 
         Args:
```
```diff
@@ -197,9 +255,12 @@ class OpenAICompatibleModel(BaseModelBackend):
                 use for the request.
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk],
+            AsyncChatCompletionStreamManager[BaseModel]]:
+                `ChatCompletion` in the non-stream mode,
+                `AsyncStream[ChatCompletionChunk]` in the stream mode,
+                or `AsyncChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata
```
```diff
@@ -217,10 +278,21 @@ class OpenAICompatibleModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+
+        # Check if streaming is enabled
+        is_streaming = self.model_config_dict.get("stream", False)
+
         if response_format:
-            return await self._arequest_parse(
-                messages, response_format, tools
-            )
+            if is_streaming:
+                # Use streaming parse for structured output
+                return await self._arequest_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                # Use non-streaming parse for structured output
+                return await self._arequest_parse(
+                    messages, response_format, tools
+                )
         else:
             result = await self._arequest_chat_completion(messages, tools)
 
```
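The async path mirrors the sync routing via `arun`. A sketch of consuming the resulting `AsyncChatCompletionStreamManager`, reusing the hypothetical `model` and `CityInfo` from the sync sketch above:

```python
import asyncio


async def main() -> None:
    # `await model.arun(...)` returns the manager itself; entering the
    # async context starts the underlying streaming request.
    manager = await model.arun(
        [{"role": "user", "content": "Name one large city."}],
        response_format=CityInfo,
    )
    async with manager as stream:
        async for event in stream:
            if event.type == "content.delta":
                print(event.delta, end="", flush=True)
        final = await stream.get_final_completion()
    print(final.choices[0].message.parsed)


asyncio.run(main())
```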
```diff
@@ -336,6 +408,62 @@ class OpenAICompatibleModel(BaseModelBackend):
             logger.error(f"Fallback attempt also failed: {e}")
             raise
 
+    def _request_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletionStreamManager[BaseModel]:
+        r"""Request streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._client.beta.chat.completions.stream(
+            messages=messages,
+            model=self.model_type,
+            response_format=response_format,
+            **request_config,
+        )
+
+    async def _arequest_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> AsyncChatCompletionStreamManager[BaseModel]:
+        r"""Request async streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._async_client.beta.chat.completions.stream(
+            messages=messages,
+            model=self.model_type,
+            response_format=response_format,
+            **request_config,
+        )
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
```
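Both helpers are thin wrappers over the OpenAI SDK's beta structured-output streaming endpoint; the managers they return are the SDK's own. For reference, the same flow driven directly against the SDK (a self-contained sketch; the model choice and schema are hypothetical, and `OPENAI_API_KEY` is assumed to be set):

```python
from openai import OpenAI
from pydantic import BaseModel


class Answer(BaseModel):
    reasoning: str
    result: str


client = OpenAI()

# beta.chat.completions.stream() returns the same
# ChatCompletionStreamManager that _request_stream_parse exposes.
with client.beta.chat.completions.stream(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
    response_format=Answer,
) as stream:
    for event in stream:
        if event.type == "content.delta":
            print(event.delta, end="")
    completion = stream.get_final_completion()

print(completion.choices[0].message.parsed)  # an Answer instance (or None)
```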
```diff
@@ -358,6 +486,3 @@ class OpenAICompatibleModel(BaseModelBackend):
             bool: Whether the model is in stream mode.
         """
         return self.model_config_dict.get('stream', False)
-
-    def check_model_config(self):
-        pass
```