camel-ai 0.2.61__py3-none-any.whl → 0.2.64__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (68)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +75 -16
  3. camel/agents/mcp_agent.py +10 -6
  4. camel/configs/__init__.py +3 -0
  5. camel/configs/crynux_config.py +94 -0
  6. camel/{data_collector → data_collectors}/alpaca_collector.py +1 -1
  7. camel/{data_collector → data_collectors}/sharegpt_collector.py +1 -1
  8. camel/interpreters/base.py +14 -1
  9. camel/interpreters/docker/Dockerfile +63 -7
  10. camel/interpreters/docker_interpreter.py +65 -7
  11. camel/interpreters/e2b_interpreter.py +23 -8
  12. camel/interpreters/internal_python_interpreter.py +30 -2
  13. camel/interpreters/ipython_interpreter.py +21 -3
  14. camel/interpreters/subprocess_interpreter.py +34 -2
  15. camel/memories/records.py +5 -3
  16. camel/models/__init__.py +2 -0
  17. camel/models/azure_openai_model.py +101 -25
  18. camel/models/cohere_model.py +65 -0
  19. camel/models/crynux_model.py +94 -0
  20. camel/models/deepseek_model.py +43 -1
  21. camel/models/gemini_model.py +50 -4
  22. camel/models/litellm_model.py +38 -0
  23. camel/models/mistral_model.py +66 -0
  24. camel/models/model_factory.py +10 -1
  25. camel/models/openai_compatible_model.py +81 -17
  26. camel/models/openai_model.py +86 -16
  27. camel/models/reka_model.py +69 -0
  28. camel/models/samba_model.py +69 -2
  29. camel/models/sglang_model.py +74 -2
  30. camel/models/watsonx_model.py +62 -0
  31. camel/retrievers/auto_retriever.py +20 -1
  32. camel/{runtime → runtimes}/daytona_runtime.py +1 -1
  33. camel/{runtime → runtimes}/docker_runtime.py +1 -1
  34. camel/{runtime → runtimes}/llm_guard_runtime.py +2 -2
  35. camel/{runtime → runtimes}/remote_http_runtime.py +1 -1
  36. camel/{runtime → runtimes}/ubuntu_docker_runtime.py +1 -1
  37. camel/societies/workforce/base.py +7 -3
  38. camel/societies/workforce/role_playing_worker.py +2 -2
  39. camel/societies/workforce/single_agent_worker.py +25 -1
  40. camel/societies/workforce/worker.py +5 -3
  41. camel/societies/workforce/workforce.py +409 -7
  42. camel/storages/__init__.py +2 -0
  43. camel/storages/vectordb_storages/__init__.py +2 -0
  44. camel/storages/vectordb_storages/weaviate.py +714 -0
  45. camel/tasks/task.py +19 -10
  46. camel/toolkits/__init__.py +2 -0
  47. camel/toolkits/code_execution.py +37 -8
  48. camel/toolkits/file_write_toolkit.py +4 -2
  49. camel/toolkits/mcp_toolkit.py +480 -733
  50. camel/toolkits/pptx_toolkit.py +777 -0
  51. camel/types/enums.py +56 -1
  52. camel/types/unified_model_type.py +5 -0
  53. camel/utils/__init__.py +16 -0
  54. camel/utils/langfuse.py +258 -0
  55. camel/utils/mcp_client.py +1046 -0
  56. {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/METADATA +9 -1
  57. {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/RECORD +68 -62
  58. /camel/{data_collector → data_collectors}/__init__.py +0 -0
  59. /camel/{data_collector → data_collectors}/base.py +0 -0
  60. /camel/{runtime → runtimes}/__init__.py +0 -0
  61. /camel/{runtime → runtimes}/api.py +0 -0
  62. /camel/{runtime → runtimes}/base.py +0 -0
  63. /camel/{runtime → runtimes}/configs.py +0 -0
  64. /camel/{runtime → runtimes}/utils/__init__.py +0 -0
  65. /camel/{runtime → runtimes}/utils/function_risk_toolkit.py +0 -0
  66. /camel/{runtime → runtimes}/utils/ignore_risk_toolkit.py +0 -0
  67. {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/WHEEL +0 -0
  68. {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/licenses/LICENSE +0 -0
camel/models/deepseek_model.py

@@ -28,7 +28,21 @@ from camel.types import (
     ChatCompletionChunk,
     ModelType,
 )
-from camel.utils import BaseTokenCounter, api_keys_required
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+    get_current_agent_session_id,
+    update_langfuse_trace,
+)
+
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 logger = get_logger(__name__)
 
@@ -176,6 +190,7 @@ class DeepSeekModel(OpenAICompatibleModel):
         )
         return response
 
+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -193,6 +208,19 @@ class DeepSeekModel(OpenAICompatibleModel):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
@@ -205,6 +233,7 @@ class DeepSeekModel(OpenAICompatibleModel):
 
         return self._post_handle_response(response)
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -222,6 +251,19 @@ class DeepSeekModel(OpenAICompatibleModel):
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
camel/models/gemini_model.py

@@ -28,8 +28,18 @@ from camel.types import (
 from camel.utils import (
     BaseTokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class GeminiModel(OpenAICompatibleModel):
     r"""Gemini API in a unified OpenAICompatibleModel interface.
@@ -99,6 +109,7 @@ class GeminiModel(OpenAICompatibleModel):
             processed_messages.append(msg_copy)
         return processed_messages
 
+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -120,6 +131,19 @@ class GeminiModel(OpenAICompatibleModel):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
@@ -130,10 +154,15 @@ class GeminiModel(OpenAICompatibleModel):
                 "Gemini does not support function calling with "
                 "response format."
             )
-            return self._request_parse(messages, response_format)
+            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
+                self._request_parse(messages, response_format)
+            )
         else:
-            return self._request_chat_completion(messages, tools)
+            result = self._request_chat_completion(messages, tools)
+
+        return result
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -155,6 +184,19 @@ class GeminiModel(OpenAICompatibleModel):
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
@@ -165,9 +207,13 @@ class GeminiModel(OpenAICompatibleModel):
                 "Gemini does not support function calling with "
                 "response format."
             )
-            return await self._arequest_parse(messages, response_format)
+            result: Union[
+                ChatCompletion, AsyncStream[ChatCompletionChunk]
+            ] = await self._arequest_parse(messages, response_format)
         else:
-            return await self._arequest_chat_completion(messages, tools)
+            result = await self._arequest_chat_completion(messages, tools)
+
+        return result
 
     def _request_chat_completion(
         self,
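`update_langfuse_trace` is imported from camel.utils but its body is not part of this diff. A plausible reading, sketched below under the assumption that it forwards to Langfuse's v2 context helper and silently no-ops when the SDK is absent:

    from typing import Any, Dict, List, Optional

    def update_langfuse_trace(
        session_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        tags: Optional[List[str]] = None,
    ) -> bool:
        """Annotate the active Langfuse trace; no-op if the SDK is absent."""
        try:
            from langfuse.decorators import langfuse_context
        except ImportError:
            return False  # Langfuse not installed: drop the annotation
        langfuse_context.update_current_trace(
            session_id=session_id, metadata=metadata, tags=tags
        )
        return True

Returning a bool would let callers detect whether the annotation actually happened; the hunks above simply ignore the result.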
camel/models/litellm_model.py

@@ -24,8 +24,19 @@ from camel.utils import (
     BaseTokenCounter,
     LiteLLMTokenCounter,
     dependencies_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class LiteLLMModel(BaseModelBackend):
     r"""Constructor for LiteLLM backend with OpenAI compatibility.
@@ -117,6 +128,7 @@ class LiteLLMModel(BaseModelBackend):
     async def _arun(self) -> None:  # type: ignore[override]
         raise NotImplementedError
 
+    @observe(as_type='generation')
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -132,6 +144,28 @@ class LiteLLMModel(BaseModelBackend):
         Returns:
             ChatCompletion
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response = self.client(
             timeout=self._timeout,
             api_key=self._api_key,
@@ -141,6 +175,10 @@ class LiteLLMModel(BaseModelBackend):
             **self.model_config_dict,
         )
         response = self._convert_response_from_litellm_to_openai(response)
+
+        update_current_observation(
+            usage=response.usage,
+        )
         return response
 
     def check_model_config(self):
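Note that the LiteLLM hunks call `update_current_observation` twice per request: once before the call with the inputs and model parameters, and once after with token usage, so a request that raises still leaves its inputs attached to the generation. Assuming the helper is a thin guard around Langfuse's context API, it could look like:

    from typing import Any

    def update_current_observation(**fields: Any) -> None:
        """Forward observation fields (input, model, usage, ...) to the
        active Langfuse observation; silently no-op without the SDK."""
        try:
            from langfuse.decorators import langfuse_context
        except ImportError:
            return
        langfuse_context.update_current_observation(**fields)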
camel/models/mistral_model.py

@@ -35,6 +35,9 @@ from camel.utils import (
     OpenAITokenCounter,
     api_keys_required,
     dependencies_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
 logger = get_logger(__name__)
@@ -47,6 +50,14 @@ try:
 except (ImportError, AttributeError):
     LLMEvent = None
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class MistralModel(BaseModelBackend):
     r"""Mistral API in a unified BaseModelBackend interface.
@@ -232,6 +243,7 @@ class MistralModel(BaseModelBackend):
         )
         return self._token_counter
 
+    @observe(as_type="generation")
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -242,6 +254,29 @@ class MistralModel(BaseModelBackend):
             "Mistral does not support async inference, using sync "
             "inference instead."
         )
+        update_current_observation(
+            input={
+                "messages": messages,
+                "response_format": response_format,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
@@ -255,6 +290,10 @@ class MistralModel(BaseModelBackend):
 
         openai_response = self._to_openai_response(response)  # type: ignore[arg-type]
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -271,6 +310,7 @@ class MistralModel(BaseModelBackend):
 
         return openai_response
 
+    @observe(as_type="generation")
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -290,6 +330,28 @@ class MistralModel(BaseModelBackend):
         Returns:
             ChatCompletion: The response from the model.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
@@ -303,6 +365,10 @@ class MistralModel(BaseModelBackend):
 
         openai_response = self._to_openai_response(response)  # type: ignore[arg-type]
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
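Here `as_type="generation"` makes Langfuse render the span as an LLM generation (with model, input, and usage fields) rather than a generic span. A self-contained illustration of the same decorator-plus-observation pattern, using the real langfuse v2 decorators API with a stubbed model call and illustrative token counts; it assumes LANGFUSE_PUBLIC_KEY/LANGFUSE_SECRET_KEY are configured in the environment:

    from langfuse.decorators import langfuse_context, observe

    @observe(as_type="generation")
    def fake_completion(prompt: str) -> str:
        # Record the request side of the generation before "calling" the model.
        langfuse_context.update_current_observation(
            input={"messages": [{"role": "user", "content": prompt}]},
            model="mistral-large-illustrative",  # placeholder model name
        )
        answer = "stubbed answer"  # stand-in for the real API response
        # Record the response side: output plus illustrative token counts.
        langfuse_context.update_current_observation(
            output=answer,
            usage={"input": 12, "output": 3},
        )
        return answer

    print(fake_completion("hello"))
    langfuse_context.flush()  # push the buffered trace to the Langfuse backend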
camel/models/model_factory.py

@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import json
+import os
 from typing import ClassVar, Dict, Optional, Type, Union
 
 from camel.models.aiml_model import AIMLModel
@@ -20,6 +21,7 @@ from camel.models.aws_bedrock_model import AWSBedrockModel
 from camel.models.azure_openai_model import AzureOpenAIModel
 from camel.models.base_model import BaseModelBackend
 from camel.models.cohere_model import CohereModel
+from camel.models.crynux_model import CrynuxModel
 from camel.models.deepseek_model import DeepSeekModel
 from camel.models.gemini_model import GeminiModel
 from camel.models.groq_model import GroqModel
@@ -50,7 +52,7 @@ from camel.models.watsonx_model import WatsonXModel
 from camel.models.yi_model import YiModel
 from camel.models.zhipuai_model import ZhipuAIModel
 from camel.types import ModelPlatformType, ModelType, UnifiedModelType
-from camel.utils import BaseTokenCounter
+from camel.utils import BaseTokenCounter, configure_langfuse
 
 
 class ModelFactory:
@@ -96,6 +98,7 @@ class ModelFactory:
         ModelPlatformType.MODELSCOPE: ModelScopeModel,
         ModelPlatformType.NOVITA: NovitaModel,
         ModelPlatformType.WATSONX: WatsonXModel,
+        ModelPlatformType.CRYNUX: CrynuxModel,
     }
 
     @staticmethod
@@ -142,6 +145,12 @@ class ModelFactory:
         Raises:
             ValueError: If there is no backend for the model.
         """
+
+        # Auto-configure Langfuse only if explicitly enabled
+        env_enabled_str = os.environ.get("LANGFUSE_ENABLED")
+        if env_enabled_str and env_enabled_str.lower() == "true":
+            configure_langfuse()
+
         # Convert string to ModelPlatformType enum if needed
         if isinstance(model_platform, str):
             try:
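Because `configure_langfuse()` runs inside `ModelFactory.create`, opting in is just a matter of setting the environment before the first factory call. An illustrative setup sketch (key values are placeholders; the LANGFUSE_* variable names follow Langfuse's standard configuration):

    import os

    # Opt in before the first factory call.
    os.environ["LANGFUSE_ENABLED"] = "true"
    os.environ.setdefault("LANGFUSE_PUBLIC_KEY", "pk-lf-placeholder")
    os.environ.setdefault("LANGFUSE_SECRET_KEY", "sk-lf-placeholder")
    os.environ.setdefault("LANGFUSE_HOST", "https://cloud.langfuse.com")

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType

    # configure_langfuse() fires inside create() because LANGFUSE_ENABLED is
    # "true"; leave the variable unset and the tracing paths stay dormant.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_4O_MINI,
    )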
camel/models/openai_compatible_model.py

@@ -31,8 +31,20 @@ from camel.types import (
 from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
+    get_current_agent_session_id,
+    is_langfuse_available,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
+
 logger = get_logger(__name__)
 
@@ -72,20 +84,37 @@ class OpenAICompatibleModel(BaseModelBackend):
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter, timeout
         )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
+        if is_langfuse_available():
+            from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
+            from langfuse.openai import OpenAI as LangfuseOpenAI
+
+            self._client = LangfuseOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+            self._async_client = LangfuseAsyncOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+        else:
+            self._client = OpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+            self._async_client = AsyncOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
 
+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -107,14 +136,32 @@ class OpenAICompatibleModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return self._request_parse(messages, response_format, tools)
+            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
+                self._request_parse(messages, response_format, tools)
+            )
         else:
-            return self._request_chat_completion(messages, tools)
+            result = self._request_chat_completion(messages, tools)
+
+        return result
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -136,13 +183,30 @@ class OpenAICompatibleModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return await self._arequest_parse(messages, response_format, tools)
+            result: Union[
+                ChatCompletion, AsyncStream[ChatCompletionChunk]
+            ] = await self._arequest_parse(messages, response_format, tools)
         else:
-            return await self._arequest_chat_completion(messages, tools)
+            result = await self._arequest_chat_completion(messages, tools)
+
+        return result
 
     def _request_chat_completion(
         self,
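The constructor swap above relies on `langfuse.openai`, Langfuse's drop-in replacement for the OpenAI client that records each request as a generation. A standalone sketch with an illustrative key and model:

    from langfuse.openai import OpenAI  # traced drop-in for openai.OpenAI

    client = OpenAI(
        api_key="sk-placeholder",  # illustrative key
        base_url="https://api.openai.com/v1",
        timeout=60,
        max_retries=3,
    )
    # The call signature is unchanged; Langfuse records the request as a
    # generation when LANGFUSE_* credentials are configured.
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "ping"}],
    )
    print(response.choices[0].message.content)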