camel-ai 0.2.22__py3-none-any.whl → 0.2.23__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.


Files changed (110)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +570 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +81 -3
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +52 -3
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/environments/__init__.py +16 -0
  18. camel/environments/base.py +503 -0
  19. camel/extractors/__init__.py +16 -0
  20. camel/extractors/base.py +263 -0
  21. camel/memories/agent_memories.py +16 -1
  22. camel/memories/blocks/chat_history_block.py +10 -2
  23. camel/memories/blocks/vectordb_block.py +1 -0
  24. camel/memories/context_creators/score_based.py +20 -3
  25. camel/memories/records.py +10 -0
  26. camel/messages/base.py +8 -8
  27. camel/models/__init__.py +2 -0
  28. camel/models/_utils.py +57 -0
  29. camel/models/aiml_model.py +48 -17
  30. camel/models/anthropic_model.py +41 -3
  31. camel/models/azure_openai_model.py +39 -3
  32. camel/models/base_audio_model.py +92 -0
  33. camel/models/base_model.py +88 -13
  34. camel/models/cohere_model.py +88 -11
  35. camel/models/deepseek_model.py +107 -45
  36. camel/models/fish_audio_model.py +18 -8
  37. camel/models/gemini_model.py +133 -15
  38. camel/models/groq_model.py +72 -10
  39. camel/models/internlm_model.py +14 -3
  40. camel/models/litellm_model.py +9 -2
  41. camel/models/mistral_model.py +42 -5
  42. camel/models/model_manager.py +57 -3
  43. camel/models/moonshot_model.py +33 -4
  44. camel/models/nemotron_model.py +32 -3
  45. camel/models/nvidia_model.py +43 -3
  46. camel/models/ollama_model.py +139 -17
  47. camel/models/openai_audio_models.py +87 -2
  48. camel/models/openai_compatible_model.py +37 -3
  49. camel/models/openai_model.py +158 -46
  50. camel/models/qwen_model.py +61 -4
  51. camel/models/reka_model.py +53 -3
  52. camel/models/samba_model.py +209 -4
  53. camel/models/sglang_model.py +153 -14
  54. camel/models/siliconflow_model.py +16 -3
  55. camel/models/stub_model.py +46 -4
  56. camel/models/togetherai_model.py +38 -3
  57. camel/models/vllm_model.py +37 -3
  58. camel/models/yi_model.py +36 -3
  59. camel/models/zhipuai_model.py +38 -3
  60. camel/retrievers/__init__.py +3 -0
  61. camel/retrievers/hybrid_retrival.py +237 -0
  62. camel/toolkits/__init__.py +15 -1
  63. camel/toolkits/arxiv_toolkit.py +2 -1
  64. camel/toolkits/ask_news_toolkit.py +4 -2
  65. camel/toolkits/audio_analysis_toolkit.py +238 -0
  66. camel/toolkits/base.py +22 -3
  67. camel/toolkits/code_execution.py +2 -0
  68. camel/toolkits/dappier_toolkit.py +2 -1
  69. camel/toolkits/data_commons_toolkit.py +38 -12
  70. camel/toolkits/excel_toolkit.py +172 -0
  71. camel/toolkits/function_tool.py +13 -0
  72. camel/toolkits/github_toolkit.py +5 -1
  73. camel/toolkits/google_maps_toolkit.py +2 -1
  74. camel/toolkits/google_scholar_toolkit.py +2 -0
  75. camel/toolkits/human_toolkit.py +0 -3
  76. camel/toolkits/image_analysis_toolkit.py +202 -0
  77. camel/toolkits/linkedin_toolkit.py +3 -2
  78. camel/toolkits/meshy_toolkit.py +3 -2
  79. camel/toolkits/mineru_toolkit.py +2 -2
  80. camel/toolkits/networkx_toolkit.py +240 -0
  81. camel/toolkits/notion_toolkit.py +2 -0
  82. camel/toolkits/openbb_toolkit.py +3 -2
  83. camel/toolkits/page_script.js +376 -0
  84. camel/toolkits/reddit_toolkit.py +11 -3
  85. camel/toolkits/retrieval_toolkit.py +6 -1
  86. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  87. camel/toolkits/stripe_toolkit.py +8 -2
  88. camel/toolkits/sympy_toolkit.py +6 -1
  89. camel/toolkits/video_analysis_toolkit.py +407 -0
  90. camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +21 -25
  91. camel/toolkits/web_toolkit.py +1307 -0
  92. camel/toolkits/whatsapp_toolkit.py +3 -2
  93. camel/toolkits/zapier_toolkit.py +191 -0
  94. camel/types/__init__.py +2 -2
  95. camel/types/agents/__init__.py +16 -0
  96. camel/types/agents/tool_calling_record.py +52 -0
  97. camel/types/enums.py +3 -0
  98. camel/types/openai_types.py +16 -14
  99. camel/utils/__init__.py +2 -1
  100. camel/utils/async_func.py +2 -2
  101. camel/utils/commons.py +114 -1
  102. camel/verifiers/__init__.py +23 -0
  103. camel/verifiers/base.py +340 -0
  104. camel/verifiers/models.py +82 -0
  105. camel/verifiers/python_verifier.py +202 -0
  106. camel_ai-0.2.23.dist-info/METADATA +671 -0
  107. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/RECORD +122 -97
  108. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/WHEEL +1 -1
  109. camel_ai-0.2.22.dist-info/METADATA +0 -527
  110. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info/licenses}/LICENSE +0 -0
camel/models/anthropic_model.py
@@ -12,7 +12,9 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
 
 from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
 from camel.messages import OpenAIMessage
@@ -59,7 +61,7 @@ class AnthropicModel(BaseModelBackend):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        from anthropic import Anthropic
+        from anthropic import Anthropic, AsyncAnthropic
 
         if model_config_dict is None:
             model_config_dict = AnthropicConfig().as_dict()
@@ -69,6 +71,9 @@
             model_type, model_config_dict, api_key, url, token_counter
         )
         self.client = Anthropic(api_key=self._api_key, base_url=self._url)
+        self.async_client = AsyncAnthropic(
+            api_key=self._api_key, base_url=self._url
+        )
 
     def _convert_response_from_anthropic_to_openai(self, response):
         # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py
@@ -102,9 +107,11 @@
             self._token_counter = AnthropicTokenCounter(self.model_type)
         return self._token_counter
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ):
         r"""Run inference of Anthropic chat completion.
 
@@ -133,6 +140,37 @@
 
         return response
 
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        r"""Run inference of Anthropic chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            ChatCompletion: Response in the OpenAI API format.
+        """
+        from anthropic import NOT_GIVEN
+
+        if messages[0]["role"] == "system":
+            sys_msg = str(messages.pop(0)["content"])
+        else:
+            sys_msg = NOT_GIVEN  # type: ignore[assignment]
+        response = await self.async_client.messages.create(
+            model=self.model_type,
+            system=sys_msg,
+            messages=messages,  # type: ignore[arg-type]
+            **self.model_config_dict,
+        )
+
+        # format response to openai format
+        return self._convert_response_from_anthropic_to_openai(response)
+
     def check_model_config(self):
         r"""Check whether the model configuration is valid for anthropic
         model backends.
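
The new `AsyncAnthropic` client and `_arun` give the Anthropic backend a non-blocking inference path. A minimal usage sketch, assuming camel-ai 0.2.23 with the `anthropic` SDK installed and `ANTHROPIC_API_KEY` set; the model name is illustrative, and `arun` is the public async wrapper added on `BaseModelBackend` later in this diff:

import asyncio

from camel.models.anthropic_model import AnthropicModel

async def main() -> None:
    model = AnthropicModel(model_type="claude-3-5-sonnet-latest")
    messages = [
        # A leading system message is popped off and sent as Anthropic's
        # `system` parameter; the remaining messages pass through as-is.
        {"role": "system", "content": "Answer in one short sentence."},
        {"role": "user", "content": "What is CAMEL-AI?"},
    ]
    response = await model.arun(messages)  # public async entry point
    print(response.choices[0].message.content)

asyncio.run(main())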

camel/models/azure_openai_model.py
@@ -12,9 +12,10 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AzureOpenAI, Stream
+from openai import AsyncAzureOpenAI, AsyncStream, AzureOpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
@@ -95,6 +96,15 @@ class AzureOpenAIModel(BaseModelBackend):
             max_retries=3,
         )
 
+        self._async_client = AsyncAzureOpenAI(
+            azure_endpoint=str(self._url),
+            azure_deployment=self.azure_deployment_name,
+            api_version=self.api_version,
+            api_key=self._api_key,
+            timeout=180,
+            max_retries=3,
+        )
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -107,9 +117,11 @@
             self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of Azure OpenAI chat completion.
 
@@ -129,6 +141,30 @@
         )
         return response
 
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of Azure OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            **self.model_config_dict,
+        )
+        return response
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to Azure OpenAI API.
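
With an `AsyncAzureOpenAI` client alongside the sync one, several chat calls can now be in flight at once. A sketch assuming the constructor accepts the `api_version` and `azure_deployment_name` keyword arguments implied by the attributes above; endpoint and deployment values are placeholders:

import asyncio

from camel.models.azure_openai_model import AzureOpenAIModel

async def main() -> None:
    model = AzureOpenAIModel(
        model_type="gpt-4o-mini",                    # placeholder model
        url="https://my-resource.openai.azure.com",  # placeholder endpoint
        api_version="2024-06-01",                    # placeholder version
        azure_deployment_name="my-deployment",       # placeholder name
    )
    prompts = ["Summarize RLHF in a sentence.", "Summarize RAG in a sentence."]
    # The async client lets both requests run concurrently.
    responses = await asyncio.gather(
        *(model.arun([{"role": "user", "content": p}]) for p in prompts)
    )
    for r in responses:
        print(r.choices[0].message.content)

asyncio.run(main())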

camel/models/base_audio_model.py (new file)
@@ -0,0 +1,92 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from abc import ABC, abstractmethod
+from typing import Any, Optional
+
+
+class BaseAudioModel(ABC):
+    r"""Base class for audio models providing Text-to-Speech (TTS) and
+    Speech-to-Text (STT) functionality.
+    """
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        r"""Initialize an instance of BaseAudioModel.
+
+        Args:
+            api_key (Optional[str]): API key for the audio service. If not
+                provided, will look for an environment variable specific to the
+                implementation.
+            url (Optional[str]): Base URL for the audio API. If not provided,
+                will use a default URL or look for an environment variable
+                specific to the implementation.
+        """
+        self._api_key = api_key
+        self._url = url
+
+    @abstractmethod
+    def text_to_speech(
+        self,
+        input: str,
+        *,
+        storage_path: str,
+        **kwargs: Any,
+    ) -> Any:
+        r"""Convert text to speech.
+
+        Args:
+            input (str): The text to be converted to speech.
+            storage_path (str): The local path to store the
+                generated speech file.
+            **kwargs (Any): Extra kwargs passed to the TTS API.
+
+        Returns:
+            Any: The response from the TTS API, which may vary by
+                implementation.
+        """
+        pass
+
+    @abstractmethod
+    def speech_to_text(
+        self,
+        audio_file_path: str,
+        **kwargs: Any,
+    ) -> str:
+        r"""Convert speech audio to text.
+
+        Args:
+            audio_file_path (str): The audio file path to transcribe.
+            **kwargs (Any): Extra keyword arguments passed to the
+                Speech-to-Text (STT) API.
+
+        Returns:
+            str: The transcribed text.
+        """
+        pass
+
+    def _ensure_directory_exists(self, file_path: str) -> None:
+        r"""Ensure the directory for the given file path exists.
+
+        Args:
+            file_path (str): The file path for which to ensure the directory
+                exists.
+        """
+        directory = os.path.dirname(file_path)
+        if directory and not os.path.exists(directory):
+            os.makedirs(directory)
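
The new `BaseAudioModel` ABC fixes the TTS/STT contract that concrete audio backends implement. A toy subclass sketch showing the required surface; writing plain text to disk stands in for real audio synthesis and is purely illustrative:

from typing import Any

from camel.models.base_audio_model import BaseAudioModel

class EchoAudioModel(BaseAudioModel):
    r"""Illustrative stand-in that writes text to disk instead of audio."""

    def text_to_speech(
        self, input: str, *, storage_path: str, **kwargs: Any
    ) -> Any:
        # The inherited helper creates missing parent directories.
        self._ensure_directory_exists(storage_path)
        with open(storage_path, "w") as f:
            f.write(input)
        return storage_path

    def speech_to_text(self, audio_file_path: str, **kwargs: Any) -> str:
        with open(audio_file_path) as f:
            return f.read()

model = EchoAudioModel()
model.text_to_speech("hello", storage_path="out/speech.txt")
print(model.speech_to_text("out/speech.txt"))  # -> hello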

camel/models/base_model.py
@@ -14,9 +14,10 @@
 import abc
 import re
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import Stream
+from openai import AsyncStream, Stream
+from pydantic import BaseModel
 
 from camel.messages import OpenAIMessage
 from camel.types import (
@@ -102,7 +103,7 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         self, messages: List[OpenAIMessage]
     ) -> List[OpenAIMessage]:
         r"""Preprocess messages before sending to model API.
-        Removes thinking content and other model-specific preprocessing.
+        Removes thinking content from assistant and user messages.
 
         Args:
             messages (List[OpenAIMessage]): Original messages
@@ -110,39 +111,113 @@
         Returns:
             List[OpenAIMessage]: Preprocessed messages
         """
-        # Remove thinking content from messages before sending to API
-        # This ensures only the final response is sent, excluding
-        # intermediate thought processes
-        return [
-            {  # type: ignore[misc]
-                **msg,
-                'content': re.sub(
+
+        def should_process_thinking(msg: OpenAIMessage) -> bool:
+            # Only process thinking content for assistant and user messages
+            return msg['role'] in ['assistant', 'user'] and isinstance(
+                msg['content'], str
+            )
+
+        def remove_thinking(content: str) -> str:
+            # Only remove thinking content if the tags are present
+            if '<think>' in content and '</think>' in content:
+                return re.sub(
                     r'<think>.*?</think>',
                     '',
-                    msg['content'],  # type: ignore[arg-type]
+                    content,
                     flags=re.DOTALL,
-                ).strip(),
+                ).strip()
+            return content
+
+        return [
+            {  # type: ignore[misc]
+                **msg,
+                'content': (
+                    remove_thinking(msg['content'])  # type: ignore[arg-type]
+                    if should_process_thinking(msg)
+                    else msg['content']
+                ),
             }
             for msg in messages
         ]
 
     @abstractmethod
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        pass
+
+    @abstractmethod
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        pass
+
     def run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs the query to the backend model.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The response format
+                to use for the model. (default: :obj:`None`)
+            tools (Optional[List[Tool]]): The schema of tools to use for the
+                model for this request. Will override the tools specified in
+                the model configuration (but not change the configuration).
+                (default: :obj:`None`)
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        pass
+        # None -> use default tools
+        if tools is None:
+            tools = self.model_config_dict.get("tools", None)
+        # Empty -> use no tools
+        elif not tools:
+            tools = None
+        return self._run(messages, response_format, tools)
+
+    async def arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs the query to the backend model asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The response format
+                to use for the model. (default: :obj:`None`)
+            tools (Optional[List[Tool]]): The schema of tools to use for the
+                model for this request. Will override the tools specified in
+                the model configuration (but not change the configuration).
+                (default: :obj:`None`)
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        if tools is None:
+            tools = self.model_config_dict.get("tools", None)
+        elif not tools:
+            tools = None
+        return await self._arun(messages, response_format, tools)
 
     @abstractmethod
     def check_model_config(self):
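
The behavioral core of this change is that `run` and `arun` are now concrete and normalize the per-request `tools` argument before delegating to the abstract `_run`/`_arun`: `None` falls back to the tools configured in `model_config_dict`, while an explicitly empty list disables tools for that call only. A standalone sketch of just that normalization, using a hypothetical stub class rather than a real backend:

from typing import Any, Dict, List, Optional

class StubBackend:
    """Hypothetical stand-in mimicking BaseModelBackend.run's tool handling."""

    def __init__(self, model_config_dict: Dict[str, Any]) -> None:
        self.model_config_dict = model_config_dict

    def run(self, messages: List[Dict], tools: Optional[List] = None):
        if tools is None:
            # None -> fall back to the tools configured on the model
            tools = self.model_config_dict.get("tools", None)
        elif not tools:
            # [] -> explicitly send no tools for this request
            tools = None
        return tools  # a real backend would call self._run(messages, ..., tools)

backend = StubBackend({"tools": [{"type": "function"}]})
assert backend.run([]) == [{"type": "function"}]                  # config default
assert backend.run([], tools=[]) is None                          # override: none
assert backend.run([], tools=[{"type": "x"}]) == [{"type": "x"}]  # per-call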

camel/models/cohere_model.py
@@ -16,7 +16,9 @@ import json
 import logging
 import os
 import uuid
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
 
 if TYPE_CHECKING:
     from cohere.types import ChatMessageV2, ChatResponse
@@ -24,6 +26,7 @@ if TYPE_CHECKING:
 from camel.configs import COHERE_API_PARAMS, CohereConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import ChatCompletion, ModelType
 from camel.utils import (
     BaseTokenCounter,
@@ -67,6 +70,7 @@ class CohereModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = cohere.ClientV2(api_key=self._api_key)
+        self._async_client = cohere.AsyncClientV2(api_key=self._api_key)
 
     def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion:
         if response.usage and response.usage.tokens:
@@ -215,7 +219,30 @@
         )
         return self._token_counter
 
-    def run(self, messages: List[OpenAIMessage]) -> ChatCompletion:
+    def _prepare_request(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
         r"""Runs inference of Cohere chat completion.
 
         Args:
@@ -226,21 +253,71 @@
         """
         from cohere.core.api_error import ApiError
 
-        cohere_messages = self._to_cohere_chatmessage(messages)
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
 
-        # Removing 'strict': True from the dictionary for
-        # cohere client
-        if self.model_config_dict.get('tools') is not None:
-            for tool in self.model_config_dict.get('tools', []):
-                function_dict = tool.get('function', {})
-                if 'strict' in function_dict:
-                    del function_dict['strict']
+        cohere_messages = self._to_cohere_chatmessage(messages)
 
         try:
             response = self._client.chat(
                 messages=cohere_messages,
                 model=self.model_type,
-                **self.model_config_dict,
+                **request_config,
+            )
+        except ApiError as e:
+            logging.error(f"Cohere API Error: {e.status_code}")
+            logging.error(f"Error body: {e.body}")
+            raise
+        except Exception as e:
+            logging.error(f"Unexpected error when calling Cohere API: {e!s}")
+            raise
+
+        openai_response = self._to_openai_response(response)
+
+        # Add AgentOps LLM Event tracking
+        if LLMEvent:
+            llm_event = LLMEvent(
+                thread_id=openai_response.id,
+                prompt=" ".join(
+                    [message.get("content") for message in messages]  # type: ignore[misc]
+                ),
+                prompt_tokens=openai_response.usage.prompt_tokens,  # type: ignore[union-attr]
+                completion=openai_response.choices[0].message.content,
+                completion_tokens=openai_response.usage.completion_tokens,  # type: ignore[union-attr]
+                model=self.model_type,
+            )
+            record(llm_event)
+
+        return openai_response
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        r"""Runs inference of Cohere chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+        Returns:
+            ChatCompletion.
+        """
+        from cohere.core.api_error import ApiError
+
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        cohere_messages = self._to_cohere_chatmessage(messages)
+
+        try:
+            response = await self._async_client.chat(
+                messages=cohere_messages,
+                model=self.model_type,
+                **request_config,
             )
         except ApiError as e:
             logging.error(f"Cohere API Error: {e.status_code}")