camel-ai 0.2.17__py3-none-any.whl → 0.2.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of camel-ai might be problematic.

camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.17'
+__version__ = '0.2.18'
 
 __all__ = [
     '__version__',
camel/agents/chat_agent.py CHANGED
@@ -579,13 +579,18 @@ class ChatAgent(BaseAgent):
         )
 
         self.original_model_dict = self.model_backend.model_config_dict
-        if response_format and self.model_type in {"gpt-4o", "gpt-4o-mini"}:
+        model_response_format_modified = False
+        if (
+            response_format
+            and self.model_type.support_native_structured_output
+        ):
             self.model_backend.model_config_dict = (
                 self.original_model_dict.copy()
             )
             self.model_backend.model_config_dict["response_format"] = (
                 response_format
             )
+            model_response_format_modified = True
 
         # Convert input message to BaseMessage if necessary
         if isinstance(input_message, str):
@@ -604,7 +609,12 @@ class ChatAgent(BaseAgent):
         # Add user input to memory
         self.update_memory(input_message, OpenAIBackendRole.USER)
 
-        return self._handle_step(response_format, self.single_iteration)
+        try:
+            return self._handle_step(response_format, self.single_iteration)
+        finally:
+            if model_response_format_modified:
+                # Reset model config back to original state
+                self.model_backend.model_config_dict = self.original_model_dict
 
     def _inject_tool_prompt(self) -> None:
         r"""Generate and add the tool prompt to memory."""
camel/configs/deepseek_config.py CHANGED
@@ -30,7 +30,7 @@ class DeepSeekConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`1.0`)
         top_p (float, optional): Controls the diversity and focus of the
             generated results. Higher values make the output more diverse,
             while lower values make it more focused. (default: :obj:`1.0`)
@@ -86,7 +86,7 @@ class DeepSeekConfig(BaseConfig):
             :obj:`True`)
     """
 
-    temperature: float = 0.2  # deepseek default: 1.0
+    temperature: float = 1.0  # deepseek default: 1.0
     top_p: float = 1.0
     stream: bool = False
     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
camel/configs/internlm_config.py CHANGED
@@ -27,7 +27,7 @@ class InternLMConfig(BaseConfig):
             (default: :obj:`False`)
         temperature (float, optional): Controls the diversity and focus of
             the generated results. Lower values make the output more focused,
-            while higher values make it more diverse. (default: :obj:`0.3`)
+            while higher values make it more diverse. (default: :obj:`0.8`)
         top_p (float, optional): Controls the diversity and focus of the
             generated results. Higher values make the output more diverse,
             while lower values make it more focused. (default: :obj:`0.9`)
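Both config hunks raise CAMEL's sampling defaults to match the providers' own API defaults (DeepSeek temperature 0.2 → 1.0, InternLM 0.3 → 0.8). Callers who relied on the old, more deterministic sampling can pin it explicitly; a short sketch, assuming DeepSeekConfig is importable from camel.configs as the package's other configs are:

from camel.configs import DeepSeekConfig

cfg = DeepSeekConfig()                    # picks up the new 1.0 default
assert cfg.temperature == 1.0
legacy = DeepSeekConfig(temperature=0.2)  # pin the pre-0.2.18 behavior explicitly
assert legacy.temperature == 0.2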
camel/datagen/self_instruct/self_instruct.py CHANGED
@@ -171,6 +171,7 @@ class SelfInstructPipeline:
         )
 
         response = self.agent.step(prompt)
+        self.agent.reset()
         generated_tasks = [
             line.strip()
             for line in response.msgs[0].content.split("\n")
@@ -197,6 +198,7 @@ class SelfInstructPipeline:
             "{\n \"answer\": false\n}\n"
         )
         response = self.agent.step(clf_prompt)
+        self.agent.reset()
         try:
             structured_response = AgentResponse.parse_raw(
                 response.msgs[0].content.strip()
@@ -241,6 +243,7 @@ class SelfInstructPipeline:
         )
 
         response = self.agent.step(prompt)
+        self.agent.reset()
         generated_text = response.msgs[0].content.strip()
 
         if classification:
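All three hunks add `self.agent.reset()` immediately after each `step()` call, so every instruction-generation and classification prompt starts from an empty conversation: earlier prompts no longer leak into later context, and the agent's memory stops growing across pipeline stages. A toy illustration of the step-then-reset pattern, with a hypothetical MockAgent standing in for ChatAgent:

class MockAgent:
    """Hypothetical stand-in for ChatAgent; shows why reset() matters."""
    def __init__(self) -> None:
        self.history: list = []
    def step(self, prompt: str) -> str:
        self.history.append(prompt)       # memory grows with every call
        return f"reply ({len(self.history)} messages in context)"
    def reset(self) -> None:
        self.history.clear()              # next prompt sees a clean slate

agent = MockAgent()
for prompt in ["generate tasks", "classify task"]:
    print(agent.step(prompt))
    agent.reset()  # without this, earlier prompts stay in context for later steps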
camel/interpreters/internal_python_interpreter.py CHANGED
@@ -73,7 +73,8 @@ class InternalPythonInterpreter(BaseInterpreter):
             module and its submodule or function name are separated by a period
             (:obj:`.`). (default: :obj:`None`)
         unsafe_mode (bool, optional): If `True`, the interpreter runs the code
-            by `eval()` without any security check. (default: :obj:`False`)
+            by `eval()` or `exec()` without any security check.
+            (default: :obj:`False`)
         raise_error (bool, optional): Raise error if the interpreter fails.
             (default: :obj:`False`)
     """
@@ -102,9 +103,9 @@ class InternalPythonInterpreter(BaseInterpreter):
         type is supported, and then executes the code. If `unsafe_mode` is
         set to `False`, the code is executed in a controlled environment using
         the `execute` method. If `unsafe_mode` is `True`, the code is executed
-        using `eval()` with the action space as the global context. An
-        `InterpreterError` is raised if the code type is unsupported or if any
-        runtime error occurs during execution.
+        using `eval()` or `exec()` with the action space as the global context.
+        An `InterpreterError` is raised if the code type is unsupported or if
+        any runtime error occurs during execution.
 
         Args:
             code (str): The python code to be executed.
@@ -125,10 +126,26 @@ class InternalPythonInterpreter(BaseInterpreter):
                 f"`{self.__class__.__name__}` only supports "
                 f"{', '.join(self._CODE_TYPES)}."
             )
-        if not self.unsafe_mode:
-            return str(self.execute(code))
+        if self.unsafe_mode:
+            import contextlib
+            import io
+
+            # Try to execute first and capture stdout
+            output_buffer = io.StringIO()
+            with contextlib.redirect_stdout(output_buffer):
+                exec(code, self.action_space)
+            result = output_buffer.getvalue()
+
+            # If no output was captured, try to evaluate the code
+            if not result:
+                try:
+                    result = str(eval(code, self.action_space))
+                except (SyntaxError, NameError):
+                    result = ""  # If eval fails, return empty string
+
+            return result
         else:
-            return str(eval(code, self.action_space))
+            return str(self.execute(code))
 
     def update_action_space(self, action_space: Dict[str, Any]) -> None:
         r"""Updates action space for *python* interpreter."""
camel/messages/conversion/conversation_models.py CHANGED
@@ -69,11 +69,6 @@ class ShareGPTConversation(RootModel):
         for i in range(1, len(messages)):
             curr, prev = messages[i], messages[i - 1]
 
-            print("@@@@")
-            print(curr)
-            print(prev)
-            print("@@@@")
-
             if curr.from_ == "tool":
                 if prev.from_ != "gpt" or "<tool_call>" not in prev.value:
                     raise ValueError(
camel/models/deepseek_model.py CHANGED
@@ -13,6 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
+import warnings
 from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
@@ -110,11 +111,77 @@ class DeepSeekModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        # deepseek reasoner has limitations
+        # reference: https://api-docs.deepseek.com/guides/reasoning_model#api-parameters
+        if self.model_type in [
+            ModelType.DEEPSEEK_REASONER,
+        ]:
+            warnings.warn(
+                "Warning: You are using an DeepSeek Reasoner model, "
+                "which has certain limitations, reference: "
+                "`https://api-docs.deepseek.com/guides/reasoning_model#api-parameters`.",
+                UserWarning,
+            )
+
+            # Check and remove unsupported parameters and reset the fixed
+            # parameters
+            unsupported_keys = [
+                "temperature",
+                "top_p",
+                "presence_penalty",
+                "frequency_penalty",
+                "logprobs",
+                "top_logprobs",
+                "tools",
+            ]
+            for key in unsupported_keys:
+                if key in self.model_config_dict:
+                    del self.model_config_dict[key]
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
             **self.model_config_dict,
         )
+
+        # Temporary solution to handle the case where
+        # deepseek returns a reasoning_content
+        if (
+            self.model_type
+            in [
+                ModelType.DEEPSEEK_REASONER,
+            ]
+            and os.environ.get("GET_REASONING_CONTENT", "false").lower()
+            == "true"
+        ):
+            reasoning_content = response.choices[0].message.reasoning_content
+            combined_content = (
+                response.choices[0].message.content
+                + "\n\nBELOW IS THE REASONING CONTENT:\n\n"
+                + (reasoning_content if reasoning_content else "")
+            )
+
+            response = ChatCompletion.construct(
+                id=response.id,
+                choices=[
+                    dict(
+                        index=response.choices[0].index,
+                        message={
+                            "role": response.choices[0].message.role,
+                            "content": combined_content,
+                            "tool_calls": None,
+                        },
+                        finish_reason=response.choices[0].finish_reason
+                        if response.choices[0].finish_reason
+                        else None,
+                    )
+                ],
+                created=response.created,
+                model=response.model,
+                object="chat.completion",
+                usage=response.usage,
+            )
+
         return response
 
     def check_model_config(self):
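The new DEEPSEEK_REASONER path warns about the endpoint's parameter restrictions, strips the keys the API rejects before building the request, and, when the GET_REASONING_CONTENT environment variable is "true", rebuilds the ChatCompletion so reasoning_content is appended to the answer text. A runnable sketch of just the key-stripping step (the config dict values are illustrative):

# Keys the reasoner endpoint rejects, mirrored from the hunk above.
UNSUPPORTED_KEYS = [
    "temperature", "top_p", "presence_penalty",
    "frequency_penalty", "logprobs", "top_logprobs", "tools",
]

config = {"temperature": 1.0, "top_p": 0.9, "max_tokens": 1024}
for key in UNSUPPORTED_KEYS:
    config.pop(key, None)  # pop(..., None) has the same effect as the guarded `del`
print(config)              # {'max_tokens': 1024}

Opting in to the appended reasoning is an environment toggle: set os.environ["GET_REASONING_CONTENT"] = "true" before the call, and the message content then ends with the "BELOW IS THE REASONING CONTENT" section.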
camel/types/enums.py CHANGED
@@ -54,6 +54,19 @@ class ModelType(UnifiedModelType, Enum):
     GROQ_GEMMA_7B_IT = "gemma-7b-it"
     GROQ_GEMMA_2_9B_IT = "gemma2-9b-it"
 
+    # TogetherAI platform models support tool calling
+    TOGETHER_LLAMA_3_1_8B = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"
+    TOGETHER_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
+    TOGETHER_LLAMA_3_1_405B = "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo"
+    TOGETHER_LLAMA_3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
+    TOGETHER_MIXTRAL_8_7B = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    TOGETHER_MISTRAL_7B = "mistralai/Mistral-7B-Instruct-v0.1"
+
+    # SambaNova Cloud platform models support tool calling
+    SAMBA_LLAMA_3_1_8B = "Meta-Llama-3.1-8B-Instruct"
+    SAMBA_LLAMA_3_1_70B = "Meta-Llama-3.1-70B-Instruct"
+    SAMBA_LLAMA_3_1_405B = "Meta-Llama-3.1-405B-Instruct"
+
     STUB = "stub"
 
     # Legacy anthropic models
@@ -141,7 +154,7 @@ class ModelType(UnifiedModelType, Enum):
 
     # DeepSeek models
     DEEPSEEK_CHAT = "deepseek-chat"
-
+    DEEPSEEK_REASONER = "deepseek-reasoner"
     # InternLM models
     INTERNLM3_LATEST = "internlm3-latest"
     INTERNLM3_8B_INSTRUCT = "internlm3-8b-instruct"
@@ -175,6 +188,8 @@ class ModelType(UnifiedModelType, Enum):
             self.is_deepseek,
             self.is_cohere,
             self.is_internlm,
+            self.is_together,
+            self.is_sambanova,
         ]
     )
 
@@ -248,6 +263,27 @@ class ModelType(UnifiedModelType, Enum):
         ModelType.GROQ_GEMMA_2_9B_IT,
     }
 
+    @property
+    def is_together(self) -> bool:
+        r"""Returns whether this type of models is served by Together AI."""
+        return self in {
+            ModelType.TOGETHER_LLAMA_3_1_405B,
+            ModelType.TOGETHER_LLAMA_3_1_70B,
+            ModelType.TOGETHER_LLAMA_3_3_70B,
+            ModelType.TOGETHER_LLAMA_3_3_70B,
+            ModelType.TOGETHER_MISTRAL_7B,
+            ModelType.TOGETHER_MIXTRAL_8_7B,
+        }
+
+    @property
+    def is_sambanova(self) -> bool:
+        r"""Returns whether this type of models is served by SambaNova AI."""
+        return self in {
+            ModelType.SAMBA_LLAMA_3_1_8B,
+            ModelType.SAMBA_LLAMA_3_1_70B,
+            ModelType.SAMBA_LLAMA_3_1_405B,
+        }
+
     @property
     def is_mistral(self) -> bool:
         r"""Returns whether this type of models is served by Mistral."""
@@ -365,6 +401,7 @@ class ModelType(UnifiedModelType, Enum):
     def is_deepseek(self) -> bool:
         return self in {
             ModelType.DEEPSEEK_CHAT,
+            ModelType.DEEPSEEK_REASONER,
         }
 
     @property
@@ -409,6 +446,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GLM_4,
             ModelType.QWEN_VL_PLUS,
             ModelType.NVIDIA_LLAMA3_70B,
+            ModelType.TOGETHER_MISTRAL_7B,
         }:
             return 8_192
         elif self in {
@@ -419,6 +457,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.YI_VISION,
             ModelType.YI_SPARK,
             ModelType.YI_LARGE_RAG,
+            ModelType.SAMBA_LLAMA_3_1_8B,
+            ModelType.SAMBA_LLAMA_3_1_405B,
         }:
             return 16_384
         elif self in {
@@ -438,11 +478,13 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.INTERNLM3_LATEST,
             ModelType.INTERNLM2_5_LATEST,
             ModelType.INTERNLM2_PRO_CHAT,
+            ModelType.TOGETHER_MIXTRAL_8_7B,
         }:
             return 32_768
         elif self in {
             ModelType.MISTRAL_MIXTRAL_8x22B,
             ModelType.DEEPSEEK_CHAT,
+            ModelType.DEEPSEEK_REASONER,
         }:
             return 64_000
         elif self in {
@@ -475,6 +517,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_LLAMA3_2_3B_INSTRUCT,
             ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT,
             ModelType.GROQ_LLAMA_3_3_70B,
+            ModelType.SAMBA_LLAMA_3_1_70B,
         }:
             return 128_000
         elif self in {
@@ -484,6 +527,10 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.QWEN_PLUS,
             ModelType.QWEN_TURBO,
             ModelType.QWEN_CODER_TURBO,
+            ModelType.TOGETHER_LLAMA_3_1_8B,
+            ModelType.TOGETHER_LLAMA_3_1_70B,
+            ModelType.TOGETHER_LLAMA_3_1_405B,
+            ModelType.TOGETHER_LLAMA_3_3_70B,
         }:
             return 131_072
         elif self in {
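The new enum members plug into the existing provider predicates and the token-limit table above. A quick usage sketch (import path per camel/types/__init__.py); note that the is_together set as shipped lists TOGETHER_LLAMA_3_3_70B twice and does not include TOGETHER_LLAMA_3_1_8B:

from camel.types import ModelType

m = ModelType.TOGETHER_LLAMA_3_1_70B
print(m.is_together)                                # True
print(m.token_limit)                                # 131_072, per the table above
print(ModelType.DEEPSEEK_REASONER.token_limit)      # 64_000
print(ModelType.TOGETHER_LLAMA_3_1_8B.is_together)  # False: absent from the set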
camel_ai-0.2.18.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: camel-ai
-Version: 0.2.17
+Version: 0.2.18
 Summary: Communicative Agents for AI Society Study
 License: Apache-2.0
 Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
camel_ai-0.2.18.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
-camel/__init__.py,sha256=GMXArXemoUFgm0mT_15RdGHxa2wiUJ4JTwFT5qN02Fk,912
+camel/__init__.py,sha256=W2l5H1hfYCuOVb4ujOE8couL8uN8a5Y-Woc6TeNwhug,912
 camel/agents/__init__.py,sha256=LcS4m8s97-yADfznvcaAdUe9W0E9h3m6zrSc9H6m9so,1545
 camel/agents/base.py,sha256=c4bJYL3G3Z41SaFdMPMn8ZjLdFiFaVOFO6EQIfuCVR8,1124
-camel/agents/chat_agent.py,sha256=YmcG-zNhmsc-JvcAh2XNz6OTQVggBKIOYjqEGrMUjcA,56299
+camel/agents/chat_agent.py,sha256=KJM7FbcuOYcTF-PT9IYKtONjjpa-ZhsPFlrlCKf8zW0,56657
 camel/agents/critic_agent.py,sha256=qFVlHlQo0CVgmPWfWYLT8_oP_KyzCLFsQw_nN_vu5Bs,7487
 camel/agents/deductive_reasoner_agent.py,sha256=6BZGaq1hR6hKJuQtOfoYQnk_AkZpw_Mr7mUy2MspQgs,13540
 camel/agents/embodied_agent.py,sha256=XBxBu5ZMmSJ4B2U3Z7SMwvLlgp6yNpaBe8HNQmY9CZA,7536
@@ -34,10 +34,10 @@ camel/configs/__init__.py,sha256=4Q9kpo_hs7wKrpMMTCMXNtjMXdWsQ1GcUj2wB4dEODk,290
 camel/configs/anthropic_config.py,sha256=WIIyPYx7z70jiJoCc1Rz_58jrXRirpyJMlr0FrIii2I,3435
 camel/configs/base_config.py,sha256=RrlOwwTUXeTjsDChZXUZIBK1uCojyavEbX21bGVLuog,3286
 camel/configs/cohere_config.py,sha256=joF4GHqoTIRuEDlyTmxW5Ud23psE0xP1VCcEvKychko,3997
-camel/configs/deepseek_config.py,sha256=ZH6VELkvZ0rjA64PTwcvINDo4PWtkiPMTY7VwNXxc0I,6685
+camel/configs/deepseek_config.py,sha256=jME5rfx8MYwyykZkAXZG-QhpMe1IBOvF9W38s5t9Fq0,6685
 camel/configs/gemini_config.py,sha256=m4p9zijSBmIba41NbSa997NRz2HumiRcb5nTICAMPXM,5686
 camel/configs/groq_config.py,sha256=Xe82_EbEYfacNXQApIHZiXw-NscufZxnLau72YEy_iA,5744
-camel/configs/internlm_config.py,sha256=CyByp-I93Vry_WKypq8E-qxfbgQ-VuONJFrFyjtAZx0,2979
+camel/configs/internlm_config.py,sha256=I1Hcyj5r3Sq7WUu0ypEUroqtOGbI2dXawUS6GVGhW6U,2979
 camel/configs/litellm_config.py,sha256=oa6b67M0UotlvN7NuXrSUXLrskdpm3RMcew0rBfSsBc,4686
 camel/configs/mistral_config.py,sha256=ul7AAeG3172PtodEEruAZky0OURwgp6YeNq8ma-4os4,3463
 camel/configs/nvidia_config.py,sha256=1Oc3tulHOqAfx1mkrEywrxKIV1SBNzPm0CNrWgj9HXo,3226
@@ -62,7 +62,7 @@ camel/datagen/self_instruct/filter/__init__.py,sha256=UiGBfDRYO-3Z3dhaxAFVh4F8PF
 camel/datagen/self_instruct/filter/filter_function.py,sha256=-voPwP83c_bkZrSAhwludBCtfsKDFG_jlDHcNUOLV7o,6691
 camel/datagen/self_instruct/filter/filter_registry.py,sha256=5M_aNIopBeBj7U4fUsrAQpXQ2cZT6o6GaIIo0briFw0,2125
 camel/datagen/self_instruct/filter/instruction_filter.py,sha256=la_7P5bVdrk2qffnYFI2Ie3cjCEEHBxe4HB8PZ5jMq0,3426
-camel/datagen/self_instruct/self_instruct.py,sha256=3HRAptA2ZqCLdTs-_adNTReAMirDjSlQsCXzfvxk4GM,13995
+camel/datagen/self_instruct/self_instruct.py,sha256=W_0LSSnTBcqZD1dtdWIgXeTcgFEVqjLyTZojj6lYC-0,14076
 camel/datagen/self_instruct/templates.py,sha256=7YMOUcIig6vLjqSwkWCq8XeRCjWq0Mfyzptn7DOmeAo,19480
 camel/datahubs/__init__.py,sha256=1a8fRuzgirO2pHtPnuisZ76iF_AN9GxMFq9gwFKWE5I,906
 camel/datahubs/base.py,sha256=4QKWiJaeL5ReQpyTAbOtzHs-2CzAYbVyoMngYwdpZGU,4357
@@ -81,7 +81,7 @@ camel/interpreters/__init__.py,sha256=NOQUsg7gR84zO8nBXu4JGUatsxSDJqZS6otltjXfop
 camel/interpreters/base.py,sha256=F026f2ZnvHwikSMbk6APYNvB9qP4Ye5quSkTbFKV3O0,1898
 camel/interpreters/docker_interpreter.py,sha256=Uo5r2jcJGjC6rn5Yzx9qLzlXTsA5RH7AnFe7I0rxo10,8700
 camel/interpreters/e2b_interpreter.py,sha256=UC0en39x705cnnMCX4GxN7Tx0gCpu5yuWOFSBl_TagE,4815
-camel/interpreters/internal_python_interpreter.py,sha256=YYAXAmDWayrPQgeae7UVdD_k35DHxqUyFuHfDsApQjc,21860
+camel/interpreters/internal_python_interpreter.py,sha256=9psFm8mkN5-5WdTW__VBjDoh_u-PCifJMQYeo0DEoZo,22464
 camel/interpreters/interpreter_error.py,sha256=uEhcmHmmcajt5C9PLeHs21h1fE6cmyt23tCAGie1kTA,880
 camel/interpreters/ipython_interpreter.py,sha256=-erOR6imuh5pUtpbUYky3zoLDr30Y5E7lm59BwwxzNs,5976
 camel/interpreters/subprocess_interpreter.py,sha256=HZBpYBI_W1WPZ6W0uEXYnlAzGC-7fJChGMXl1yoMTss,6909
@@ -107,7 +107,7 @@ camel/messages/__init__.py,sha256=Px-gTFp2Kcgbeb2sZQ_f4tqjoLHE-QEOiMHIMfPrvTw,19
 camel/messages/base.py,sha256=1jCeQn0Rs7vujjG2iqlBG449dGqM1INZVlVZGG5IY2E,19614
 camel/messages/conversion/__init__.py,sha256=8B4C-0wj-dm925YRKNyx31WYK25PWpME7Q9jPtx2jkY,1047
 camel/messages/conversion/alpaca.py,sha256=jBU2bMhzNjzptGuoasThYvFov_cYPCYt3pEfs0T7z7U,4163
-camel/messages/conversion/conversation_models.py,sha256=uWVga8CyHG4Q61ABMz8KtZngRvhXJeJ5cY2QZTSD9Wo,5401
+camel/messages/conversion/conversation_models.py,sha256=f2ybtYdCbILq9IYgaHkQ57yYxDdCBSspKrfaArZvNw8,5300
 camel/messages/conversion/sharegpt/__init__.py,sha256=oWUuHV5w85kxqhz_hoElLmCfzLm-ccku-fM9SnUJ5zI,794
 camel/messages/conversion/sharegpt/function_call_formatter.py,sha256=cn7e7CfmxEVFlfOqhjhNuA8nuWvWD6hXYn-3okXNxxQ,1832
 camel/messages/conversion/sharegpt/hermes/__init__.py,sha256=mxuMSm-neaTgInIjYXuIVdC310E6jKJzM3IdtaJ4qY4,812
@@ -118,7 +118,7 @@ camel/models/anthropic_model.py,sha256=BOj4vEtYVWbgy3DmBBlFh6LPXHbi1-LCPWzIxFuw9
 camel/models/azure_openai_model.py,sha256=ptL4YK8KkAbOA6XDxIhcEqxPOVGrYmzXqBzdsZAyHss,6083
 camel/models/base_model.py,sha256=rxRZc31cKone4OGuvXi14FI_O9TC1aBvIy8WFSlVeSI,5727
 camel/models/cohere_model.py,sha256=4Sm-YvQnSquz8L4EN9qGn0qz6WTz4cm_BQtP7_NZOHQ,10731
-camel/models/deepseek_model.py,sha256=3RfF3-QXzN3mvtYwno6nyS20_-gqxdua_5k4ywDzN0A,5086
+camel/models/deepseek_model.py,sha256=VQRsxYp4W1h7tD_WcBgz5rfJGg4s6fLX8-FO_ggQ7bw,7555
 camel/models/fish_audio_model.py,sha256=mid-wdV_hkxHay-vgnF3zshwdXLyO4eM31139_0BXzo,5586
 camel/models/gemini_model.py,sha256=mS3_91vlLifsIolDR3TYRFzpV67iAnFiIRAZ8F5O7Qc,5462
 camel/models/groq_model.py,sha256=dSD23iHOeQ7ppDp34h2waSbRWRL0OjpsisA9_oUEprc,5014
@@ -285,7 +285,7 @@ camel/toolkits/video_toolkit.py,sha256=n1P7F_cjdnC2jfUQQiJnhueRYA83GIjUF7HWIrES5
 camel/toolkits/weather_toolkit.py,sha256=qHAMD56zqd5GWnEWiaA_0aBDwvgacdx0pAHScinY4GY,6965
 camel/toolkits/whatsapp_toolkit.py,sha256=H_83AFCIoBMvZUcfUvfRTIAjfR2DR79xP2J-rfQKtNo,6326
 camel/types/__init__.py,sha256=_NYwmy412tubPYJon26fS9itGnylP48NLFKgwyMiJNs,2251
-camel/types/enums.py,sha256=DnregHhO2yv61k39qT0CRA7ZP8s4-GAGMdMaXzIC6dM,24695
+camel/types/enums.py,sha256=HUXCDBUfIqpHofhjuWTyCrlm81KSjcG1ghTeu6cuZqY,26730
 camel/types/openai_types.py,sha256=7Vlci1uRbpSS81B958Z8ADnkzVyqxV7O5H8hv0i-tdo,2328
 camel/types/unified_model_type.py,sha256=FT-abBHFTRN6qMRSVa_Lqljoc7JmyyMs8PeJfQUOSag,4119
 camel/utils/__init__.py,sha256=0K8HKzUlOStBjo9Mt3tgFbv6YINMIiF0b7MSWRyZ-NA,2471
@@ -294,7 +294,7 @@ camel/utils/commons.py,sha256=Ph5O_vihyH85BfQ-A4Z2kc0uO45QXtwQr0qbfpbK6Rg,21934
 camel/utils/constants.py,sha256=MQD3bgLIq_NATp0D1iFkrwfkCwVX-PAOSXheTkkEdkY,1410
 camel/utils/response_format.py,sha256=9KrbwtOM9cA3LSjTgLiK7oKy-53_uMh1cvpyNwwJpng,2419
 camel/utils/token_counting.py,sha256=wLVgCFiLOWAzW2NtrZ-1t1VIsf2MT8hVAbc3pqAx4V4,15319
-camel_ai-0.2.17.dist-info/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
-camel_ai-0.2.17.dist-info/METADATA,sha256=8KO-En_8opfeGKUAok5Q0VRtPyqKBgcqrZuAswFfA4M,35136
-camel_ai-0.2.17.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-camel_ai-0.2.17.dist-info/RECORD,,
+camel_ai-0.2.18.dist-info/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
+camel_ai-0.2.18.dist-info/METADATA,sha256=7ecOLcwfdyOmJJn0p5pqRC270A3KxndMP6yuZnhZGEc,35136
+camel_ai-0.2.18.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+camel_ai-0.2.18.dist-info/RECORD,,