camel-ai 0.2.16__py3-none-any.whl → 0.2.18__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (51)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +30 -6
  3. camel/agents/multi_hop_generator_agent.py +85 -0
  4. camel/agents/programmed_agent_instruction.py +148 -0
  5. camel/benchmarks/__init__.py +2 -0
  6. camel/benchmarks/apibank.py +5 -0
  7. camel/benchmarks/apibench.py +8 -4
  8. camel/benchmarks/gaia.py +2 -2
  9. camel/benchmarks/ragbench.py +333 -0
  10. camel/bots/__init__.py +1 -1
  11. camel/bots/discord/__init__.py +26 -0
  12. camel/bots/discord/discord_app.py +384 -0
  13. camel/bots/discord/discord_installation.py +64 -0
  14. camel/bots/discord/discord_store.py +160 -0
  15. camel/configs/__init__.py +3 -0
  16. camel/configs/anthropic_config.py +17 -15
  17. camel/configs/deepseek_config.py +2 -2
  18. camel/configs/internlm_config.py +60 -0
  19. camel/data_collector/base.py +5 -5
  20. camel/data_collector/sharegpt_collector.py +2 -2
  21. camel/datagen/self_instruct/self_instruct.py +4 -1
  22. camel/datagen/self_instruct/templates.py +12 -14
  23. camel/interpreters/internal_python_interpreter.py +24 -7
  24. camel/loaders/__init__.py +2 -0
  25. camel/loaders/panda_reader.py +337 -0
  26. camel/messages/__init__.py +10 -4
  27. camel/messages/func_message.py +30 -22
  28. camel/models/__init__.py +2 -0
  29. camel/models/anthropic_model.py +1 -22
  30. camel/models/cohere_model.py +8 -0
  31. camel/models/deepseek_model.py +67 -0
  32. camel/models/gemini_model.py +10 -1
  33. camel/models/internlm_model.py +143 -0
  34. camel/models/mistral_model.py +14 -7
  35. camel/models/model_factory.py +3 -0
  36. camel/models/reward/__init__.py +2 -0
  37. camel/models/reward/skywork_model.py +88 -0
  38. camel/synthetic_datagen/source2synth/data_processor.py +373 -0
  39. camel/synthetic_datagen/source2synth/models.py +68 -0
  40. camel/synthetic_datagen/source2synth/user_data_processor_config.py +73 -0
  41. camel/toolkits/google_scholar_toolkit.py +9 -0
  42. camel/types/__init__.py +4 -2
  43. camel/types/enums.py +81 -1
  44. camel/types/openai_types.py +6 -4
  45. camel/types/unified_model_type.py +5 -0
  46. camel/utils/token_counting.py +3 -3
  47. {camel_ai-0.2.16.dist-info → camel_ai-0.2.18.dist-info}/METADATA +158 -187
  48. {camel_ai-0.2.16.dist-info → camel_ai-0.2.18.dist-info}/RECORD +50 -37
  49. {camel_ai-0.2.16.dist-info → camel_ai-0.2.18.dist-info}/WHEEL +1 -1
  50. camel/bots/discord_app.py +0 -138
  51. {camel_ai-0.2.16.dist-info → camel_ai-0.2.18.dist-info}/LICENSE +0 -0
camel/types/enums.py CHANGED
@@ -54,6 +54,19 @@ class ModelType(UnifiedModelType, Enum):
     GROQ_GEMMA_7B_IT = "gemma-7b-it"
     GROQ_GEMMA_2_9B_IT = "gemma2-9b-it"
 
+    # TogetherAI platform models support tool calling
+    TOGETHER_LLAMA_3_1_8B = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"
+    TOGETHER_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
+    TOGETHER_LLAMA_3_1_405B = "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo"
+    TOGETHER_LLAMA_3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
+    TOGETHER_MIXTRAL_8_7B = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    TOGETHER_MISTRAL_7B = "mistralai/Mistral-7B-Instruct-v0.1"
+
+    # SambaNova Cloud platform models support tool calling
+    SAMBA_LLAMA_3_1_8B = "Meta-Llama-3.1-8B-Instruct"
+    SAMBA_LLAMA_3_1_70B = "Meta-Llama-3.1-70B-Instruct"
+    SAMBA_LLAMA_3_1_405B = "Meta-Llama-3.1-405B-Instruct"
+
     STUB = "stub"
 
     # Legacy anthropic models
@@ -141,6 +154,12 @@ class ModelType(UnifiedModelType, Enum):
 
     # DeepSeek models
     DEEPSEEK_CHAT = "deepseek-chat"
+    DEEPSEEK_REASONER = "deepseek-reasoner"
+    # InternLM models
+    INTERNLM3_LATEST = "internlm3-latest"
+    INTERNLM3_8B_INSTRUCT = "internlm3-8b-instruct"
+    INTERNLM2_5_LATEST = "internlm2.5-latest"
+    INTERNLM2_PRO_CHAT = "internlm2-pro-chat"
 
     def __str__(self):
         return self.value
@@ -161,7 +180,17 @@ class ModelType(UnifiedModelType, Enum):
     @property
     def support_native_tool_calling(self) -> bool:
         return any(
-            [self.is_openai, self.is_gemini, self.is_mistral, self.is_qwen]
+            [
+                self.is_openai,
+                self.is_gemini,
+                self.is_mistral,
+                self.is_qwen,
+                self.is_deepseek,
+                self.is_cohere,
+                self.is_internlm,
+                self.is_together,
+                self.is_sambanova,
+            ]
         )
 
     @property
@@ -234,6 +263,27 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GROQ_GEMMA_2_9B_IT,
         }
 
+    @property
+    def is_together(self) -> bool:
+        r"""Returns whether this type of models is served by Together AI."""
+        return self in {
+            ModelType.TOGETHER_LLAMA_3_1_405B,
+            ModelType.TOGETHER_LLAMA_3_1_70B,
+            ModelType.TOGETHER_LLAMA_3_3_70B,
+            ModelType.TOGETHER_LLAMA_3_3_70B,
+            ModelType.TOGETHER_MISTRAL_7B,
+            ModelType.TOGETHER_MIXTRAL_8_7B,
+        }
+
+    @property
+    def is_sambanova(self) -> bool:
+        r"""Returns whether this type of models is served by SambaNova AI."""
+        return self in {
+            ModelType.SAMBA_LLAMA_3_1_8B,
+            ModelType.SAMBA_LLAMA_3_1_70B,
+            ModelType.SAMBA_LLAMA_3_1_405B,
+        }
+
     @property
     def is_mistral(self) -> bool:
         r"""Returns whether this type of models is served by Mistral."""
@@ -351,6 +401,16 @@ class ModelType(UnifiedModelType, Enum):
     def is_deepseek(self) -> bool:
         return self in {
             ModelType.DEEPSEEK_CHAT,
+            ModelType.DEEPSEEK_REASONER,
+        }
+
+    @property
+    def is_internlm(self) -> bool:
+        return self in {
+            ModelType.INTERNLM3_LATEST,
+            ModelType.INTERNLM3_8B_INSTRUCT,
+            ModelType.INTERNLM2_5_LATEST,
+            ModelType.INTERNLM2_PRO_CHAT,
         }
 
     @property
@@ -386,6 +446,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GLM_4,
             ModelType.QWEN_VL_PLUS,
             ModelType.NVIDIA_LLAMA3_70B,
+            ModelType.TOGETHER_MISTRAL_7B,
         }:
             return 8_192
         elif self in {
@@ -396,6 +457,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.YI_VISION,
             ModelType.YI_SPARK,
             ModelType.YI_LARGE_RAG,
+            ModelType.SAMBA_LLAMA_3_1_8B,
+            ModelType.SAMBA_LLAMA_3_1_405B,
         }:
             return 16_384
         elif self in {
@@ -411,11 +474,17 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_MISTRAL_LARGE,
             ModelType.NVIDIA_MIXTRAL_8X7B,
             ModelType.QWEN_QWQ_32B,
+            ModelType.INTERNLM3_8B_INSTRUCT,
+            ModelType.INTERNLM3_LATEST,
+            ModelType.INTERNLM2_5_LATEST,
+            ModelType.INTERNLM2_PRO_CHAT,
+            ModelType.TOGETHER_MIXTRAL_8_7B,
         }:
             return 32_768
         elif self in {
             ModelType.MISTRAL_MIXTRAL_8x22B,
             ModelType.DEEPSEEK_CHAT,
+            ModelType.DEEPSEEK_REASONER,
         }:
             return 64_000
         elif self in {
@@ -448,6 +517,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_LLAMA3_2_3B_INSTRUCT,
             ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT,
             ModelType.GROQ_LLAMA_3_3_70B,
+            ModelType.SAMBA_LLAMA_3_1_70B,
        }:
             return 128_000
         elif self in {
@@ -457,6 +527,10 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.QWEN_PLUS,
             ModelType.QWEN_TURBO,
             ModelType.QWEN_CODER_TURBO,
+            ModelType.TOGETHER_LLAMA_3_1_8B,
+            ModelType.TOGETHER_LLAMA_3_1_70B,
+            ModelType.TOGETHER_LLAMA_3_1_405B,
+            ModelType.TOGETHER_LLAMA_3_3_70B,
         }:
             return 131_072
         elif self in {
@@ -634,6 +708,7 @@ class ModelPlatformType(Enum):
     NVIDIA = "nvidia"
     DEEPSEEK = "deepseek"
     SGLANG = "sglang"
+    INTERNLM = "internlm"
 
     @property
     def is_openai(self) -> bool:
@@ -736,6 +811,11 @@
         r"""Returns whether this platform is DeepSeek."""
         return self is ModelPlatformType.DEEPSEEK
 
+    @property
+    def is_internlm(self) -> bool:
+        r"""Returns whether this platform is InternLM."""
+        return self is ModelPlatformType.INTERNLM
+
 
 class AudioModelType(Enum):
     TTS_1 = "tts-1"
camel/types/openai_types.py CHANGED
@@ -16,10 +16,10 @@ from openai.types.chat.chat_completion import ChatCompletion, Choice
 from openai.types.chat.chat_completion_assistant_message_param import (
     ChatCompletionAssistantMessageParam,
 )
-from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-from openai.types.chat.chat_completion_function_message_param import (
-    ChatCompletionFunctionMessageParam,
+from openai.types.chat.chat_completion_tool_message_param import (
+    ChatCompletionToolMessageParam,
 )
+from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
 from openai.types.chat.chat_completion_message import ChatCompletionMessage
 from openai.types.chat.chat_completion_message_param import (
     ChatCompletionMessageParam,
@@ -33,6 +33,7 @@ from openai.types.chat.chat_completion_user_message_param import (
 from openai.types.completion_usage import CompletionUsage
 from openai.types.chat import ParsedChatCompletion
 from openai._types import NOT_GIVEN, NotGiven
+from openai.types.chat import ChatCompletionMessageToolCall
 
 Choice = Choice
 ChatCompletion = ChatCompletion
@@ -42,7 +43,8 @@ ChatCompletionMessageParam = ChatCompletionMessageParam
 ChatCompletionSystemMessageParam = ChatCompletionSystemMessageParam
 ChatCompletionUserMessageParam = ChatCompletionUserMessageParam
 ChatCompletionAssistantMessageParam = ChatCompletionAssistantMessageParam
-ChatCompletionFunctionMessageParam = ChatCompletionFunctionMessageParam
+ChatCompletionToolMessageParam = ChatCompletionToolMessageParam
+ChatCompletionMessageToolCall = ChatCompletionMessageToolCall
 CompletionUsage = CompletionUsage
 NOT_GIVEN = NOT_GIVEN
 NotGiven = NotGiven
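This file swaps the deprecated function-role message type for its tool-role equivalent and re-exports the tool-call payload type. A minimal sketch of the message shape this implies (the tool_call_id value is illustrative, and importing the name from camel.types assumes the re-export that the +4 -2 change to camel/types/__init__.py suggests; importing from openai.types.chat works either way):

    from camel.types import ChatCompletionToolMessageParam

    tool_msg: ChatCompletionToolMessageParam = {
        "role": "tool",  # replaces the removed "function" role
        "content": '{"temperature_c": 21}',
        "tool_call_id": "call_abc123",  # hypothetical id echoed from the assistant's tool call
    }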
camel/types/unified_model_type.py CHANGED
@@ -113,6 +113,11 @@ class UnifiedModelType(str):
         r"""Returns whether the model is a Qwen model."""
         return True
 
+    @property
+    def is_internlm(self) -> bool:
+        r"""Returns whether the model is a InternLM model."""
+        return True
+
     @property
     def support_native_structured_output(self) -> bool:
         r"""Returns whether the model supports native structured output."""
camel/utils/token_counting.py CHANGED
@@ -253,11 +253,11 @@ class AnthropicTokenCounter(BaseTokenCounter):
         Returns:
             int: Number of tokens in the messages.
         """
-        from anthropic.types.beta import BetaMessageParam
+        from anthropic.types import MessageParam
 
-        return self.client.beta.messages.count_tokens(
+        return self.client.messages.count_tokens(
             messages=[
-                BetaMessageParam(
+                MessageParam(
                     content=str(msg["content"]),
                     role="user" if msg["role"] == "user" else "assistant",
                 )
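The Anthropic token counter moves off the beta namespace onto the generally available counting endpoint. A self-contained sketch of that endpoint (the model id is illustrative, and the hunk's context ends before the counter's remaining arguments):

    from anthropic import Anthropic

    client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment
    count = client.messages.count_tokens(
        model="claude-3-5-sonnet-latest",  # illustrative model id
        messages=[{"role": "user", "content": "Hello, Claude"}],
    )
    print(count.input_tokens)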