camel-ai 0.1.5.6__py3-none-any.whl → 0.1.5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of camel-ai might be problematic.
Files changed (97)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +3 -3
  3. camel/agents/critic_agent.py +1 -1
  4. camel/agents/deductive_reasoner_agent.py +4 -4
  5. camel/agents/embodied_agent.py +1 -1
  6. camel/agents/knowledge_graph_agent.py +2 -2
  7. camel/agents/role_assignment_agent.py +1 -1
  8. camel/agents/search_agent.py +4 -5
  9. camel/agents/task_agent.py +5 -5
  10. camel/configs/__init__.py +9 -0
  11. camel/configs/gemini_config.py +15 -14
  12. camel/configs/groq_config.py +119 -0
  13. camel/configs/litellm_config.py +1 -1
  14. camel/configs/mistral_config.py +81 -0
  15. camel/configs/ollama_config.py +1 -1
  16. camel/configs/openai_config.py +1 -1
  17. camel/configs/vllm_config.py +103 -0
  18. camel/configs/zhipuai_config.py +1 -1
  19. camel/embeddings/__init__.py +2 -0
  20. camel/embeddings/mistral_embedding.py +89 -0
  21. camel/interpreters/__init__.py +2 -0
  22. camel/interpreters/ipython_interpreter.py +167 -0
  23. camel/models/__init__.py +8 -0
  24. camel/models/anthropic_model.py +7 -2
  25. camel/models/azure_openai_model.py +152 -0
  26. camel/models/base_model.py +9 -2
  27. camel/models/gemini_model.py +14 -2
  28. camel/models/groq_model.py +131 -0
  29. camel/models/litellm_model.py +26 -4
  30. camel/models/mistral_model.py +169 -0
  31. camel/models/model_factory.py +30 -3
  32. camel/models/ollama_model.py +21 -2
  33. camel/models/open_source_model.py +11 -3
  34. camel/models/openai_model.py +7 -2
  35. camel/models/stub_model.py +4 -4
  36. camel/models/vllm_model.py +138 -0
  37. camel/models/zhipuai_model.py +7 -4
  38. camel/prompts/__init__.py +2 -2
  39. camel/prompts/task_prompt_template.py +4 -4
  40. camel/prompts/{descripte_video_prompt.py → video_description_prompt.py} +1 -1
  41. camel/retrievers/auto_retriever.py +2 -0
  42. camel/storages/graph_storages/neo4j_graph.py +5 -0
  43. camel/toolkits/__init__.py +36 -0
  44. camel/toolkits/base.py +1 -1
  45. camel/toolkits/code_execution.py +1 -1
  46. camel/toolkits/github_toolkit.py +3 -2
  47. camel/toolkits/google_maps_toolkit.py +367 -0
  48. camel/toolkits/math_toolkit.py +79 -0
  49. camel/toolkits/open_api_toolkit.py +548 -0
  50. camel/toolkits/retrieval_toolkit.py +76 -0
  51. camel/toolkits/search_toolkit.py +326 -0
  52. camel/toolkits/slack_toolkit.py +308 -0
  53. camel/toolkits/twitter_toolkit.py +522 -0
  54. camel/toolkits/weather_toolkit.py +173 -0
  55. camel/types/enums.py +149 -34
  56. camel/utils/__init__.py +2 -0
  57. camel/utils/async_func.py +1 -1
  58. camel/utils/token_counting.py +148 -40
  59. {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.5.9.dist-info}/METADATA +42 -3
  60. camel_ai-0.1.5.9.dist-info/RECORD +165 -0
  61. camel/functions/__init__.py +0 -51
  62. camel/functions/google_maps_function.py +0 -335
  63. camel/functions/math_functions.py +0 -61
  64. camel/functions/open_api_function.py +0 -508
  65. camel/functions/retrieval_functions.py +0 -61
  66. camel/functions/search_functions.py +0 -298
  67. camel/functions/slack_functions.py +0 -286
  68. camel/functions/twitter_function.py +0 -479
  69. camel/functions/weather_functions.py +0 -144
  70. camel_ai-0.1.5.6.dist-info/RECORD +0 -157
  71. /camel/{functions → toolkits}/open_api_specs/biztoc/__init__.py +0 -0
  72. /camel/{functions → toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
  73. /camel/{functions → toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
  74. /camel/{functions → toolkits}/open_api_specs/coursera/__init__.py +0 -0
  75. /camel/{functions → toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
  76. /camel/{functions → toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
  77. /camel/{functions → toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
  78. /camel/{functions → toolkits}/open_api_specs/klarna/__init__.py +0 -0
  79. /camel/{functions → toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
  80. /camel/{functions → toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
  81. /camel/{functions → toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
  82. /camel/{functions → toolkits}/open_api_specs/outschool/__init__.py +0 -0
  83. /camel/{functions → toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
  84. /camel/{functions → toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
  85. /camel/{functions → toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
  86. /camel/{functions → toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
  87. /camel/{functions → toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
  88. /camel/{functions → toolkits}/open_api_specs/security_config.py +0 -0
  89. /camel/{functions → toolkits}/open_api_specs/speak/__init__.py +0 -0
  90. /camel/{functions → toolkits}/open_api_specs/speak/openapi.yaml +0 -0
  91. /camel/{functions → toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
  92. /camel/{functions → toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
  93. /camel/{functions → toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
  94. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
  95. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
  96. /camel/{functions → toolkits}/openai_function.py +0 -0
  97. {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.5.9.dist-info}/WHEEL +0 -0
camel/types/enums.py CHANGED
@@ -29,11 +29,22 @@ class ModelType(Enum):
     GPT_4_32K = "gpt-4-32k"
     GPT_4_TURBO = "gpt-4-turbo"
     GPT_4O = "gpt-4o"
+    GPT_4O_MINI = "gpt-4o-mini"
+
     GLM_4 = "glm-4"
     GLM_4_OPEN_SOURCE = "glm-4-open-source"
     GLM_4V = 'glm-4v'
     GLM_3_TURBO = "glm-3-turbo"
 
+    GROQ_LLAMA_3_1_8B = "llama-3.1-8b-instant"
+    GROQ_LLAMA_3_1_70B = "llama-3.1-70b-versatile"
+    GROQ_LLAMA_3_1_405B = "llama-3.1-405b-reasoning"
+    GROQ_LLAMA_3_8B = "llama3-8b-8192"
+    GROQ_LLAMA_3_70B = "llama3-70b-8192"
+    GROQ_MIXTRAL_8_7B = "mixtral-8x7b-32768"
+    GROQ_GEMMA_7B_IT = "gemma-7b-it"
+    GROQ_GEMMA_2_9B_IT = "gemma2-9b-it"
+
     STUB = "stub"
 
     LLAMA_2 = "llama-2"
@@ -44,7 +55,7 @@ class ModelType(Enum):
     QWEN_2 = "qwen-2"
 
     # Legacy anthropic models
-    # NOTE: anthropic lagecy models only Claude 2.1 has system prompt support
+    # NOTE: anthropic legacy models only Claude 2.1 has system prompt support
     CLAUDE_2_1 = "claude-2.1"
     CLAUDE_2_0 = "claude-2.0"
     CLAUDE_INSTANT_1_2 = "claude-instant-1.2"
@@ -62,6 +73,15 @@ class ModelType(Enum):
     GEMINI_1_5_FLASH = "gemini-1.5-flash"
     GEMINI_1_5_PRO = "gemini-1.5-pro"
 
+    # Mistral AI Model
+    MISTRAL_LARGE = "mistral-large-latest"
+    MISTRAL_NEMO = "open-mistral-nemo"
+    MISTRAL_CODESTRAL = "codestral-latest"
+    MISTRAL_7B = "open-mistral-7b"
+    MISTRAL_MIXTRAL_8x7B = "open-mixtral-8x7b"
+    MISTRAL_MIXTRAL_8x22B = "open-mixtral-8x22b"
+    MISTRAL_CODESTRAL_MAMBA = "open-codestral-mamba"
+
     @property
     def value_for_tiktoken(self) -> str:
         return (
@@ -73,6 +93,20 @@ class ModelType(Enum):
     @property
     def is_openai(self) -> bool:
         r"""Returns whether this type of models is an OpenAI-released model."""
+        return self in {
+            ModelType.GPT_3_5_TURBO,
+            ModelType.GPT_4,
+            ModelType.GPT_4_32K,
+            ModelType.GPT_4_TURBO,
+            ModelType.GPT_4O,
+            ModelType.GPT_4O_MINI,
+        }
+
+    @property
+    def is_azure_openai(self) -> bool:
+        r"""Returns whether this type of models is an OpenAI-released model
+        from Azure.
+        """
         return self in {
             ModelType.GPT_3_5_TURBO,
             ModelType.GPT_4,
@@ -119,6 +153,33 @@ class ModelType(Enum):
             ModelType.CLAUDE_3_5_SONNET,
         }
 
+    @property
+    def is_groq(self) -> bool:
+        r"""Returns whether this type of models is served by Groq."""
+        return self in {
+            ModelType.GROQ_LLAMA_3_1_8B,
+            ModelType.GROQ_LLAMA_3_1_70B,
+            ModelType.GROQ_LLAMA_3_1_405B,
+            ModelType.GROQ_LLAMA_3_8B,
+            ModelType.GROQ_LLAMA_3_70B,
+            ModelType.GROQ_MIXTRAL_8_7B,
+            ModelType.GROQ_GEMMA_7B_IT,
+            ModelType.GROQ_GEMMA_2_9B_IT,
+        }
+
+    @property
+    def is_mistral(self) -> bool:
+        r"""Returns whether this type of models is served by Mistral."""
+        return self in {
+            ModelType.MISTRAL_LARGE,
+            ModelType.MISTRAL_NEMO,
+            ModelType.MISTRAL_CODESTRAL,
+            ModelType.MISTRAL_7B,
+            ModelType.MISTRAL_MIXTRAL_8x7B,
+            ModelType.MISTRAL_MIXTRAL_8x22B,
+            ModelType.MISTRAL_CODESTRAL_MAMBA,
+        }
+
     @property
     def is_nvidia(self) -> bool:
         r"""Returns whether this type of models is Nvidia-released model.
@@ -137,46 +198,63 @@ class ModelType(Enum):
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
+
         Returns:
             int: The maximum token limit for the given model.
         """
-        if self is ModelType.GPT_3_5_TURBO:
-            return 16385
-        elif self is ModelType.GPT_4:
-            return 8192
-        elif self is ModelType.GPT_4_32K:
-            return 32768
-        elif self is ModelType.GPT_4_TURBO:
-            return 128000
-        elif self is ModelType.GPT_4O:
-            return 128000
-        elif self == ModelType.GEMINI_1_5_FLASH:
-            return 1048576
-        elif self == ModelType.GEMINI_1_5_PRO:
-            return 1048576
-        elif self == ModelType.GLM_4_OPEN_SOURCE:
-            return 8192
-        elif self == ModelType.GLM_3_TURBO:
-            return 8192
-        elif self == ModelType.GLM_4V:
+        if self is ModelType.GLM_4V:
             return 1024
-        elif self is ModelType.STUB:
-            return 4096
-        elif self is ModelType.LLAMA_2:
-            return 4096
-        elif self is ModelType.LLAMA_3:
-            return 8192
-        elif self is ModelType.QWEN_2:
-            return 128000
-        elif self is ModelType.GLM_4:
-            return 8192
         elif self is ModelType.VICUNA:
             # reference: https://lmsys.org/blog/2023-03-30-vicuna/
             return 2048
+        elif self in {
+            ModelType.GPT_3_5_TURBO,
+            ModelType.LLAMA_2,
+            ModelType.NEMOTRON_4_REWARD,
+            ModelType.STUB,
+        }:
+            return 4_096
+        elif self in {
+            ModelType.GPT_4,
+            ModelType.GROQ_LLAMA_3_8B,
+            ModelType.GROQ_LLAMA_3_70B,
+            ModelType.GROQ_GEMMA_7B_IT,
+            ModelType.GROQ_GEMMA_2_9B_IT,
+            ModelType.LLAMA_3,
+            ModelType.GLM_3_TURBO,
+            ModelType.GLM_4,
+            ModelType.GLM_4_OPEN_SOURCE,
+        }:
+            return 8_192
         elif self is ModelType.VICUNA_16K:
-            return 16384
+            return 16_384
+        elif self in {
+            ModelType.GPT_4_32K,
+            ModelType.MISTRAL_CODESTRAL,
+            ModelType.MISTRAL_7B,
+            ModelType.MISTRAL_MIXTRAL_8x7B,
+            ModelType.GROQ_MIXTRAL_8_7B,
+        }:
+            return 32_768
+        elif self in {ModelType.MISTRAL_MIXTRAL_8x22B}:
+            return 64_000
         elif self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
             return 100_000
+        elif self in {
+            ModelType.GPT_4O,
+            ModelType.GPT_4O_MINI,
+            ModelType.GPT_4_TURBO,
+            ModelType.MISTRAL_LARGE,
+            ModelType.MISTRAL_NEMO,
+            ModelType.QWEN_2,
+        }:
+            return 128_000
+        elif self in {
+            ModelType.GROQ_LLAMA_3_1_8B,
+            ModelType.GROQ_LLAMA_3_1_70B,
+            ModelType.GROQ_LLAMA_3_1_405B,
+        }:
+            return 131_072
         elif self in {
             ModelType.CLAUDE_2_1,
             ModelType.CLAUDE_3_OPUS,
@@ -185,8 +263,12 @@
             ModelType.CLAUDE_3_5_SONNET,
         }:
             return 200_000
-        elif self is ModelType.NEMOTRON_4_REWARD:
-            return 4096
+        elif self in {
+            ModelType.MISTRAL_CODESTRAL_MAMBA,
+        }:
+            return 256_000
+        elif self in {ModelType.GEMINI_1_5_FLASH, ModelType.GEMINI_1_5_PRO}:
+            return 1_048_576
         else:
             raise ValueError("Unknown model type")
 
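
Editor's note: the rewrite above folds the one-model-per-branch chain into shared buckets keyed by context window. Reading the limits back out is unchanged; a quick sketch of what the new buckets resolve to, taken directly from the hunk:

```python
from camel.types import ModelType

# Values follow directly from the buckets defined above.
assert ModelType.GROQ_LLAMA_3_1_70B.token_limit == 131_072
assert ModelType.GPT_4O_MINI.token_limit == 128_000
assert ModelType.MISTRAL_CODESTRAL_MAMBA.token_limit == 256_000
assert ModelType.GEMINI_1_5_FLASH.token_limit == 1_048_576
```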
@@ -195,8 +277,9 @@
 
         Args:
             model_name (str): The name of the model, e.g. "vicuna-7b-v1.5".
+
         Returns:
-            bool: Whether the model type mathches the model name.
+            bool: Whether the model type matches the model name.
         """
         if self is ModelType.VICUNA:
             pattern = r'^vicuna-\d+b-v\d+\.\d+$'
@@ -232,6 +315,8 @@ class EmbeddingModelType(Enum):
     TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small"
     TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"
 
+    MISTRAL_EMBED = "mistral-embed"
+
     @property
     def is_openai(self) -> bool:
         r"""Returns whether this type of models is an OpenAI-released model."""
@@ -241,6 +326,15 @@
             EmbeddingModelType.TEXT_EMBEDDING_3_LARGE,
         }
 
+    @property
+    def is_mistral(self) -> bool:
+        r"""Returns whether this type of models is an Mistral-released
+        model.
+        """
+        return self in {
+            EmbeddingModelType.MISTRAL_EMBED,
+        }
+
     @property
     def output_dim(self) -> int:
         if self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
@@ -249,6 +343,8 @@
             return 1536
         elif self is EmbeddingModelType.TEXT_EMBEDDING_3_LARGE:
             return 3072
+        elif self is EmbeddingModelType.MISTRAL_EMBED:
+            return 1024
         else:
             raise ValueError(f"Unknown model type {self}.")
 
@@ -285,6 +381,7 @@ class OpenAIBackendRole(Enum):
     SYSTEM = "system"
     USER = "user"
     FUNCTION = "function"
+    TOOL = "tool"
 
 
 class TerminationMode(Enum):
@@ -338,12 +435,15 @@ class ModelPlatformType(Enum):
     OPENAI = "openai"
     AZURE = "azure"
     ANTHROPIC = "anthropic"
+    GROQ = "groq"
     OPENSOURCE = "opensource"
     OLLAMA = "ollama"
     LITELLM = "litellm"
     ZHIPU = "zhipuai"
     DEFAULT = "default"
     GEMINI = "gemini"
+    VLLM = "vllm"
+    MISTRAL = "mistral"
 
     @property
     def is_openai(self) -> bool:
@@ -360,11 +460,21 @@
         r"""Returns whether this platform is anthropic."""
         return self is ModelPlatformType.ANTHROPIC
 
+    @property
+    def is_groq(self) -> bool:
+        r"""Returns whether this platform is groq."""
+        return self is ModelPlatformType.GROQ
+
     @property
     def is_ollama(self) -> bool:
         r"""Returns whether this platform is ollama."""
         return self is ModelPlatformType.OLLAMA
 
+    @property
+    def is_vllm(self) -> bool:
+        r"""Returns whether this platform is vllm."""
+        return self is ModelPlatformType.VLLM
+
     @property
     def is_litellm(self) -> bool:
         r"""Returns whether this platform is litellm."""
@@ -375,6 +485,11 @@
         r"""Returns whether this platform is zhipu."""
         return self is ModelPlatformType.ZHIPU
 
+    @property
+    def is_mistral(self) -> bool:
+        r"""Returns whether this platform is mistral."""
+        return self is ModelPlatformType.MISTRAL
+
     @property
     def is_open_source(self) -> bool:
         r"""Returns whether this platform is opensource."""
camel/utils/__init__.py CHANGED
@@ -34,6 +34,7 @@ from .token_counting import (
     BaseTokenCounter,
     GeminiTokenCounter,
     LiteLLMTokenCounter,
+    MistralTokenCounter,
     OpenAITokenCounter,
     OpenSourceTokenCounter,
     get_model_encoding,
@@ -62,4 +63,5 @@ __all__ = [
     'api_keys_required',
     'is_docker_running',
     'GeminiTokenCounter',
+    'MistralTokenCounter',
 ]
camel/utils/async_func.py CHANGED
@@ -14,7 +14,7 @@
 import asyncio
 from copy import deepcopy
 
-from camel.functions.openai_function import OpenAIFunction
+from camel.toolkits import OpenAIFunction
 
 
 def sync_funcs_to_async(funcs: list[OpenAIFunction]) -> list[OpenAIFunction]:
camel/utils/token_counting.py CHANGED
@@ -26,6 +26,8 @@ from PIL import Image
 from camel.types import ModelType, OpenAIImageType, OpenAIVisionDetailType
 
 if TYPE_CHECKING:
+    from mistral_common.protocol.instruct.request import ChatCompletionRequest
+
     from camel.messages import OpenAIMessage
 
 LOW_DETAIL_TOKENS = 85
@@ -37,7 +39,7 @@ EXTRA_TOKENS = 85
 
 
 def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
-    r"""Parse the message list into a single prompt following model-specifc
+    r"""Parse the message list into a single prompt following model-specific
     formats.
 
     Args:
@@ -51,7 +53,12 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
     system_message = messages[0]["content"]
 
     ret: str
-    if model == ModelType.LLAMA_2 or model == ModelType.LLAMA_3:
+    if model in [
+        ModelType.LLAMA_2,
+        ModelType.LLAMA_3,
+        ModelType.GROQ_LLAMA_3_8B,
+        ModelType.GROQ_LLAMA_3_70B,
+    ]:
         # reference: https://github.com/facebookresearch/llama/blob/cfc3fc8c1968d390eb830e65c63865e980873a06/llama/generation.py#L212
         seps = [" ", " </s><s>"]
         role_map = {"user": "[INST]", "assistant": "[/INST]"}
@@ -74,7 +81,7 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
         else:
             ret += role
         return ret
-    elif model == ModelType.VICUNA or model == ModelType.VICUNA_16K:
+    elif model in [ModelType.VICUNA, ModelType.VICUNA_16K]:
         seps = [" ", "</s>"]
         role_map = {"user": "USER", "assistant": "ASSISTANT"}
 
@@ -132,6 +139,40 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
         else:
             ret += '<|im_start|>' + role + '\n'
         return ret
+    elif model == ModelType.GROQ_MIXTRAL_8_7B:
+        # Mistral/Mixtral format
+        system_prompt = f"<s>[INST] {system_message} [/INST]\n"
+        ret = system_prompt
+
+        for msg in messages[1:]:
+            if msg["role"] == "user":
+                ret += f"[INST] {msg['content']} [/INST]\n"
+            elif msg["role"] == "assistant":
+                ret += f"{msg['content']}</s>\n"
+
+            if not isinstance(msg['content'], str):
+                raise ValueError(
+                    "Currently multimodal context is not "
+                    "supported by the token counter."
+                )
+
+        return ret.strip()
+    elif model in [ModelType.GROQ_GEMMA_7B_IT, ModelType.GROQ_GEMMA_2_9B_IT]:
+        # Gemma format
+        ret = f"<bos>{system_message}\n"
+        for msg in messages:
+            if msg["role"] == "user":
+                ret += f"Human: {msg['content']}\n"
+            elif msg["role"] == "assistant":
+                ret += f"Assistant: {msg['content']}\n"
+
+            if not isinstance(msg['content'], str):
+                raise ValueError(
+                    "Currently multimodal context is not supported by the token counter."
+                )
+
+        ret += "<eos>"
+        return ret
     else:
         raise ValueError(f"Invalid model type: {model}")
 
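
Editor's note: traced by hand from the new Gemma branch above, a three-message chat renders to a flat prompt as follows (the system message seeds the `<bos>` line and is skipped by the role checks inside the loop):

```python
from camel.types import ModelType
from camel.utils.token_counting import messages_to_prompt

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hi to CAMEL AI"},
    {"role": "assistant", "content": "Hi CAMEL AI!"},
]
prompt = messages_to_prompt(messages, ModelType.GROQ_GEMMA_2_9B_IT)
# prompt is:
# <bos>You are a helpful assistant.
# Human: Say hi to CAMEL AI
# Assistant: Hi CAMEL AI!
# <eos>
```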
@@ -232,6 +273,7 @@ class OpenAITokenCounter(BaseTokenCounter):
             model (ModelType): Model type for which tokens will be counted.
         """
         self.model: str = model.value_for_tiktoken
+        self.model_type = model
 
         self.tokens_per_message: int
         self.tokens_per_name: int
@@ -300,7 +342,7 @@
                         base64.b64decode(encoded_image)
                     )
                     image = Image.open(image_bytes)
-                    num_tokens += count_tokens_from_image(
+                    num_tokens += self._count_tokens_from_image(
                         image, OpenAIVisionDetailType(detail)
                     )
                 if key == "name":
@@ -310,6 +352,45 @@
                 num_tokens += 3
         return num_tokens
 
+    def _count_tokens_from_image(
+        self, image: Image.Image, detail: OpenAIVisionDetailType
+    ) -> int:
+        r"""Count image tokens for OpenAI vision model. An :obj:`"auto"`
+        resolution model will be treated as :obj:`"high"`. All images with
+        :obj:`"low"` detail cost 85 tokens each. Images with :obj:`"high"` detail
+        are first scaled to fit within a 2048 x 2048 square, maintaining their
+        aspect ratio. Then, they are scaled such that the shortest side of the
+        image is 768px long. Finally, we count how many 512px squares the image
+        consists of. Each of those squares costs 170 tokens. Another 85 tokens are
+        always added to the final total. For more details please refer to `OpenAI
+        vision docs <https://platform.openai.com/docs/guides/vision>`_
+
+        Args:
+            image (PIL.Image.Image): Image to count number of tokens.
+            detail (OpenAIVisionDetailType): Image detail type to count
+                number of tokens.
+
+        Returns:
+            int: Number of tokens for the image given a detail type.
+        """
+        if detail == OpenAIVisionDetailType.LOW:
+            return LOW_DETAIL_TOKENS
+
+        width, height = image.size
+        if width > FIT_SQUARE_PIXELS or height > FIT_SQUARE_PIXELS:
+            scaling_factor = max(width, height) / FIT_SQUARE_PIXELS
+            width = int(width / scaling_factor)
+            height = int(height / scaling_factor)
+
+        scaling_factor = min(width, height) / SHORTEST_SIDE_PIXELS
+        scaled_width = int(width / scaling_factor)
+        scaled_height = int(height / scaling_factor)
+
+        h = ceil(scaled_height / SQUARE_PIXELS)
+        w = ceil(scaled_width / SQUARE_PIXELS)
+        total = EXTRA_TOKENS + SQUARE_TOKENS * h * w
+        return total
+
 
 class AnthropicTokenCounter(BaseTokenCounter):
     def __init__(self, model_type: ModelType):
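
Editor's note: working the high-detail arithmetic above by hand for a 1024 x 2048 image: neither side exceeds `FIT_SQUARE_PIXELS` (2048), so the first rescale is skipped; the shortest side is then scaled to 768px, giving a 768 x 1536 image; that tiles into ceil(768/512) x ceil(1536/512) = 2 x 3 squares of 512px, so the total is 85 + 170 x 6 = 1105 tokens.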
@@ -428,41 +509,68 @@ class LiteLLMTokenCounter:
         return self.completion_cost(completion_response=response)
 
 
-def count_tokens_from_image(
-    image: Image.Image, detail: OpenAIVisionDetailType
-) -> int:
-    r"""Count image tokens for OpenAI vision model. An :obj:`"auto"`
-    resolution model will be treated as :obj:`"high"`. All images with
-    :obj:`"low"` detail cost 85 tokens each. Images with :obj:`"high"` detail
-    are first scaled to fit within a 2048 x 2048 square, maintaining their
-    aspect ratio. Then, they are scaled such that the shortest side of the
-    image is 768px long. Finally, we count how many 512px squares the image
-    consists of. Each of those squares costs 170 tokens. Another 85 tokens are
-    always added to the final total. For more details please refer to `OpenAI
-    vision docs <https://platform.openai.com/docs/guides/vision>`_
-
-    Args:
-        image (PIL.Image.Image): Image to count number of tokens.
-        detail (OpenAIVisionDetailType): Image detail type to count
-            number of tokens.
-
-    Returns:
-        int: Number of tokens for the image given a detail type.
-    """
-    if detail == OpenAIVisionDetailType.LOW:
-        return LOW_DETAIL_TOKENS
-
-    width, height = image.size
-    if width > FIT_SQUARE_PIXELS or height > FIT_SQUARE_PIXELS:
-        scaling_factor = max(width, height) / FIT_SQUARE_PIXELS
-        width = int(width / scaling_factor)
-        height = int(height / scaling_factor)
-
-    scaling_factor = min(width, height) / SHORTEST_SIDE_PIXELS
-    scaled_width = int(width / scaling_factor)
-    scaled_height = int(height / scaling_factor)
-
-    h = ceil(scaled_height / SQUARE_PIXELS)
-    w = ceil(scaled_width / SQUARE_PIXELS)
-    total = EXTRA_TOKENS + SQUARE_TOKENS * h * w
-    return total
+class MistralTokenCounter(BaseTokenCounter):
+    def __init__(self, model_type: ModelType):
+        r"""Constructor for the token counter for Mistral models.
+
+        Args:
+            model_type (ModelType): Model type for which tokens will be
+                counted.
+        """
+        from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
+
+        self.model_type = model_type
+
+        # Determine the model type and set the tokenizer accordingly
+        model_name = (
+            "codestral-22b"
+            if self.model_type
+            in {ModelType.MISTRAL_CODESTRAL, ModelType.MISTRAL_CODESTRAL_MAMBA}
+            else self.model_type.value
+        )
+
+        self.tokenizer = MistralTokenizer.from_model(model_name)
+
+    def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
+        r"""Count number of tokens in the provided message list using
+        loaded tokenizer specific for this type of model.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            int: Total number of tokens in the messages.
+        """
+        total_tokens = 0
+        for msg in messages:
+            tokens = self.tokenizer.encode_chat_completion(
+                self._convert_response_from_openai_to_mistral(msg)
+            ).tokens
+            total_tokens += len(tokens)
+        return total_tokens
+
+    def _convert_response_from_openai_to_mistral(
+        self, openai_msg: OpenAIMessage
+    ) -> ChatCompletionRequest:
+        r"""Convert an OpenAI message to a Mistral ChatCompletionRequest.
+
+        Args:
+            openai_msg (OpenAIMessage): An individual message with OpenAI
+                format.
+
+        Returns:
+            ChatCompletionRequest: The converted message in Mistral's request
+                format.
+        """
+        from mistral_common.protocol.instruct.request import (
+            ChatCompletionRequest,
+        )
+
+        mistral_request = ChatCompletionRequest(  # type: ignore[type-var]
+            model=self.model_type.value,
+            messages=[openai_msg],
+        )
+
+        return mistral_request
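
Editor's note: paired with the `camel/utils/__init__.py` change above, the new counter is importable from `camel.utils`. A minimal usage sketch, assuming the optional `mistral-common` dependency is installed:

```python
from camel.types import ModelType
from camel.utils import MistralTokenCounter

counter = MistralTokenCounter(ModelType.MISTRAL_LARGE)

# Each message is wrapped in a Mistral ChatCompletionRequest and tokenized
# with the official mistral_common tokenizer.
n = counter.count_tokens_from_messages(
    [{"role": "user", "content": "Say hi to CAMEL AI"}]
)
print(n)
```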
{camel_ai-0.1.5.6.dist-info → camel_ai-0.1.5.9.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.6
+Version: 0.1.5.9
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -38,10 +38,14 @@ Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: groq (>=0.5.0,<0.6.0)
 Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: ipykernel (>=6.0.0,<7.0.0)
 Requires-Dist: jsonschema (>=4,<5)
+Requires-Dist: jupyter_client (>=8.6.2,<9.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
-Requires-Dist: milvus-lite (>=2.4.0,<=2.4.7)
+Requires-Dist: mistral-common (>=1.3.3,<2.0.0) ; extra == "model-platforms" or extra == "all"
+Requires-Dist: mistralai (>=0.4.2,<0.5.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: mock (>=5,<6) ; extra == "test"
 Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
 Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
@@ -191,7 +195,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.6 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.9 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -317,6 +321,41 @@ Please note that the environment variable is session-specific. If you open a new
 print(assistant_response.msg.content)
 ```
 
+## Use Open-Source Models as Backends (ex. using vLLM to set Phi-3 locally)
+- [Install vLLM](https://docs.vllm.ai/en/latest/getting_started/installation.html)
+- After setting up vLLM, start an OpenAI compatible server for example by
+```bash
+python -m vllm.entrypoints.openai.api_server --model microsoft/Phi-3-mini-4k-instruct --api-key vllm --dtype bfloat16
+```
+- Create and run following script (more details please refer to this [example](https://github.com/camel-ai/camel/blob/master/examples/models/vllm_model_example.py))
+```python
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType
+
+vllm_model = ModelFactory.create(
+    model_platform=ModelPlatformType.VLLM,
+    model_type="microsoft/Phi-3-mini-4k-instruct",
+    url="http://localhost:8000/v1",
+    model_config_dict={"temperature": 0.0},
+    api_key="vllm",
+)
+
+assistant_sys_msg = BaseMessage.make_assistant_message(
+    role_name="Assistant",
+    content="You are a helpful assistant.",
+)
+agent = ChatAgent(assistant_sys_msg, model=vllm_model, token_limit=4096)
+
+user_msg = BaseMessage.make_user_message(
+    role_name="User",
+    content="Say hi to CAMEL AI",
+)
+assistant_response = agent.step(user_msg)
+print(assistant_response.msg.content)
+```
+
 ## Data (Hosted on Hugging Face)
 | Dataset | Chat format | Instruction format | Chat format (translated) |
 |----------------|-----------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|