camel-ai 0.1.5.7__py3-none-any.whl → 0.1.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (44)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +2 -2
  3. camel/agents/critic_agent.py +1 -1
  4. camel/agents/deductive_reasoner_agent.py +4 -4
  5. camel/agents/embodied_agent.py +1 -1
  6. camel/agents/knowledge_graph_agent.py +2 -2
  7. camel/agents/role_assignment_agent.py +1 -1
  8. camel/agents/search_agent.py +4 -5
  9. camel/agents/task_agent.py +5 -5
  10. camel/configs/__init__.py +9 -0
  11. camel/configs/groq_config.py +119 -0
  12. camel/configs/mistral_config.py +81 -0
  13. camel/configs/ollama_config.py +1 -1
  14. camel/configs/vllm_config.py +103 -0
  15. camel/embeddings/__init__.py +2 -0
  16. camel/embeddings/mistral_embedding.py +89 -0
  17. camel/interpreters/__init__.py +2 -0
  18. camel/interpreters/ipython_interpreter.py +167 -0
  19. camel/models/__init__.py +8 -0
  20. camel/models/anthropic_model.py +7 -2
  21. camel/models/azure_openai_model.py +152 -0
  22. camel/models/base_model.py +5 -1
  23. camel/models/gemini_model.py +14 -2
  24. camel/models/groq_model.py +131 -0
  25. camel/models/litellm_model.py +10 -4
  26. camel/models/mistral_model.py +169 -0
  27. camel/models/model_factory.py +30 -3
  28. camel/models/ollama_model.py +5 -2
  29. camel/models/open_source_model.py +11 -3
  30. camel/models/openai_model.py +7 -2
  31. camel/models/stub_model.py +4 -4
  32. camel/models/vllm_model.py +138 -0
  33. camel/models/zhipuai_model.py +7 -3
  34. camel/prompts/__init__.py +2 -2
  35. camel/prompts/task_prompt_template.py +4 -4
  36. camel/prompts/{descripte_video_prompt.py → video_description_prompt.py} +1 -1
  37. camel/retrievers/auto_retriever.py +2 -2
  38. camel/storages/graph_storages/neo4j_graph.py +5 -0
  39. camel/types/enums.py +152 -35
  40. camel/utils/__init__.py +2 -0
  41. camel/utils/token_counting.py +148 -40
  42. {camel_ai-0.1.5.7.dist-info → camel_ai-0.1.6.0.dist-info}/METADATA +42 -3
  43. {camel_ai-0.1.5.7.dist-info → camel_ai-0.1.6.0.dist-info}/RECORD +44 -35
  44. {camel_ai-0.1.5.7.dist-info → camel_ai-0.1.6.0.dist-info}/WHEEL +0 -0
camel/types/enums.py CHANGED
@@ -29,11 +29,22 @@ class ModelType(Enum):
     GPT_4_32K = "gpt-4-32k"
     GPT_4_TURBO = "gpt-4-turbo"
     GPT_4O = "gpt-4o"
+    GPT_4O_MINI = "gpt-4o-mini"
+
     GLM_4 = "glm-4"
     GLM_4_OPEN_SOURCE = "glm-4-open-source"
     GLM_4V = 'glm-4v'
     GLM_3_TURBO = "glm-3-turbo"

+    GROQ_LLAMA_3_1_8B = "llama-3.1-8b-instant"
+    GROQ_LLAMA_3_1_70B = "llama-3.1-70b-versatile"
+    GROQ_LLAMA_3_1_405B = "llama-3.1-405b-reasoning"
+    GROQ_LLAMA_3_8B = "llama3-8b-8192"
+    GROQ_LLAMA_3_70B = "llama3-70b-8192"
+    GROQ_MIXTRAL_8_7B = "mixtral-8x7b-32768"
+    GROQ_GEMMA_7B_IT = "gemma-7b-it"
+    GROQ_GEMMA_2_9B_IT = "gemma2-9b-it"
+
     STUB = "stub"

     LLAMA_2 = "llama-2"
@@ -44,7 +55,7 @@ class ModelType(Enum):
     QWEN_2 = "qwen-2"

     # Legacy anthropic models
-    # NOTE: anthropic lagecy models only Claude 2.1 has system prompt support
+    # NOTE: anthropic legacy models only Claude 2.1 has system prompt support
     CLAUDE_2_1 = "claude-2.1"
     CLAUDE_2_0 = "claude-2.0"
     CLAUDE_INSTANT_1_2 = "claude-instant-1.2"
@@ -62,6 +73,15 @@ class ModelType(Enum):
     GEMINI_1_5_FLASH = "gemini-1.5-flash"
     GEMINI_1_5_PRO = "gemini-1.5-pro"

+    # Mistral AI Model
+    MISTRAL_LARGE = "mistral-large-latest"
+    MISTRAL_NEMO = "open-mistral-nemo"
+    MISTRAL_CODESTRAL = "codestral-latest"
+    MISTRAL_7B = "open-mistral-7b"
+    MISTRAL_MIXTRAL_8x7B = "open-mixtral-8x7b"
+    MISTRAL_MIXTRAL_8x22B = "open-mixtral-8x22b"
+    MISTRAL_CODESTRAL_MAMBA = "open-codestral-mamba"
+
     @property
     def value_for_tiktoken(self) -> str:
         return (
@@ -73,6 +93,20 @@ class ModelType(Enum):
     @property
     def is_openai(self) -> bool:
         r"""Returns whether this type of models is an OpenAI-released model."""
+        return self in {
+            ModelType.GPT_3_5_TURBO,
+            ModelType.GPT_4,
+            ModelType.GPT_4_32K,
+            ModelType.GPT_4_TURBO,
+            ModelType.GPT_4O,
+            ModelType.GPT_4O_MINI,
+        }
+
+    @property
+    def is_azure_openai(self) -> bool:
+        r"""Returns whether this type of models is an OpenAI-released model
+        from Azure.
+        """
         return self in {
             ModelType.GPT_3_5_TURBO,
             ModelType.GPT_4,
@@ -119,6 +153,33 @@ class ModelType(Enum):
             ModelType.CLAUDE_3_5_SONNET,
         }

+    @property
+    def is_groq(self) -> bool:
+        r"""Returns whether this type of models is served by Groq."""
+        return self in {
+            ModelType.GROQ_LLAMA_3_1_8B,
+            ModelType.GROQ_LLAMA_3_1_70B,
+            ModelType.GROQ_LLAMA_3_1_405B,
+            ModelType.GROQ_LLAMA_3_8B,
+            ModelType.GROQ_LLAMA_3_70B,
+            ModelType.GROQ_MIXTRAL_8_7B,
+            ModelType.GROQ_GEMMA_7B_IT,
+            ModelType.GROQ_GEMMA_2_9B_IT,
+        }
+
+    @property
+    def is_mistral(self) -> bool:
+        r"""Returns whether this type of models is served by Mistral."""
+        return self in {
+            ModelType.MISTRAL_LARGE,
+            ModelType.MISTRAL_NEMO,
+            ModelType.MISTRAL_CODESTRAL,
+            ModelType.MISTRAL_7B,
+            ModelType.MISTRAL_MIXTRAL_8x7B,
+            ModelType.MISTRAL_MIXTRAL_8x22B,
+            ModelType.MISTRAL_CODESTRAL_MAMBA,
+        }
+
     @property
     def is_nvidia(self) -> bool:
         r"""Returns whether this type of models is Nvidia-released model.
@@ -137,46 +198,65 @@ class ModelType(Enum):
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
+
         Returns:
            int: The maximum token limit for the given model.
        """
-        if self is ModelType.GPT_3_5_TURBO:
-            return 16385
-        elif self is ModelType.GPT_4:
-            return 8192
-        elif self is ModelType.GPT_4_32K:
-            return 32768
-        elif self is ModelType.GPT_4_TURBO:
-            return 128000
-        elif self is ModelType.GPT_4O:
-            return 128000
-        elif self == ModelType.GEMINI_1_5_FLASH:
-            return 1048576
-        elif self == ModelType.GEMINI_1_5_PRO:
-            return 1048576
-        elif self == ModelType.GLM_4_OPEN_SOURCE:
-            return 8192
-        elif self == ModelType.GLM_3_TURBO:
-            return 8192
-        elif self == ModelType.GLM_4V:
+        if self is ModelType.GLM_4V:
             return 1024
-        elif self is ModelType.STUB:
-            return 4096
-        elif self is ModelType.LLAMA_2:
-            return 4096
-        elif self is ModelType.LLAMA_3:
-            return 8192
-        elif self is ModelType.QWEN_2:
-            return 128000
-        elif self is ModelType.GLM_4:
-            return 8192
         elif self is ModelType.VICUNA:
             # reference: https://lmsys.org/blog/2023-03-30-vicuna/
             return 2048
-        elif self is ModelType.VICUNA_16K:
-            return 16384
+        elif self in {
+            ModelType.LLAMA_2,
+            ModelType.NEMOTRON_4_REWARD,
+            ModelType.STUB,
+        }:
+            return 4_096
+        elif self in {
+            ModelType.GPT_4,
+            ModelType.GROQ_LLAMA_3_8B,
+            ModelType.GROQ_LLAMA_3_70B,
+            ModelType.GROQ_GEMMA_7B_IT,
+            ModelType.GROQ_GEMMA_2_9B_IT,
+            ModelType.LLAMA_3,
+            ModelType.GLM_3_TURBO,
+            ModelType.GLM_4,
+            ModelType.GLM_4_OPEN_SOURCE,
+        }:
+            return 8_192
+        elif self in {
+            ModelType.GPT_3_5_TURBO,
+            ModelType.VICUNA_16K,
+        }:
+            return 16_384
+        elif self in {
+            ModelType.GPT_4_32K,
+            ModelType.MISTRAL_CODESTRAL,
+            ModelType.MISTRAL_7B,
+            ModelType.MISTRAL_MIXTRAL_8x7B,
+            ModelType.GROQ_MIXTRAL_8_7B,
+        }:
+            return 32_768
+        elif self in {ModelType.MISTRAL_MIXTRAL_8x22B}:
+            return 64_000
         elif self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
             return 100_000
+        elif self in {
+            ModelType.GPT_4O,
+            ModelType.GPT_4O_MINI,
+            ModelType.GPT_4_TURBO,
+            ModelType.MISTRAL_LARGE,
+            ModelType.MISTRAL_NEMO,
+            ModelType.QWEN_2,
+        }:
+            return 128_000
+        elif self in {
+            ModelType.GROQ_LLAMA_3_1_8B,
+            ModelType.GROQ_LLAMA_3_1_70B,
+            ModelType.GROQ_LLAMA_3_1_405B,
+        }:
+            return 131_072
         elif self in {
             ModelType.CLAUDE_2_1,
             ModelType.CLAUDE_3_OPUS,
@@ -185,8 +265,12 @@ class ModelType(Enum):
             ModelType.CLAUDE_3_5_SONNET,
         }:
             return 200_000
-        elif self is ModelType.NEMOTRON_4_REWARD:
-            return 4096
+        elif self in {
+            ModelType.MISTRAL_CODESTRAL_MAMBA,
+        }:
+            return 256_000
+        elif self in {ModelType.GEMINI_1_5_FLASH, ModelType.GEMINI_1_5_PRO}:
+            return 1_048_576
         else:
             raise ValueError("Unknown model type")

@@ -195,8 +279,9 @@ class ModelType(Enum):

         Args:
             model_name (str): The name of the model, e.g. "vicuna-7b-v1.5".
+
         Returns:
-            bool: Whether the model type mathches the model name.
+            bool: Whether the model type matches the model name.
         """
         if self is ModelType.VICUNA:
             pattern = r'^vicuna-\d+b-v\d+\.\d+$'
@@ -232,6 +317,8 @@ class EmbeddingModelType(Enum):
     TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small"
     TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"

+    MISTRAL_EMBED = "mistral-embed"
+
     @property
     def is_openai(self) -> bool:
         r"""Returns whether this type of models is an OpenAI-released model."""
@@ -241,6 +328,15 @@ class EmbeddingModelType(Enum):
             EmbeddingModelType.TEXT_EMBEDDING_3_LARGE,
         }

+    @property
+    def is_mistral(self) -> bool:
+        r"""Returns whether this type of models is an Mistral-released
+        model.
+        """
+        return self in {
+            EmbeddingModelType.MISTRAL_EMBED,
+        }
+
     @property
     def output_dim(self) -> int:
         if self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
@@ -249,6 +345,8 @@ class EmbeddingModelType(Enum):
             return 1536
         elif self is EmbeddingModelType.TEXT_EMBEDDING_3_LARGE:
             return 3072
+        elif self is EmbeddingModelType.MISTRAL_EMBED:
+            return 1024
         else:
             raise ValueError(f"Unknown model type {self}.")

@@ -285,6 +383,7 @@ class OpenAIBackendRole(Enum):
     SYSTEM = "system"
     USER = "user"
     FUNCTION = "function"
+    TOOL = "tool"


 class TerminationMode(Enum):
@@ -338,12 +437,15 @@ class ModelPlatformType(Enum):
     OPENAI = "openai"
     AZURE = "azure"
     ANTHROPIC = "anthropic"
+    GROQ = "groq"
     OPENSOURCE = "opensource"
     OLLAMA = "ollama"
     LITELLM = "litellm"
     ZHIPU = "zhipuai"
     DEFAULT = "default"
     GEMINI = "gemini"
+    VLLM = "vllm"
+    MISTRAL = "mistral"

     @property
     def is_openai(self) -> bool:
@@ -360,11 +462,21 @@ class ModelPlatformType(Enum):
         r"""Returns whether this platform is anthropic."""
         return self is ModelPlatformType.ANTHROPIC

+    @property
+    def is_groq(self) -> bool:
+        r"""Returns whether this platform is groq."""
+        return self is ModelPlatformType.GROQ
+
     @property
     def is_ollama(self) -> bool:
         r"""Returns whether this platform is ollama."""
         return self is ModelPlatformType.OLLAMA

+    @property
+    def is_vllm(self) -> bool:
+        r"""Returns whether this platform is vllm."""
+        return self is ModelPlatformType.VLLM
+
     @property
     def is_litellm(self) -> bool:
         r"""Returns whether this platform is litellm."""
@@ -375,6 +487,11 @@ class ModelPlatformType(Enum):
         r"""Returns whether this platform is zhipu."""
         return self is ModelPlatformType.ZHIPU

+    @property
+    def is_mistral(self) -> bool:
+        r"""Returns whether this platform is mistral."""
+        return self is ModelPlatformType.MISTRAL
+
     @property
     def is_open_source(self) -> bool:
         r"""Returns whether this platform is opensource."""
camel/utils/__init__.py CHANGED
@@ -34,6 +34,7 @@ from .token_counting import (
     BaseTokenCounter,
     GeminiTokenCounter,
     LiteLLMTokenCounter,
+    MistralTokenCounter,
     OpenAITokenCounter,
     OpenSourceTokenCounter,
     get_model_encoding,
@@ -62,4 +63,5 @@ __all__ = [
     'api_keys_required',
     'is_docker_running',
     'GeminiTokenCounter',
+    'MistralTokenCounter',
 ]
camel/utils/token_counting.py CHANGED
@@ -26,6 +26,8 @@ from PIL import Image
 from camel.types import ModelType, OpenAIImageType, OpenAIVisionDetailType

 if TYPE_CHECKING:
+    from mistral_common.protocol.instruct.request import ChatCompletionRequest
+
     from camel.messages import OpenAIMessage

 LOW_DETAIL_TOKENS = 85
@@ -37,7 +39,7 @@ EXTRA_TOKENS = 85


 def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
-    r"""Parse the message list into a single prompt following model-specifc
+    r"""Parse the message list into a single prompt following model-specific
     formats.

     Args:
@@ -51,7 +53,12 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
     system_message = messages[0]["content"]

     ret: str
-    if model == ModelType.LLAMA_2 or model == ModelType.LLAMA_3:
+    if model in [
+        ModelType.LLAMA_2,
+        ModelType.LLAMA_3,
+        ModelType.GROQ_LLAMA_3_8B,
+        ModelType.GROQ_LLAMA_3_70B,
+    ]:
         # reference: https://github.com/facebookresearch/llama/blob/cfc3fc8c1968d390eb830e65c63865e980873a06/llama/generation.py#L212
         seps = [" ", " </s><s>"]
         role_map = {"user": "[INST]", "assistant": "[/INST]"}
@@ -74,7 +81,7 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
         else:
             ret += role
         return ret
-    elif model == ModelType.VICUNA or model == ModelType.VICUNA_16K:
+    elif model in [ModelType.VICUNA, ModelType.VICUNA_16K]:
         seps = [" ", "</s>"]
         role_map = {"user": "USER", "assistant": "ASSISTANT"}

@@ -132,6 +139,40 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
         else:
             ret += '<|im_start|>' + role + '\n'
         return ret
+    elif model == ModelType.GROQ_MIXTRAL_8_7B:
+        # Mistral/Mixtral format
+        system_prompt = f"<s>[INST] {system_message} [/INST]\n"
+        ret = system_prompt
+
+        for msg in messages[1:]:
+            if msg["role"] == "user":
+                ret += f"[INST] {msg['content']} [/INST]\n"
+            elif msg["role"] == "assistant":
+                ret += f"{msg['content']}</s>\n"
+
+            if not isinstance(msg['content'], str):
+                raise ValueError(
+                    "Currently multimodal context is not "
+                    "supported by the token counter."
+                )
+
+        return ret.strip()
+    elif model in [ModelType.GROQ_GEMMA_7B_IT, ModelType.GROQ_GEMMA_2_9B_IT]:
+        # Gemma format
+        ret = f"<bos>{system_message}\n"
+        for msg in messages:
+            if msg["role"] == "user":
+                ret += f"Human: {msg['content']}\n"
+            elif msg["role"] == "assistant":
+                ret += f"Assistant: {msg['content']}\n"
+
+            if not isinstance(msg['content'], str):
+                raise ValueError(
+                    "Currently multimodal context is not supported by the token counter."
+                )
+
+        ret += "<eos>"
+        return ret
     else:
         raise ValueError(f"Invalid model type: {model}")

@@ -232,6 +273,7 @@ class OpenAITokenCounter(BaseTokenCounter):
             model (ModelType): Model type for which tokens will be counted.
         """
         self.model: str = model.value_for_tiktoken
+        self.model_type = model

         self.tokens_per_message: int
         self.tokens_per_name: int
@@ -300,7 +342,7 @@ class OpenAITokenCounter(BaseTokenCounter):
                         base64.b64decode(encoded_image)
                     )
                     image = Image.open(image_bytes)
-                    num_tokens += count_tokens_from_image(
+                    num_tokens += self._count_tokens_from_image(
                         image, OpenAIVisionDetailType(detail)
                     )
                 if key == "name":
@@ -310,6 +352,45 @@ class OpenAITokenCounter(BaseTokenCounter):
                 num_tokens += 3
         return num_tokens

+    def _count_tokens_from_image(
+        self, image: Image.Image, detail: OpenAIVisionDetailType
+    ) -> int:
+        r"""Count image tokens for OpenAI vision model. An :obj:`"auto"`
+        resolution model will be treated as :obj:`"high"`. All images with
+        :obj:`"low"` detail cost 85 tokens each. Images with :obj:`"high"` detail
+        are first scaled to fit within a 2048 x 2048 square, maintaining their
+        aspect ratio. Then, they are scaled such that the shortest side of the
+        image is 768px long. Finally, we count how many 512px squares the image
+        consists of. Each of those squares costs 170 tokens. Another 85 tokens are
+        always added to the final total. For more details please refer to `OpenAI
+        vision docs <https://platform.openai.com/docs/guides/vision>`_
+
+        Args:
+            image (PIL.Image.Image): Image to count number of tokens.
+            detail (OpenAIVisionDetailType): Image detail type to count
+                number of tokens.
+
+        Returns:
+            int: Number of tokens for the image given a detail type.
+        """
+        if detail == OpenAIVisionDetailType.LOW:
+            return LOW_DETAIL_TOKENS
+
+        width, height = image.size
+        if width > FIT_SQUARE_PIXELS or height > FIT_SQUARE_PIXELS:
+            scaling_factor = max(width, height) / FIT_SQUARE_PIXELS
+            width = int(width / scaling_factor)
+            height = int(height / scaling_factor)
+
+        scaling_factor = min(width, height) / SHORTEST_SIDE_PIXELS
+        scaled_width = int(width / scaling_factor)
+        scaled_height = int(height / scaling_factor)
+
+        h = ceil(scaled_height / SQUARE_PIXELS)
+        w = ceil(scaled_width / SQUARE_PIXELS)
+        total = EXTRA_TOKENS + SQUARE_TOKENS * h * w
+        return total
+

 class AnthropicTokenCounter(BaseTokenCounter):
     def __init__(self, model_type: ModelType):
@@ -428,41 +509,68 @@ class LiteLLMTokenCounter:
         return self.completion_cost(completion_response=response)


-def count_tokens_from_image(
-    image: Image.Image, detail: OpenAIVisionDetailType
-) -> int:
-    r"""Count image tokens for OpenAI vision model. An :obj:`"auto"`
-    resolution model will be treated as :obj:`"high"`. All images with
-    :obj:`"low"` detail cost 85 tokens each. Images with :obj:`"high"` detail
-    are first scaled to fit within a 2048 x 2048 square, maintaining their
-    aspect ratio. Then, they are scaled such that the shortest side of the
-    image is 768px long. Finally, we count how many 512px squares the image
-    consists of. Each of those squares costs 170 tokens. Another 85 tokens are
-    always added to the final total. For more details please refer to `OpenAI
-    vision docs <https://platform.openai.com/docs/guides/vision>`_
-
-    Args:
-        image (PIL.Image.Image): Image to count number of tokens.
-        detail (OpenAIVisionDetailType): Image detail type to count
-            number of tokens.
-
-    Returns:
-        int: Number of tokens for the image given a detail type.
-    """
-    if detail == OpenAIVisionDetailType.LOW:
-        return LOW_DETAIL_TOKENS
-
-    width, height = image.size
-    if width > FIT_SQUARE_PIXELS or height > FIT_SQUARE_PIXELS:
-        scaling_factor = max(width, height) / FIT_SQUARE_PIXELS
-        width = int(width / scaling_factor)
-        height = int(height / scaling_factor)
-
-    scaling_factor = min(width, height) / SHORTEST_SIDE_PIXELS
-    scaled_width = int(width / scaling_factor)
-    scaled_height = int(height / scaling_factor)
-
-    h = ceil(scaled_height / SQUARE_PIXELS)
-    w = ceil(scaled_width / SQUARE_PIXELS)
-    total = EXTRA_TOKENS + SQUARE_TOKENS * h * w
-    return total
+class MistralTokenCounter(BaseTokenCounter):
+    def __init__(self, model_type: ModelType):
+        r"""Constructor for the token counter for Mistral models.
+
+        Args:
+            model_type (ModelType): Model type for which tokens will be
+                counted.
+        """
+        from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
+
+        self.model_type = model_type
+
+        # Determine the model type and set the tokenizer accordingly
+        model_name = (
+            "codestral-22b"
+            if self.model_type
+            in {ModelType.MISTRAL_CODESTRAL, ModelType.MISTRAL_CODESTRAL_MAMBA}
+            else self.model_type.value
+        )
+
+        self.tokenizer = MistralTokenizer.from_model(model_name)
+
+    def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
+        r"""Count number of tokens in the provided message list using
+        loaded tokenizer specific for this type of model.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            int: Total number of tokens in the messages.
+        """
+        total_tokens = 0
+        for msg in messages:
+            tokens = self.tokenizer.encode_chat_completion(
+                self._convert_response_from_openai_to_mistral(msg)
+            ).tokens
+            total_tokens += len(tokens)
+        return total_tokens
+
+    def _convert_response_from_openai_to_mistral(
+        self, openai_msg: OpenAIMessage
+    ) -> ChatCompletionRequest:
+        r"""Convert an OpenAI message to a Mistral ChatCompletionRequest.
+
+        Args:
+            openai_msg (OpenAIMessage): An individual message with OpenAI
+                format.
+
+        Returns:
+            ChatCompletionRequest: The converted message in Mistral's request
+                format.
+        """
+
+        from mistral_common.protocol.instruct.request import (
+            ChatCompletionRequest,
+        )
+
+        mistral_request = ChatCompletionRequest(  # type: ignore[type-var]
+            model=self.model_type.value,
+            messages=[openai_msg],
+        )
+
+        return mistral_request
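
A minimal usage sketch for the new counter, assuming `MistralTokenCounter` is imported through the new `camel.utils` export and that `mistral_common` is installed:

```python
from camel.types import ModelType
from camel.utils import MistralTokenCounter

# Build a counter for a Mistral model; the matching tokenizer is loaded
# via mistral_common under the hood.
counter = MistralTokenCounter(ModelType.MISTRAL_LARGE)

# Messages follow the usual OpenAI chat format.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hi to CAMEL AI"},
]

print(counter.count_tokens_from_messages(messages))  # total token count
```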
{camel_ai-0.1.5.7.dist-info → camel_ai-0.1.6.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.7
+Version: 0.1.6.0
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -38,10 +38,14 @@ Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
38
38
  Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
39
39
  Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
40
40
  Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
41
+ Requires-Dist: groq (>=0.5.0,<0.6.0)
41
42
  Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
43
+ Requires-Dist: ipykernel (>=6.0.0,<7.0.0)
42
44
  Requires-Dist: jsonschema (>=4,<5)
45
+ Requires-Dist: jupyter_client (>=8.6.2,<9.0.0) ; extra == "tools" or extra == "all"
43
46
  Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
44
- Requires-Dist: milvus-lite (>=2.4.0,<=2.4.7)
47
+ Requires-Dist: mistral-common (>=1.3.3,<2.0.0) ; extra == "model-platforms" or extra == "all"
48
+ Requires-Dist: mistralai (>=0.4.2,<0.5.0) ; extra == "model-platforms" or extra == "all"
45
49
  Requires-Dist: mock (>=5,<6) ; extra == "test"
46
50
  Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
47
51
  Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
@@ -191,7 +195,7 @@ conda create --name camel python=3.9
 conda activate camel

 # Clone github repo
-git clone -b v0.1.5.7 https://github.com/camel-ai/camel.git
+git clone -b v0.1.6.0 https://github.com/camel-ai/camel.git

 # Change directory into project directory
 cd camel
@@ -317,6 +321,41 @@ Please note that the environment variable is session-specific. If you open a new
     print(assistant_response.msg.content)
 ```

+## Use Open-Source Models as Backends (ex. using vLLM to set Phi-3 locally)
+- [Install vLLM](https://docs.vllm.ai/en/latest/getting_started/installation.html)
+- After setting up vLLM, start an OpenAI compatible server for example by
+```bash
+python -m vllm.entrypoints.openai.api_server --model microsoft/Phi-3-mini-4k-instruct --api-key vllm --dtype bfloat16
+```
+- Create and run following script (more details please refer to this [example](https://github.com/camel-ai/camel/blob/master/examples/models/vllm_model_example.py))
+```python
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType
+
+vllm_model = ModelFactory.create(
+    model_platform=ModelPlatformType.VLLM,
+    model_type="microsoft/Phi-3-mini-4k-instruct",
+    url="http://localhost:8000/v1",
+    model_config_dict={"temperature": 0.0},
+    api_key="vllm",
+)
+
+assistant_sys_msg = BaseMessage.make_assistant_message(
+    role_name="Assistant",
+    content="You are a helpful assistant.",
+)
+agent = ChatAgent(assistant_sys_msg, model=vllm_model, token_limit=4096)
+
+user_msg = BaseMessage.make_user_message(
+    role_name="User",
+    content="Say hi to CAMEL AI",
+)
+assistant_response = agent.step(user_msg)
+print(assistant_response.msg.content)
+```
+
 ## Data (Hosted on Hugging Face)
 | Dataset | Chat format | Instruction format | Chat format (translated) |
 |----------------|-----------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|