camel-ai 0.2.18__py3-none-any.whl → 0.2.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -17,12 +17,30 @@ from pydantic import BaseModel, Field
 
 
 class ReasoningStep(BaseModel):
+    r"""A single step in a multi-hop reasoning process.
+
+    Attributes:
+        step (str): The textual description of the reasoning step.
+    """
+
     step: str = Field(
         ..., description="A single step in the reasoning process."
     )
 
 
 class MultiHopQA(BaseModel):
+    r"""A multi-hop question-answer pair with reasoning steps and supporting
+    facts.
+
+    Attributes:
+        question (str): The question requiring multi-hop reasoning.
+        reasoning_steps (List[ReasoningStep]): List of reasoning steps to
+            answer.
+        answer (str): The final answer to the question.
+        supporting_facts (List[str]): List of facts supporting the reasoning.
+        type (str): The type of question-answer pair.
+    """
+
     question: str = Field(
         ..., description="The question that requires multi-hop reasoning."
     )
@@ -57,6 +75,13 @@ class MultiHopQA(BaseModel):
 
 
 class ContextPrompt(BaseModel):
+    r"""A context prompt for generating multi-hop question-answer pairs.
+
+    Attributes:
+        main_context (str): The primary context for generating QA pairs.
+        related_contexts (Optional[List[str]]): Additional related contexts.
+    """
+
     main_context: str = Field(
         ...,
         description="The main context for generating"
@@ -23,7 +23,15 @@ class ProcessorConfig(BaseModel):
     r"""Data processing configuration class"""
 
     def __repr__(self):
-        return "MultiHopGeneratorAgent()"
+        return (
+            f"ProcessorConfig("
+            f"seed={self.seed}, min_length={self.min_length}, "
+            f"max_length={self.max_length}, "
+            f"complexity_threshold={self.complexity_threshold}, "
+            f"dataset_size={self.dataset_size}, "
+            f"use_ai_model={self.use_ai_model}"
+            f")"
+        )
 
     model_config = ConfigDict(
         validate_assignment=True,
@@ -45,13 +53,6 @@ class ProcessorConfig(BaseModel):
         default=512, description="Maximum text length", gt=0
     )
 
-    quality_threshold: float = Field(
-        default=0.7,
-        description="Quality threshold for processing",
-        ge=0.0,
-        le=1.0,
-    )
-
     complexity_threshold: float = Field(
         default=0.5,
         description="Complexity threshold for processing",
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .base import BaseEmbedding
+from .jina_embedding import JinaEmbedding
 from .mistral_embedding import MistralEmbedding
 from .openai_compatible_embedding import OpenAICompatibleEmbedding
 from .openai_embedding import OpenAIEmbedding
@@ -25,4 +26,5 @@ __all__ = [
     "VisionLanguageEmbedding",
     "MistralEmbedding",
     "OpenAICompatibleEmbedding",
+    "JinaEmbedding",
 ]
camel/embeddings/jina_embedding.py ADDED
@@ -0,0 +1,156 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import base64
+import io
+import os
+from typing import Any, Optional, Union
+
+import requests
+from PIL import Image
+
+from camel.embeddings import BaseEmbedding
+from camel.types.enums import EmbeddingModelType
+from camel.utils import api_keys_required
+
+
+class JinaEmbedding(BaseEmbedding[Union[str, Image.Image]]):
+    r"""Provides text and image embedding functionalities using Jina AI's API.
+
+    Args:
+        model_type (EmbeddingModelType, optional): The model to use for
+            embeddings. (default: :obj:`JINA_EMBEDDINGS_V3`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            Jina AI. (default: :obj:`None`)
+        dimensions (Optional[int], optional): The dimension of the output
+            embeddings. (default: :obj:`None`)
+        task (Optional[str], optional): The type of task for text embeddings.
+            Options: retrieval.query, retrieval.passage, text-matching,
+            classification, separation. (default: :obj:`None`)
+        late_chunking (bool, optional): If true, concatenates all sentences in
+            input and treats as a single input. (default: :obj:`False`)
+        normalized (bool, optional): If true, embeddings are normalized to
+            unit L2 norm. (default: :obj:`False`)
+    """
+
+    @api_keys_required([("api_key", 'JINA_API_KEY')])
+    def __init__(
+        self,
+        model_type: EmbeddingModelType = EmbeddingModelType.JINA_EMBEDDINGS_V3,
+        api_key: Optional[str] = None,
+        dimensions: Optional[int] = None,
+        embedding_type: Optional[str] = None,
+        task: Optional[str] = None,
+        late_chunking: bool = False,
+        normalized: bool = False,
+    ) -> None:
+        if not model_type.is_jina:
+            raise ValueError(
+                f"Model type {model_type} is not a Jina model. "
+                "Please use a valid Jina model type."
+            )
+        self.model_type = model_type
+        if dimensions is None:
+            self.output_dim = model_type.output_dim
+        else:
+            self.output_dim = dimensions
+        self._api_key = api_key or os.environ.get("JINA_API_KEY")
+
+        self.embedding_type = embedding_type
+        self.task = task
+        self.late_chunking = late_chunking
+        self.normalized = normalized
+        self.url = 'https://api.jina.ai/v1/embeddings'
+        self.headers = {
+            'Content-Type': 'application/json',
+            'Accept': 'application/json',
+            'Authorization': f'Bearer {self._api_key}',
+        }
+
+    def embed_list(
+        self,
+        objs: list[Union[str, Image.Image]],
+        **kwargs: Any,
+    ) -> list[list[float]]:
+        r"""Generates embeddings for the given texts or images.
+
+        Args:
+            objs (list[Union[str, Image.Image]]): The texts or images for
+                which to generate the embeddings.
+            **kwargs (Any): Extra kwargs passed to the embedding API. Not used
+                in this implementation.
+
+        Returns:
+            list[list[float]]: A list that represents the generated embedding
+                as a list of floating-point numbers.
+
+        Raises:
+            ValueError: If the input type is not supported.
+            RuntimeError: If the API request fails.
+        """
+        input_data = []
+        for obj in objs:
+            if isinstance(obj, str):
+                if self.model_type == EmbeddingModelType.JINA_CLIP_V2:
+                    input_data.append({"text": obj})
+                else:
+                    input_data.append(obj)  # type: ignore[arg-type]
+            elif isinstance(obj, Image.Image):
+                if self.model_type != EmbeddingModelType.JINA_CLIP_V2:
+                    raise ValueError(
+                        f"Model {self.model_type} does not support "
+                        "image input. Use JINA_CLIP_V2 for image embeddings."
+                    )
+                # Convert PIL Image to base64 string
+                buffered = io.BytesIO()
+                obj.save(buffered, format="PNG")
+                img_str = base64.b64encode(buffered.getvalue()).decode()
+                input_data.append({"image": img_str})
+            else:
+                raise ValueError(
+                    f"Input type {type(obj)} is not supported. "
+                    "Must be either str or PIL.Image"
+                )
+
+        data = {
+            "model": self.model_type.value,
+            "input": input_data,
+            "embedding_type": "float",
+        }
+
+        if self.embedding_type is not None:
+            data["embedding_type"] = self.embedding_type
+        if self.task is not None:
+            data["task"] = self.task
+        if self.late_chunking:
+            data["late_chunking"] = self.late_chunking  # type: ignore[assignment]
+        if self.normalized:
+            data["normalized"] = self.normalized  # type: ignore[assignment]
+        try:
+            response = requests.post(
+                self.url, headers=self.headers, json=data, timeout=180
+            )
+            response.raise_for_status()
+            result = response.json()
+            return [data["embedding"] for data in result["data"]]
+        except requests.exceptions.RequestException as e:
+            raise RuntimeError(f"Failed to get embeddings from Jina AI: {e}")
+
+    def get_output_dim(self) -> int:
+        r"""Returns the output dimension of the embeddings.
+
+        Returns:
+            int: The dimensionality of the embedding for the current model.
+        """
+        return self.output_dim
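
A usage sketch for the new backend, assuming `JINA_API_KEY` is set in the environment (the decorator above enforces it) and that `api.jina.ai` is reachable:

from camel.embeddings import JinaEmbedding
from camel.types.enums import EmbeddingModelType

# Text embeddings with the default jina-embeddings-v3 model.
embedder = JinaEmbedding(
    model_type=EmbeddingModelType.JINA_EMBEDDINGS_V3,
    task="retrieval.passage",  # one of the documented task options
)
vectors = embedder.embed_list(["CAMEL is a multi-agent framework."])
print(len(vectors), len(vectors[0]))  # expected: 1 1024

# Image embeddings require the jina-clip-v2 model:
# clip = JinaEmbedding(model_type=EmbeddingModelType.JINA_CLIP_V2)
# clip.embed_list([Image.open("photo.png")])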
@@ -154,7 +154,7 @@ class FunctionCallingMessage(BaseMessage):
             " due to missing function name."
         )
 
-        result_content = json.dumps(self.result)
+        result_content = str(self.result)
 
         return {
             "role": "tool",
@@ -13,12 +13,12 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-import warnings
 from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
 
 from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
+from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
 from camel.types import (
@@ -28,6 +28,8 @@ from camel.types import (
 )
 from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
 
+logger = get_logger(__name__)
+
 
 class DeepSeekModel(BaseModelBackend):
     r"""DeepSeek API in a unified BaseModelBackend interface.
@@ -116,11 +118,12 @@ class DeepSeekModel(BaseModelBackend):
         if self.model_type in [
             ModelType.DEEPSEEK_REASONER,
         ]:
-            warnings.warn(
-                "Warning: You are using an DeepSeek Reasoner model, "
+            import re
+
+            logger.warning(
+                "You are using a DeepSeek Reasoner model, "
                 "which has certain limitations, reference: "
-                "`https://api-docs.deepseek.com/guides/reasoning_model#api-parameters`.",
-                UserWarning,
+                "`https://api-docs.deepseek.com/guides/reasoning_model#api-parameters`"
             )
 
             # Check and remove unsupported parameters and reset the fixed
@@ -138,14 +141,29 @@ class DeepSeekModel(BaseModelBackend):
             if key in self.model_config_dict:
                 del self.model_config_dict[key]
 
+        # Remove thinking content from messages before sending to API
+        # This ensures only the final response is sent, excluding
+        # intermediate thought processes
+        messages = [
+            {  # type: ignore[misc]
+                **msg,
+                'content': re.sub(
+                    r'<think>.*?</think>',
+                    '',
+                    msg['content'],  # type: ignore[arg-type]
+                    flags=re.DOTALL,
+                ).strip(),
+            }
+            for msg in messages
+        ]
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
             **self.model_config_dict,
         )
 
-        # Temporary solution to handle the case where
-        # deepseek returns a reasoning_content
+        # Handle reasoning content with <think> tags at the beginning
         if (
             self.model_type
             in [
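
A standalone illustration of the stripping logic added above: `re.DOTALL` lets `.*?` match across newlines, and the non-greedy quantifier stops at the first closing tag, so multiple `<think>` blocks are removed independently:

import re

content = (
    "<think>\nFirst, recall that Paris is the capital...\n</think>\n"
    "The capital of France is Paris."
)
cleaned = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()
print(cleaned)  # -> The capital of France is Paris.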
@@ -156,10 +174,10 @@ class DeepSeekModel(BaseModelBackend):
         ):
             reasoning_content = response.choices[0].message.reasoning_content
             combined_content = (
-                response.choices[0].message.content
-                + "\n\nBELOW IS THE REASONING CONTENT:\n\n"
-                + (reasoning_content if reasoning_content else "")
-            )
+                f"<think>\n{reasoning_content}\n</think>\n"
+                if reasoning_content
+                else ""
+            ) + response.choices[0].message.content
 
             response = ChatCompletion.construct(
                 id=response.id,
@@ -88,8 +88,6 @@ class GroqModel(BaseModelBackend):
             BaseTokenCounter: The token counter following the model's
                 tokenization style.
         """
-        # Make sure you have the access to these open-source model in
-        # HuggingFace
         if not self._token_counter:
             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
@@ -21,7 +21,6 @@ from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
-    NOT_GIVEN,
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
@@ -112,6 +111,7 @@ class OpenAIModel(BaseModelBackend):
             ModelType.O1,
             ModelType.O1_MINI,
             ModelType.O1_PREVIEW,
+            ModelType.O3_MINI,
         ]:
             warnings.warn(
                 "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
@@ -148,14 +148,6 @@ class OpenAIModel(BaseModelBackend):
 
             return self._to_chat_completion(response)
 
-        # Removing 'strict': True from the dictionary for
-        # client.chat.completions.create
-        if self.model_config_dict.get('tools') is not NOT_GIVEN:
-            for tool in self.model_config_dict.get('tools', []):
-                function_dict = tool.get('function', {})
-                if 'strict' in function_dict:
-                    del function_dict['strict']
-
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
@@ -13,10 +13,9 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 import xml.etree.ElementTree as ET
-from typing import Any, Dict, List, Literal, Optional, Type, TypeAlias, Union
+from typing import Any, Dict, List, Literal, Optional, TypeAlias, Union
 
 import requests
-from pydantic import BaseModel
 
 from camel.toolkits.base import BaseToolkit
 from camel.toolkits.function_tool import FunctionTool
@@ -77,7 +76,7 @@ class SearchToolkit(BaseToolkit):
         output_type: Literal[
             "searchResults", "sourcedAnswer", "structured"
         ] = "searchResults",
-        structured_output_schema: Union[Type[BaseModel], str, None] = None,
+        structured_output_schema: Optional[str] = None,
     ) -> Dict[str, Any]:
         r"""Search for a query in the Linkup API and return results in various
         formats.
@@ -92,9 +91,9 @@ class SearchToolkit(BaseToolkit):
                 - "searchResults" for raw search results,
                 - "sourcedAnswer" for an answer with supporting sources,
                 - "structured" for output based on a provided schema.
-            structured_output_schema (Union[Type[BaseModel], str, None]): If
-                `output_type` is "structured",specify the schema of the
-                output. Can be a Pydantic BaseModel or a JSON schema string.
+            structured_output_schema (Optional[str]): If `output_type` is
+                "structured", specify the schema of the output. Must be a
+                string representing a valid object JSON schema.
 
         Returns:
             Dict[str, Any]: A dictionary representing the search result. The
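
With this change, callers that previously passed a Pydantic model class must now serialize it to a JSON schema string themselves. A hedged sketch of the migration (the enclosing method signature is only partially visible in this hunk; `Company` is a hypothetical model):

import json

from pydantic import BaseModel


class Company(BaseModel):
    company_name: str
    founded_year: int


# Equivalent JSON schema string, as the narrowed parameter now requires,
# to be passed alongside output_type="structured".
structured_output_schema = json.dumps(Company.model_json_schema())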
camel/types/enums.py CHANGED
@@ -37,6 +37,7 @@ class ModelType(UnifiedModelType, Enum):
     O1 = "o1"
     O1_PREVIEW = "o1-preview"
     O1_MINI = "o1-mini"
+    O3_MINI = "o3-mini"
 
     GLM_4 = "glm-4"
     GLM_4V = 'glm-4v'
@@ -44,14 +45,11 @@ class ModelType(UnifiedModelType, Enum):
 
     # Groq platform models
     GROQ_LLAMA_3_1_8B = "llama-3.1-8b-instant"
-    GROQ_LLAMA_3_1_70B = "llama-3.1-70b-versatile"
-    GROQ_LLAMA_3_1_405B = "llama-3.1-405b-reasoning"
     GROQ_LLAMA_3_3_70B = "llama-3.3-70b-versatile"
     GROQ_LLAMA_3_3_70B_PREVIEW = "llama-3.3-70b-specdec"
     GROQ_LLAMA_3_8B = "llama3-8b-8192"
     GROQ_LLAMA_3_70B = "llama3-70b-8192"
     GROQ_MIXTRAL_8_7B = "mixtral-8x7b-32768"
-    GROQ_GEMMA_7B_IT = "gemma-7b-it"
     GROQ_GEMMA_2_9B_IT = "gemma2-9b-it"
 
     # TogetherAI platform models support tool calling
@@ -67,6 +65,17 @@ class ModelType(UnifiedModelType, Enum):
     SAMBA_LLAMA_3_1_70B = "Meta-Llama-3.1-70B-Instruct"
     SAMBA_LLAMA_3_1_405B = "Meta-Llama-3.1-405B-Instruct"
 
+    # SGLang models support tool calling
+    SGLANG_LLAMA_3_1_8B = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+    SGLANG_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct"
+    SGLANG_LLAMA_3_1_405B = "meta-llama/Meta-Llama-3.1-405B-Instruct"
+    SGLANG_LLAMA_3_2_1B = "meta-llama/Llama-3.2-1B-Instruct"
+    SGLANG_MIXTRAL_NEMO = "mistralai/Mistral-Nemo-Instruct-2407"
+    SGLANG_MISTRAL_7B = "mistralai/Mistral-7B-Instruct-v0.3"
+    SGLANG_QWEN_2_5_7B = "Qwen/Qwen2.5-7B-Instruct"
+    SGLANG_QWEN_2_5_32B = "Qwen/Qwen2.5-32B-Instruct"
+    SGLANG_QWEN_2_5_72B = "Qwen/Qwen2.5-72B-Instruct"
+
     STUB = "stub"
 
     # Legacy anthropic models
@@ -190,6 +199,8 @@ class ModelType(UnifiedModelType, Enum):
                 self.is_internlm,
                 self.is_together,
                 self.is_sambanova,
+                self.is_groq,
+                self.is_sglang,
             ]
         )
 
@@ -205,6 +216,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.O1,
             ModelType.O1_PREVIEW,
             ModelType.O1_MINI,
+            ModelType.O3_MINI,
         }
 
     @property
@@ -252,14 +264,11 @@ class ModelType(UnifiedModelType, Enum):
         r"""Returns whether this type of models is served by Groq."""
         return self in {
             ModelType.GROQ_LLAMA_3_1_8B,
-            ModelType.GROQ_LLAMA_3_1_70B,
-            ModelType.GROQ_LLAMA_3_1_405B,
             ModelType.GROQ_LLAMA_3_3_70B,
             ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
             ModelType.GROQ_LLAMA_3_8B,
             ModelType.GROQ_LLAMA_3_70B,
             ModelType.GROQ_MIXTRAL_8_7B,
-            ModelType.GROQ_GEMMA_7B_IT,
             ModelType.GROQ_GEMMA_2_9B_IT,
         }
 
@@ -413,6 +422,20 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.INTERNLM2_PRO_CHAT,
         }
 
+    @property
+    def is_sglang(self) -> bool:
+        return self in {
+            ModelType.SGLANG_LLAMA_3_1_8B,
+            ModelType.SGLANG_LLAMA_3_1_70B,
+            ModelType.SGLANG_LLAMA_3_1_405B,
+            ModelType.SGLANG_LLAMA_3_2_1B,
+            ModelType.SGLANG_MIXTRAL_NEMO,
+            ModelType.SGLANG_MISTRAL_7B,
+            ModelType.SGLANG_QWEN_2_5_7B,
+            ModelType.SGLANG_QWEN_2_5_32B,
+            ModelType.SGLANG_QWEN_2_5_72B,
+        }
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
@@ -440,7 +463,6 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GROQ_LLAMA_3_8B,
             ModelType.GROQ_LLAMA_3_70B,
             ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
-            ModelType.GROQ_GEMMA_7B_IT,
             ModelType.GROQ_GEMMA_2_9B_IT,
             ModelType.GLM_3_TURBO,
             ModelType.GLM_4,
@@ -479,6 +501,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.INTERNLM2_5_LATEST,
             ModelType.INTERNLM2_PRO_CHAT,
             ModelType.TOGETHER_MIXTRAL_8_7B,
+            ModelType.SGLANG_MISTRAL_7B,
         }:
             return 32_768
         elif self in {
@@ -518,12 +541,15 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT,
             ModelType.GROQ_LLAMA_3_3_70B,
             ModelType.SAMBA_LLAMA_3_1_70B,
+            ModelType.SGLANG_LLAMA_3_1_8B,
+            ModelType.SGLANG_LLAMA_3_1_70B,
+            ModelType.SGLANG_LLAMA_3_1_405B,
+            ModelType.SGLANG_LLAMA_3_2_1B,
+            ModelType.SGLANG_MIXTRAL_NEMO,
         }:
             return 128_000
         elif self in {
             ModelType.GROQ_LLAMA_3_1_8B,
-            ModelType.GROQ_LLAMA_3_1_70B,
-            ModelType.GROQ_LLAMA_3_1_405B,
             ModelType.QWEN_PLUS,
             ModelType.QWEN_TURBO,
             ModelType.QWEN_CODER_TURBO,
@@ -531,10 +557,14 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.TOGETHER_LLAMA_3_1_70B,
             ModelType.TOGETHER_LLAMA_3_1_405B,
             ModelType.TOGETHER_LLAMA_3_3_70B,
+            ModelType.SGLANG_QWEN_2_5_7B,
+            ModelType.SGLANG_QWEN_2_5_32B,
+            ModelType.SGLANG_QWEN_2_5_72B,
         }:
             return 131_072
         elif self in {
             ModelType.O1,
+            ModelType.O3_MINI,
             ModelType.CLAUDE_2_1,
             ModelType.CLAUDE_3_OPUS,
             ModelType.CLAUDE_3_SONNET,
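
The resulting limits for the new SGLang members, read directly from the sets above:

from camel.types.enums import ModelType

print(ModelType.SGLANG_MISTRAL_7B.token_limit)    # 32768
print(ModelType.SGLANG_LLAMA_3_1_8B.token_limit)  # 128000
print(ModelType.SGLANG_QWEN_2_5_72B.token_limit)  # 131072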
@@ -567,6 +597,11 @@ class EmbeddingModelType(Enum):
     TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small"
     TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"
 
+    JINA_EMBEDDINGS_V3 = "jina-embeddings-v3"
+    JINA_CLIP_V2 = "jina-clip-v2"
+    JINA_COLBERT_V2 = "jina-colbert-v2"
+    JINA_EMBEDDINGS_V2_BASE_CODE = "jina-embeddings-v2-base-code"
+
     MISTRAL_EMBED = "mistral-embed"
 
     @property
@@ -578,6 +613,16 @@ class EmbeddingModelType(Enum):
             EmbeddingModelType.TEXT_EMBEDDING_3_LARGE,
         }
 
+    @property
+    def is_jina(self) -> bool:
+        r"""Returns whether this type of models is a Jina model."""
+        return self in {
+            EmbeddingModelType.JINA_EMBEDDINGS_V3,
+            EmbeddingModelType.JINA_CLIP_V2,
+            EmbeddingModelType.JINA_COLBERT_V2,
+            EmbeddingModelType.JINA_EMBEDDINGS_V2_BASE_CODE,
+        }
+
     @property
     def is_mistral(self) -> bool:
         r"""Returns whether this type of models is a Mistral-released
@@ -589,7 +634,20 @@ class EmbeddingModelType(Enum):
 
     @property
     def output_dim(self) -> int:
-        if self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
+        if self in {
+            EmbeddingModelType.JINA_COLBERT_V2,
+        }:
+            return 128
+        elif self in {
+            EmbeddingModelType.JINA_EMBEDDINGS_V2_BASE_CODE,
+        }:
+            return 768
+        elif self in {
+            EmbeddingModelType.JINA_EMBEDDINGS_V3,
+            EmbeddingModelType.JINA_CLIP_V2,
+        }:
+            return 1024
+        elif self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
             return 1536
         elif self is EmbeddingModelType.TEXT_EMBEDDING_3_SMALL:
             return 1536
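
The new Jina branches resolve as follows, with values taken directly from the code above:

from camel.types.enums import EmbeddingModelType

print(EmbeddingModelType.JINA_EMBEDDINGS_V3.is_jina)               # True
print(EmbeddingModelType.JINA_COLBERT_V2.output_dim)               # 128
print(EmbeddingModelType.JINA_EMBEDDINGS_V2_BASE_CODE.output_dim)  # 768
print(EmbeddingModelType.JINA_CLIP_V2.output_dim)                  # 1024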
@@ -112,7 +112,7 @@ class OpenAITokenCounter(BaseTokenCounter):
         elif ("gpt-3.5-turbo" in self.model) or ("gpt-4" in self.model):
             self.tokens_per_message = 3
             self.tokens_per_name = 1
-        elif "o1" in self.model:
+        elif ("o1" in self.model) or ("o3" in self.model):
             self.tokens_per_message = 2
             self.tokens_per_name = 1
         else:
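
For context, these constants feed the per-message overhead in the usual tiktoken counting recipe. A condensed sketch, assuming the o200k_base encoding for o1/o3-family models (the counter's full method is not shown in this hunk):

import tiktoken


def count_message_tokens(messages: list[dict], tokens_per_message: int = 2) -> int:
    # tokens_per_message=2 matches the o1/o3 branch above.
    encoding = tiktoken.get_encoding("o200k_base")
    total = 0
    for message in messages:
        total += tokens_per_message
        for value in message.values():
            total += len(encoding.encode(str(value)))
    return total + 3  # every reply is primed with <|start|>assistant<|message|>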
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: camel-ai
-Version: 0.2.18
+Version: 0.2.19
 Summary: Communicative Agents for AI Society Study
 License: Apache-2.0
 Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
@@ -71,7 +71,7 @@ Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "rag" or extra == "storage" or
 Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "web-tools" or extra == "all"
 Requires-Dist: notion-client (>=2.2.1,<3.0.0) ; extra == "communication-tools" or extra == "all"
 Requires-Dist: numpy (>=1,<2)
-Requires-Dist: openai (>=1.58.1,<2.0.0)
+Requires-Dist: openai (>=1.59.7,<2.0.0)
 Requires-Dist: openapi-spec-validator (>=0.7.1,<0.8.0) ; extra == "document-tools" or extra == "all"
 Requires-Dist: openbb (>=4.3.5,<5.0.0) ; extra == "data-tools" or extra == "all"
 Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface" or extra == "all"
@@ -418,6 +418,7 @@ Practical guides and tutorials for implementing specific functionalities in CAMEL
 | **[Dynamic Travel Planner Role-Playing: Multi-Agent System with Real-Time Insights Powered by Dappier](https://docs.camel-ai.org/cookbooks/applications/dynamic_travel_planner.html)** | Explore an innovative approach to travel planning, blending AI-driven role-playing and real-time data for seamless experiences. |
 | **[Customer Service Discord Bot with Agentic RAG](https://docs.camel-ai.org/cookbooks/applications/customer_service_Discord_bot_using_SambaNova_with_agentic_RAG.html)** | Learn how to build a robust customer service bot for Discord using Agentic RAG. |
 | **[Customer Service Discord Bot with Local Model](https://docs.camel-ai.org/cookbooks/applications/customer_service_Discord_bot_using_local_model_with_agentic_RAG.html)** | Learn how to build a robust customer service bot for Discord using Agentic RAG which supports local deployment. |
+| **[Customer Service Discord Bot for Finance with OpenBB](https://docs.camel-ai.org/cookbooks/applications/finance_discord_bot.html)** | Learn how to build a simple yet powerful financial data assistant Discord bot using OpenBB tools. |
 
 ### Data Processing
 | Cookbook | Description |
@@ -456,6 +457,8 @@ We implemented amazing research ideas from other works for you to build, compare
 
 - `Self-Instruct` from *Yizhong Wang et al.*: [SELF-INSTRUCT: Aligning Language Models with Self-Generated Instructions](https://arxiv.org/pdf/2212.10560). [[Example](https://github.com/camel-ai/camel/blob/master/examples/datagen/self_instruct/self_instruct.py)]
 
+- `Source2Synth` from *Alisia Lupidi et al.*: [Source2Synth: Synthetic Data Generation and Curation Grounded in Real Data Sources](https://arxiv.org/abs/2409.08239). [[Example](https://github.com/camel-ai/camel/blob/master/examples/datagen/source2synth.py)]
+
 ## Other Research Works Based on Camel
 - [Agent Trust](http://agent-trust.camel-ai.org/): Can Large Language Model Agents Simulate Human Trust Behavior?