camel-ai 0.1.5.6__py3-none-any.whl → 0.1.5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic.

Files changed (97)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +3 -3
  3. camel/agents/critic_agent.py +1 -1
  4. camel/agents/deductive_reasoner_agent.py +4 -4
  5. camel/agents/embodied_agent.py +1 -1
  6. camel/agents/knowledge_graph_agent.py +2 -2
  7. camel/agents/role_assignment_agent.py +1 -1
  8. camel/agents/search_agent.py +4 -5
  9. camel/agents/task_agent.py +5 -5
  10. camel/configs/__init__.py +9 -0
  11. camel/configs/gemini_config.py +15 -14
  12. camel/configs/groq_config.py +119 -0
  13. camel/configs/litellm_config.py +1 -1
  14. camel/configs/mistral_config.py +81 -0
  15. camel/configs/ollama_config.py +1 -1
  16. camel/configs/openai_config.py +1 -1
  17. camel/configs/vllm_config.py +103 -0
  18. camel/configs/zhipuai_config.py +1 -1
  19. camel/embeddings/__init__.py +2 -0
  20. camel/embeddings/mistral_embedding.py +89 -0
  21. camel/interpreters/__init__.py +2 -0
  22. camel/interpreters/ipython_interpreter.py +167 -0
  23. camel/models/__init__.py +8 -0
  24. camel/models/anthropic_model.py +7 -2
  25. camel/models/azure_openai_model.py +152 -0
  26. camel/models/base_model.py +9 -2
  27. camel/models/gemini_model.py +14 -2
  28. camel/models/groq_model.py +131 -0
  29. camel/models/litellm_model.py +26 -4
  30. camel/models/mistral_model.py +169 -0
  31. camel/models/model_factory.py +30 -3
  32. camel/models/ollama_model.py +21 -2
  33. camel/models/open_source_model.py +11 -3
  34. camel/models/openai_model.py +7 -2
  35. camel/models/stub_model.py +4 -4
  36. camel/models/vllm_model.py +138 -0
  37. camel/models/zhipuai_model.py +7 -4
  38. camel/prompts/__init__.py +2 -2
  39. camel/prompts/task_prompt_template.py +4 -4
  40. camel/prompts/{descripte_video_prompt.py → video_description_prompt.py} +1 -1
  41. camel/retrievers/auto_retriever.py +2 -0
  42. camel/storages/graph_storages/neo4j_graph.py +5 -0
  43. camel/toolkits/__init__.py +36 -0
  44. camel/toolkits/base.py +1 -1
  45. camel/toolkits/code_execution.py +1 -1
  46. camel/toolkits/github_toolkit.py +3 -2
  47. camel/toolkits/google_maps_toolkit.py +367 -0
  48. camel/toolkits/math_toolkit.py +79 -0
  49. camel/toolkits/open_api_toolkit.py +548 -0
  50. camel/toolkits/retrieval_toolkit.py +76 -0
  51. camel/toolkits/search_toolkit.py +326 -0
  52. camel/toolkits/slack_toolkit.py +308 -0
  53. camel/toolkits/twitter_toolkit.py +522 -0
  54. camel/toolkits/weather_toolkit.py +173 -0
  55. camel/types/enums.py +149 -34
  56. camel/utils/__init__.py +2 -0
  57. camel/utils/async_func.py +1 -1
  58. camel/utils/token_counting.py +148 -40
  59. {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.5.9.dist-info}/METADATA +42 -3
  60. camel_ai-0.1.5.9.dist-info/RECORD +165 -0
  61. camel/functions/__init__.py +0 -51
  62. camel/functions/google_maps_function.py +0 -335
  63. camel/functions/math_functions.py +0 -61
  64. camel/functions/open_api_function.py +0 -508
  65. camel/functions/retrieval_functions.py +0 -61
  66. camel/functions/search_functions.py +0 -298
  67. camel/functions/slack_functions.py +0 -286
  68. camel/functions/twitter_function.py +0 -479
  69. camel/functions/weather_functions.py +0 -144
  70. camel_ai-0.1.5.6.dist-info/RECORD +0 -157
  71. /camel/{functions → toolkits}/open_api_specs/biztoc/__init__.py +0 -0
  72. /camel/{functions → toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
  73. /camel/{functions → toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
  74. /camel/{functions → toolkits}/open_api_specs/coursera/__init__.py +0 -0
  75. /camel/{functions → toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
  76. /camel/{functions → toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
  77. /camel/{functions → toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
  78. /camel/{functions → toolkits}/open_api_specs/klarna/__init__.py +0 -0
  79. /camel/{functions → toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
  80. /camel/{functions → toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
  81. /camel/{functions → toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
  82. /camel/{functions → toolkits}/open_api_specs/outschool/__init__.py +0 -0
  83. /camel/{functions → toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
  84. /camel/{functions → toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
  85. /camel/{functions → toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
  86. /camel/{functions → toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
  87. /camel/{functions → toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
  88. /camel/{functions → toolkits}/open_api_specs/security_config.py +0 -0
  89. /camel/{functions → toolkits}/open_api_specs/speak/__init__.py +0 -0
  90. /camel/{functions → toolkits}/open_api_specs/speak/openapi.yaml +0 -0
  91. /camel/{functions → toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
  92. /camel/{functions → toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
  93. /camel/{functions → toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
  94. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
  95. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
  96. /camel/{functions → toolkits}/openai_function.py +0 -0
  97. {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.5.9.dist-info}/WHEEL +0 -0
camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
- __version__ = '0.1.5.6'
+ __version__ = '0.1.5.9'
 
  __all__ = [
      '__version__',
camel/agents/chat_agent.py CHANGED
@@ -42,8 +42,8 @@ from camel.utils import get_model_encoding
  if TYPE_CHECKING:
      from openai import Stream
 
-     from camel.functions import OpenAIFunction
      from camel.terminators import ResponseTerminator
+     from camel.toolkits import OpenAIFunction
 
 
  @dataclass(frozen=True)
@@ -82,7 +82,7 @@ class ChatAgent(BaseAgent):
          system_message (BaseMessage): The system message for the chat agent.
          model (BaseModelBackend, optional): The model backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
          api_key (str, optional): The API key for authenticating with the
              LLM service. Only OpenAI and Anthropic model supported (default:
              :obj:`None`)
@@ -127,7 +127,7 @@
              if model is not None
              else ModelFactory.create(
                  model_platform=ModelPlatformType.OPENAI,
-                 model_type=ModelType.GPT_3_5_TURBO,
+                 model_type=ModelType.GPT_4O_MINI,
                  model_config_dict=ChatGPTConfig().__dict__,
                  api_key=self._api_key,
              )
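
The two ChatAgent hunks above change the fallback backend that an agent builds when the caller passes no model. A minimal sketch of what the new default amounts to, assuming camel-ai 0.1.5.9 and that ModelFactory, ModelPlatformType, and ModelType remain importable from camel.models and camel.types as in prior releases:

from camel.configs import ChatGPTConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Equivalent to ChatAgent.__init__'s fallback branch after this change:
# an OpenAI backend pinned to GPT_4O_MINI with a stock ChatGPTConfig.
default_model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
    model_config_dict=ChatGPTConfig().__dict__,
)
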
camel/agents/critic_agent.py CHANGED
@@ -33,7 +33,7 @@ class CriticAgent(ChatAgent):
              agent.
          model (BaseModelBackend, optional): The model backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
          message_window_size (int, optional): The maximum number of previous
              messages to include in the context window. If `None`, no windowing
              is performed. (default: :obj:`6`)
camel/agents/deductive_reasoner_agent.py CHANGED
@@ -35,7 +35,7 @@ class DeductiveReasonerAgent(ChatAgent):
      Args:
          model (BaseModelBackend, optional): The model backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
      """
 
      def __init__(
@@ -126,7 +126,7 @@ $B$.
  - Direct Path Analysis: What are the immediate and direct conditions
  required to move from $A$ to $B$?
  - Intermediate States: Are there states between $A$ and $B$ that must be
- transversed or can be used to make the transition smoother or more
+ traversed or can be used to make the transition smoother or more
  efficient? If yes, what is the content?
  - Constraints & Limitations: Identify potential barriers or restrictions
  in moving from $A$ to $B$. These can be external (e.g., environmental
@@ -244,7 +244,7 @@ square brackets)
          print(f"Message content:\n{msg.content}")
 
          # Extract the conditions from the message
-         condistions_dict = {
+         conditions_dict = {
              f"condition {i}": cdt.replace("<", "")
              .replace(">", "")
              .strip()
@@ -281,7 +281,7 @@ square brackets)
          conditions_and_quality_json: Dict[
              str, Union[List[str], Dict[str, str]]
          ] = {}
-         conditions_and_quality_json["conditions"] = condistions_dict
+         conditions_and_quality_json["conditions"] = conditions_dict
          conditions_and_quality_json["labels"] = labels
          conditions_and_quality_json["evaluate_quality"] = quality
 
camel/agents/embodied_agent.py CHANGED
@@ -35,7 +35,7 @@ class EmbodiedAgent(ChatAgent):
          system_message (BaseMessage): The system message for the chat agent.
          model (BaseModelBackend, optional): The model backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
          message_window_size (int, optional): The maximum number of previous
              messages to include in the context window. If `None`, no windowing
              is performed. (default: :obj:`None`)
camel/agents/knowledge_graph_agent.py CHANGED
@@ -115,14 +115,14 @@ class KnowledgeGraphAgent(ChatAgent):
      Args:
          model (BaseModelBackend, optional): The model backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
      """
      system_message = BaseMessage(
          role_name="Graphify",
          role_type=RoleType.ASSISTANT,
          meta_dict=None,
          content="Your mission is to transform unstructured content "
-         "intostructured graph data. Extract nodes and relationships with "
+         "into structured graph data. Extract nodes and relationships with "
          "precision, and let the connections unfold. Your graphs will "
          "illuminate the hidden connections within the chaos of "
          "information.",
camel/agents/role_assignment_agent.py CHANGED
@@ -27,7 +27,7 @@ class RoleAssignmentAgent(ChatAgent):
      Args:
          model (BaseModelBackend, optional): The model backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
 
      Attributes:
          role_assignment_prompt (TextPrompt): A prompt for the agent to generate
camel/agents/search_agent.py CHANGED
@@ -26,10 +26,9 @@ class SearchAgent(ChatAgent):
      relevance of an answer.
 
      Args:
-         model_type (ModelType, optional): The type of model to use for the
-             agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-         model_config (Any, optional): The configuration for the model.
-             (default: :obj:`None`)
+         model (BaseModelBackend, optional): The model backend to use for
+             generating responses. (default: :obj:`OpenAIModel` with
+             `GPT_4O_MINI`)
      """
 
      def __init__(
@@ -76,7 +75,7 @@ class SearchAgent(ChatAgent):
              result = self.step(user_msg).msg.content
              results += result + "\n"
 
-         # Final summarise
+         # Final summarization
          final_prompt = TextPrompt(
              '''Here are some summarized texts which split from one text. Using
              the information to answer the question. If can't find the answer,
camel/agents/task_agent.py CHANGED
@@ -32,7 +32,7 @@ class TaskSpecifyAgent(ChatAgent):
      Args:
          model (BaseModelBackend, optional): The model backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
          task_type (TaskType, optional): The type of task for which to generate
              a prompt. (default: :obj:`TaskType.AI_SOCIETY`)
          task_specify_prompt (Union[str, TextPrompt], optional): The prompt for
@@ -126,7 +126,7 @@ class TaskPlannerAgent(ChatAgent):
      Args:
          model (BaseModelBackend, optional): The model backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
          output_language (str, optional): The language to be output by the
              agent. (default: :obj:`None`)
      """
@@ -201,7 +201,7 @@ class TaskCreationAgent(ChatAgent):
              perform the task.
          model (BaseModelBackend, optional): The LLM backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
          output_language (str, optional): The language to be output by the
              agent. (default: :obj:`None`)
          message_window_size (int, optional): The maximum number of previous
@@ -233,7 +233,7 @@ The result must be a numbered list in the format:
  #. Third Task
 
  You can only give me up to {max_task_num} tasks at a time. \
- Each task shoud be concise, concrete and doable for a {role_name}.
+ Each task should be concise, concrete and doable for a {role_name}.
  You should make task plan and not ask me questions.
  If you think no new tasks are needed right now, write "No tasks to add."
  Now start to give me new tasks one by one. No more than three tasks.
@@ -312,7 +312,7 @@ class TaskPrioritizationAgent(ChatAgent):
              perform the task.
          model (BaseModelBackend, optional): The LLM backend to use for
              generating responses. (default: :obj:`OpenAIModel` with
-             `GPT_3_5_TURBO`)
+             `GPT_4O_MINI`)
          output_language (str, optional): The language to be output by the
              agent. (default: :obj:`None`)
          message_window_size (int, optional): The maximum number of previous
camel/configs/__init__.py CHANGED
@@ -17,13 +17,16 @@ from .gemini_config import (
      Gemini_API_PARAMS,
      GeminiConfig,
  )
+ from .groq_config import GROQ_API_PARAMS, GroqConfig
  from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+ from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
  from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
  from .openai_config import (
      OPENAI_API_PARAMS,
      ChatGPTConfig,
      OpenSourceConfig,
  )
+ from .vllm_config import VLLM_API_PARAMS, VLLMConfig
  from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
 
  __all__ = [
@@ -32,6 +35,8 @@ __all__ = [
      'OPENAI_API_PARAMS',
      'AnthropicConfig',
      'ANTHROPIC_API_PARAMS',
+     'GROQ_API_PARAMS',
+     'GroqConfig',
      'OpenSourceConfig',
      'LiteLLMConfig',
      'LITELLM_API_PARAMS',
@@ -41,4 +46,8 @@ __all__ = [
      'ZHIPUAI_API_PARAMS',
      'GeminiConfig',
      'Gemini_API_PARAMS',
+     'VLLMConfig',
+     'VLLM_API_PARAMS',
+     'MistralConfig',
+     'MISTRAL_API_PARAMS',
  ]
camel/configs/gemini_config.py CHANGED
@@ -15,10 +15,19 @@
 
  from collections.abc import Iterable
  from dataclasses import asdict, dataclass
- from typing import Optional
+ from typing import TYPE_CHECKING, Optional
 
  from camel.configs.base_config import BaseConfig
 
+ if TYPE_CHECKING:
+     from google.generativeai.protos import Schema
+     from google.generativeai.types.content_types import (
+         FunctionLibraryType,
+         ToolConfigType,
+     )
+     from google.generativeai.types.helper_types import RequestOptionsType
+     from google.generativeai.types.safety_types import SafetySettingOptions
+
 
  @dataclass(frozen=True)
  class GeminiConfig(BaseConfig):
@@ -72,14 +81,6 @@ class GeminiConfig(BaseConfig):
              Options for the request.
      """
 
-     from google.generativeai.protos import Schema
-     from google.generativeai.types.content_types import (
-         FunctionLibraryType,
-         ToolConfigType,
-     )
-     from google.generativeai.types.helper_types import RequestOptionsType
-     from google.generativeai.types.safety_types import SafetySettingOptions
-
      candidate_count: Optional[int] = None
      stop_sequences: Optional[Iterable[str]] = None
      max_output_tokens: Optional[int] = None
@@ -87,11 +88,11 @@ class GeminiConfig(BaseConfig):
      top_p: Optional[float] = None
      top_k: Optional[int] = None
      response_mime_type: Optional[str] = None
-     response_schema: Optional[Schema] = None
-     safety_settings: Optional[SafetySettingOptions] = None
-     tools: Optional[FunctionLibraryType] = None
-     tool_config: Optional[ToolConfigType] = None
-     request_options: Optional[RequestOptionsType] = None
+     response_schema: Optional['Schema'] = None
+     safety_settings: Optional['SafetySettingOptions'] = None
+     tools: Optional['FunctionLibraryType'] = None
+     tool_config: Optional['ToolConfigType'] = None
+     request_options: Optional['RequestOptionsType'] = None
 
 
  Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
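
The gemini_config.py change applies a standard deferred-import pattern: the google.generativeai imports move under TYPE_CHECKING and the annotations become quoted forward references, so the optional dependency is no longer imported when the class body executes. A minimal sketch of the pattern; DemoConfig is a hypothetical stand-in, not part of camel:

from dataclasses import asdict, dataclass
from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # Evaluated only by static type checkers, never at runtime.
    from google.generativeai.protos import Schema


@dataclass(frozen=True)
class DemoConfig:
    # The quoted annotation is resolved lazily, so building the class and
    # calling asdict() both work even if google-generativeai is absent.
    response_schema: Optional['Schema'] = None


print(asdict(DemoConfig()))  # {'response_schema': None}
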
camel/configs/groq_config.py ADDED
@@ -0,0 +1,119 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from __future__ import annotations
+
+ from dataclasses import asdict, dataclass
+ from typing import TYPE_CHECKING, Optional, Sequence
+
+ from openai._types import NOT_GIVEN, NotGiven
+
+ from camel.configs.base_config import BaseConfig
+
+ if TYPE_CHECKING:
+     from camel.toolkits import OpenAIFunction
+
+
+ @dataclass(frozen=True)
+ class GroqConfig(BaseConfig):
+     r"""Defines the parameters for generating chat completions using OpenAI
+     compatibility.
+
+     Reference: https://console.groq.com/docs/openai
+
+     Args:
+         temperature (float, optional): Sampling temperature to use, between
+             :obj:`0` and :obj:`2`. Higher values make the output more random,
+             while lower values make it more focused and deterministic.
+             (default: :obj:`0.2`)
+         top_p (float, optional): An alternative to sampling with temperature,
+             called nucleus sampling, where the model considers the results of
+             the tokens with top_p probability mass. So :obj:`0.1` means only
+             the tokens comprising the top 10% probability mass are considered.
+             (default: :obj:`1.0`)
+         n (int, optional): How many chat completion choices to generate for
+             each input message. (default: :obj:`1`)
+         response_format (object, optional): An object specifying the format
+             that the model must output. Compatible with GPT-4 Turbo and all
+             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+             {"type": "json_object"} enables JSON mode, which guarantees the
+             message the model generates is valid JSON. Important: when using
+             JSON mode, you must also instruct the model to produce JSON
+             yourself via a system or user message. Without this, the model
+             may generate an unending stream of whitespace until the generation
+             reaches the token limit, resulting in a long-running and seemingly
+             "stuck" request. Also note that the message content may be
+             partially cut off if finish_reason="length", which indicates the
+             generation exceeded max_tokens or the conversation exceeded the
+             max context length.
+         stream (bool, optional): If True, partial message deltas will be sent
+             as data-only server-sent events as they become available.
+             (default: :obj:`False`)
+         stop (str or list, optional): Up to :obj:`4` sequences where the API
+             will stop generating further tokens. (default: :obj:`None`)
+         max_tokens (int, optional): The maximum number of tokens to generate
+             in the chat completion. The total length of input tokens and
+             generated tokens is limited by the model's context length.
+             (default: :obj:`None`)
+         presence_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on whether
+             they appear in the text so far, increasing the model's likelihood
+             to talk about new topics. See more information about frequency and
+             presence penalties. (default: :obj:`0.0`)
+         frequency_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on their
+             existing frequency in the text so far, decreasing the model's
+             likelihood to repeat the same line verbatim. See more information
+             about frequency and presence penalties. (default: :obj:`0.0`)
+         user (str, optional): A unique identifier representing your end-user,
+             which can help OpenAI to monitor and detect abuse.
+             (default: :obj:`""`)
+         tools (list[OpenAIFunction], optional): A list of tools the model may
+             call. Currently, only functions are supported as a tool. Use this
+             to provide a list of functions the model may generate JSON inputs
+             for. A max of 128 functions are supported.
+         tool_choice (Union[dict[str, str], str], optional): Controls which (if
+             any) tool is called by the model. :obj:`"none"` means the model
+             will not call any tool and instead generates a message.
+             :obj:`"auto"` means the model can pick between generating a
+             message or calling one or more tools. :obj:`"required"` means the
+             model must call one or more tools. Specifying a particular tool
+             via {"type": "function", "function": {"name": "my_function"}}
+             forces the model to call that tool. :obj:`"none"` is the default
+             when no tools are present. :obj:`"auto"` is the default if tools
+             are present.
+     """
+
+     temperature: float = 0.2  # openai default: 1.0
+     top_p: float = 1.0
+     n: int = 1
+     stream: bool = False
+     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+     max_tokens: int | NotGiven = NOT_GIVEN
+     presence_penalty: float = 0.0
+     response_format: dict | NotGiven = NOT_GIVEN
+     frequency_penalty: float = 0.0
+     user: str = ""
+     tools: Optional[list[OpenAIFunction]] = None
+     tool_choice: Optional[dict[str, str] | str] = "none"
+
+     def __post_init__(self):
+         if self.tools is not None:
+             object.__setattr__(
+                 self,
+                 'tools',
+                 [tool.get_openai_tool_schema() for tool in self.tools],
+             )
+
+
+ GROQ_API_PARAMS = {param for param in asdict(GroqConfig()).keys()}
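
A hedged usage sketch for the new config, using only names this diff defines or exports (GroqConfig and GROQ_API_PARAMS are re-exported from camel.configs per the __init__.py hunk above). Note the frozen-dataclass trick in __post_init__: object.__setattr__ swaps any OpenAIFunction tools for their OpenAI tool schemas at construction time.

from dataclasses import asdict

from camel.configs import GROQ_API_PARAMS, GroqConfig

config = GroqConfig(temperature=0.0, user="demo-user")
# The field names line up exactly with GROQ_API_PARAMS, the set a backend
# can validate incoming kwargs against.
assert set(asdict(config)) == GROQ_API_PARAMS
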
camel/configs/litellm_config.py CHANGED
@@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, List, Optional, Union
  from camel.configs.base_config import BaseConfig
 
  if TYPE_CHECKING:
-     from camel.functions import OpenAIFunction
+     from camel.toolkits import OpenAIFunction
 
 
  @dataclass(frozen=True)
camel/configs/mistral_config.py ADDED
@@ -0,0 +1,81 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from __future__ import annotations
+
+ from dataclasses import asdict, dataclass
+ from typing import TYPE_CHECKING, Dict, Optional, Union
+
+ from camel.configs.base_config import BaseConfig
+
+ if TYPE_CHECKING:
+     from mistralai.models.chat_completion import (
+         ResponseFormat,
+     )
+
+     from camel.toolkits import OpenAIFunction
+
+
+ @dataclass(frozen=True)
+ class MistralConfig(BaseConfig):
+     r"""Defines the parameters for generating chat completions using the
+     Mistral API.
+
+     reference: https://github.com/mistralai/client-python/blob/9d238f88c41689821d7b08570f13b43426f97fd6/src/mistralai/client.py#L195
+
+     Args:
+         temperature (Optional[float], optional): temperature the temperature
+             to use for sampling, e.g. 0.5.
+         max_tokens (Optional[int], optional): the maximum number of tokens to
+             generate, e.g. 100. Defaults to None.
+         top_p (Optional[float], optional): the cumulative probability of
+             tokens to generate, e.g. 0.9. Defaults to None.
+         random_seed (Optional[int], optional): the random seed to use for
+             sampling, e.g. 42. Defaults to None.
+         safe_mode (bool, optional): deprecated, use safe_prompt instead.
+             Defaults to False.
+         safe_prompt (bool, optional): whether to use safe prompt, e.g. true.
+             Defaults to False.
+         response_format (Union[Dict[str, str], ResponseFormat): format of the
+             response.
+         tools (Optional[list[OpenAIFunction]], optional): a list of tools to
+             use.
+         tool_choice (str, optional): Controls which (if
+             any) tool is called by the model. :obj:`"none"` means the model
+             will not call any tool and instead generates a message.
+             :obj:`"auto"` means the model can pick between generating a
+             message or calling one or more tools. :obj:`"any"` means the
+             model must call one or more tools. :obj:`"auto"` is the default
+             value.
+     """
+
+     temperature: Optional[float] = None
+     max_tokens: Optional[int] = None
+     top_p: Optional[float] = None
+     random_seed: Optional[int] = None
+     safe_mode: bool = False
+     safe_prompt: bool = False
+     response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None
+     tools: Optional[list[OpenAIFunction]] = None
+     tool_choice: Optional[str] = "auto"
+
+     def __post_init__(self):
+         if self.tools is not None:
+             object.__setattr__(
+                 self,
+                 'tools',
+                 [tool.get_openai_tool_schema() for tool in self.tools],
+             )
+
+
+ MISTRAL_API_PARAMS = {param for param in asdict(MistralConfig()).keys()}
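
Unlike gemini_config.py, this file's annotations reference TYPE_CHECKING-only names (ResponseFormat, OpenAIFunction) without quoting; that works because `from __future__ import annotations` turns every annotation into a lazily evaluated string. A hedged construction sketch using only fields defined above:

from camel.configs import MistralConfig

config = MistralConfig(
    temperature=0.5,
    random_seed=42,
    response_format={"type": "json_object"},  # the plain-dict alternative
    tool_choice="any",  # Mistral's analogue of OpenAI's "required"
)
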
camel/configs/ollama_config.py CHANGED
@@ -24,7 +24,7 @@ from camel.configs.base_config import BaseConfig
  @dataclass(frozen=True)
  class OllamaConfig(BaseConfig):
      r"""Defines the parameters for generating chat completions using OpenAI
-     compatibility
+     compatibility.
 
      Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
 
camel/configs/openai_config.py CHANGED
@@ -21,7 +21,7 @@ from openai._types import NOT_GIVEN, NotGiven
  from camel.configs.base_config import BaseConfig
 
  if TYPE_CHECKING:
-     from camel.functions import OpenAIFunction
+     from camel.toolkits import OpenAIFunction
 
 
  @dataclass(frozen=True)
camel/configs/vllm_config.py ADDED
@@ -0,0 +1,103 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from __future__ import annotations
+
+ from dataclasses import asdict, dataclass, field
+ from typing import Sequence
+
+ from openai._types import NOT_GIVEN, NotGiven
+
+ from camel.configs.base_config import BaseConfig
+
+
+ # flake8: noqa: E501
+ @dataclass(frozen=True)
+ class VLLMConfig(BaseConfig):
+     r"""Defines the parameters for generating chat completions using the
+     OpenAI API.
+
+     Reference: https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html
+
+     Args:
+         temperature (float, optional): Sampling temperature to use, between
+             :obj:`0` and :obj:`2`. Higher values make the output more random,
+             while lower values make it more focused and deterministic.
+             (default: :obj:`0.2`)
+         top_p (float, optional): An alternative to sampling with temperature,
+             called nucleus sampling, where the model considers the results of
+             the tokens with top_p probability mass. So :obj:`0.1` means only
+             the tokens comprising the top 10% probability mass are considered.
+             (default: :obj:`1.0`)
+         n (int, optional): How many chat completion choices to generate for
+             each input message. (default: :obj:`1`)
+         response_format (object, optional): An object specifying the format
+             that the model must output. Compatible with GPT-4 Turbo and all
+             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+             {"type": "json_object"} enables JSON mode, which guarantees the
+             message the model generates is valid JSON. Important: when using
+             JSON mode, you must also instruct the model to produce JSON
+             yourself via a system or user message. Without this, the model
+             may generate an unending stream of whitespace until the generation
+             reaches the token limit, resulting in a long-running and seemingly
+             "stuck" request. Also note that the message content may be
+             partially cut off if finish_reason="length", which indicates the
+             generation exceeded max_tokens or the conversation exceeded the
+             max context length.
+         stream (bool, optional): If True, partial message deltas will be sent
+             as data-only server-sent events as they become available.
+             (default: :obj:`False`)
+         stop (str or list, optional): Up to :obj:`4` sequences where the API
+             will stop generating further tokens. (default: :obj:`None`)
+         max_tokens (int, optional): The maximum number of tokens to generate
+             in the chat completion. The total length of input tokens and
+             generated tokens is limited by the model's context length.
+             (default: :obj:`None`)
+         presence_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on whether
+             they appear in the text so far, increasing the model's likelihood
+             to talk about new topics. See more information about frequency and
+             presence penalties. (default: :obj:`0.0`)
+         frequency_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on their
+             existing frequency in the text so far, decreasing the model's
+             likelihood to repeat the same line verbatim. See more information
+             about frequency and presence penalties. (default: :obj:`0.0`)
+         logit_bias (dict, optional): Modify the likelihood of specified tokens
+             appearing in the completion. Accepts a json object that maps tokens
+             (specified by their token ID in the tokenizer) to an associated
+             bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
+             is added to the logits generated by the model prior to sampling.
+             The exact effect will vary per model, but values between:obj:` -1`
+             and :obj:`1` should decrease or increase likelihood of selection;
+             values like :obj:`-100` or :obj:`100` should result in a ban or
+             exclusive selection of the relevant token. (default: :obj:`{}`)
+         user (str, optional): A unique identifier representing your end-user,
+             which can help OpenAI to monitor and detect abuse.
+             (default: :obj:`""`)
+     """
+
+     temperature: float = 0.2  # openai default: 1.0
+     top_p: float = 1.0
+     n: int = 1
+     stream: bool = False
+     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+     max_tokens: int | NotGiven = NOT_GIVEN
+     presence_penalty: float = 0.0
+     response_format: dict | NotGiven = NOT_GIVEN
+     frequency_penalty: float = 0.0
+     logit_bias: dict = field(default_factory=dict)
+     user: str = ""
+
+
+ VLLM_API_PARAMS = {param for param in asdict(VLLMConfig()).keys()}
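
Since VLLM_API_PARAMS is derived from the dataclass fields, it can double as a guard when forwarding a user-supplied dict to a vLLM OpenAI-compatible server. A small sketch under that assumption:

from dataclasses import asdict

from camel.configs import VLLM_API_PARAMS, VLLMConfig

config_dict = asdict(VLLMConfig(temperature=0.2, n=2))
# Reject any keys the vLLM config does not declare before sending a request.
unknown = set(config_dict) - VLLM_API_PARAMS
assert not unknown, f"unsupported vLLM parameters: {unknown}"
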
camel/configs/zhipuai_config.py CHANGED
@@ -21,7 +21,7 @@ from openai._types import NOT_GIVEN, NotGiven
  from camel.configs.base_config import BaseConfig
 
  if TYPE_CHECKING:
-     from camel.functions import OpenAIFunction
+     from camel.toolkits import OpenAIFunction
 
 
  @dataclass(frozen=True)
camel/embeddings/__init__.py CHANGED
@@ -12,6 +12,7 @@
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from .base import BaseEmbedding
+ from .mistral_embedding import MistralEmbedding
  from .openai_embedding import OpenAIEmbedding
  from .sentence_transformers_embeddings import SentenceTransformerEncoder
  from .vlm_embedding import VisionLanguageEmbedding
@@ -21,4 +22,5 @@ __all__ = [
      "OpenAIEmbedding",
      "SentenceTransformerEncoder",
      "VisionLanguageEmbedding",
+     "MistralEmbedding",
  ]
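
mistral_embedding.py itself is not shown in this diff (only its export above), so the following is a hedged sketch: MistralEmbedding is assumed to follow the shared BaseEmbedding interface (embed_list) like the other exported backends, and to read MISTRAL_API_KEY from the environment.

from camel.embeddings import MistralEmbedding

embedding = MistralEmbedding()  # assumed to pick up MISTRAL_API_KEY
vectors = embedding.embed_list(["CAMEL is a multi-agent framework."])
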