camel-ai 0.1.5.6__py3-none-any.whl → 0.1.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.
Files changed (133)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +249 -36
  3. camel/agents/critic_agent.py +18 -2
  4. camel/agents/deductive_reasoner_agent.py +16 -4
  5. camel/agents/embodied_agent.py +20 -6
  6. camel/agents/knowledge_graph_agent.py +24 -5
  7. camel/agents/role_assignment_agent.py +13 -1
  8. camel/agents/search_agent.py +16 -5
  9. camel/agents/task_agent.py +20 -5
  10. camel/configs/__init__.py +11 -9
  11. camel/configs/anthropic_config.py +5 -6
  12. camel/configs/base_config.py +50 -4
  13. camel/configs/gemini_config.py +69 -17
  14. camel/configs/groq_config.py +105 -0
  15. camel/configs/litellm_config.py +2 -8
  16. camel/configs/mistral_config.py +78 -0
  17. camel/configs/ollama_config.py +5 -7
  18. camel/configs/openai_config.py +12 -23
  19. camel/configs/vllm_config.py +102 -0
  20. camel/configs/zhipuai_config.py +5 -11
  21. camel/embeddings/__init__.py +2 -0
  22. camel/embeddings/mistral_embedding.py +89 -0
  23. camel/human.py +1 -1
  24. camel/interpreters/__init__.py +2 -0
  25. camel/interpreters/ipython_interpreter.py +167 -0
  26. camel/loaders/__init__.py +2 -0
  27. camel/loaders/firecrawl_reader.py +213 -0
  28. camel/memories/agent_memories.py +1 -4
  29. camel/memories/blocks/chat_history_block.py +6 -2
  30. camel/memories/blocks/vectordb_block.py +3 -1
  31. camel/memories/context_creators/score_based.py +6 -6
  32. camel/memories/records.py +9 -7
  33. camel/messages/base.py +1 -0
  34. camel/models/__init__.py +8 -0
  35. camel/models/anthropic_model.py +7 -2
  36. camel/models/azure_openai_model.py +152 -0
  37. camel/models/base_model.py +9 -2
  38. camel/models/gemini_model.py +14 -2
  39. camel/models/groq_model.py +131 -0
  40. camel/models/litellm_model.py +26 -4
  41. camel/models/mistral_model.py +169 -0
  42. camel/models/model_factory.py +30 -3
  43. camel/models/ollama_model.py +21 -2
  44. camel/models/open_source_model.py +13 -5
  45. camel/models/openai_model.py +7 -2
  46. camel/models/stub_model.py +4 -4
  47. camel/models/vllm_model.py +138 -0
  48. camel/models/zhipuai_model.py +7 -4
  49. camel/prompts/__init__.py +8 -1
  50. camel/prompts/image_craft.py +34 -0
  51. camel/prompts/multi_condition_image_craft.py +34 -0
  52. camel/prompts/task_prompt_template.py +10 -4
  53. camel/prompts/{descripte_video_prompt.py → video_description_prompt.py} +1 -1
  54. camel/responses/agent_responses.py +4 -3
  55. camel/retrievers/auto_retriever.py +2 -2
  56. camel/societies/babyagi_playing.py +6 -4
  57. camel/societies/role_playing.py +16 -8
  58. camel/storages/graph_storages/graph_element.py +10 -14
  59. camel/storages/graph_storages/neo4j_graph.py +5 -0
  60. camel/storages/vectordb_storages/base.py +24 -13
  61. camel/storages/vectordb_storages/milvus.py +1 -1
  62. camel/storages/vectordb_storages/qdrant.py +2 -3
  63. camel/tasks/__init__.py +22 -0
  64. camel/tasks/task.py +408 -0
  65. camel/tasks/task_prompt.py +65 -0
  66. camel/toolkits/__init__.py +39 -0
  67. camel/toolkits/base.py +4 -2
  68. camel/toolkits/code_execution.py +1 -1
  69. camel/toolkits/dalle_toolkit.py +146 -0
  70. camel/toolkits/github_toolkit.py +19 -34
  71. camel/toolkits/google_maps_toolkit.py +368 -0
  72. camel/toolkits/math_toolkit.py +79 -0
  73. camel/toolkits/open_api_toolkit.py +547 -0
  74. camel/{functions → toolkits}/openai_function.py +2 -7
  75. camel/toolkits/retrieval_toolkit.py +76 -0
  76. camel/toolkits/search_toolkit.py +326 -0
  77. camel/toolkits/slack_toolkit.py +308 -0
  78. camel/toolkits/twitter_toolkit.py +522 -0
  79. camel/toolkits/weather_toolkit.py +173 -0
  80. camel/types/enums.py +154 -35
  81. camel/utils/__init__.py +14 -2
  82. camel/utils/async_func.py +1 -1
  83. camel/utils/commons.py +152 -2
  84. camel/utils/constants.py +3 -0
  85. camel/utils/token_counting.py +148 -40
  86. camel/workforce/__init__.py +23 -0
  87. camel/workforce/base.py +50 -0
  88. camel/workforce/manager_node.py +299 -0
  89. camel/workforce/role_playing_node.py +168 -0
  90. camel/workforce/single_agent_node.py +77 -0
  91. camel/workforce/task_channel.py +173 -0
  92. camel/workforce/utils.py +97 -0
  93. camel/workforce/worker_node.py +115 -0
  94. camel/workforce/workforce.py +49 -0
  95. camel/workforce/workforce_prompt.py +125 -0
  96. {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.6.1.dist-info}/METADATA +45 -3
  97. camel_ai-0.1.6.1.dist-info/RECORD +182 -0
  98. camel/functions/__init__.py +0 -51
  99. camel/functions/google_maps_function.py +0 -335
  100. camel/functions/math_functions.py +0 -61
  101. camel/functions/open_api_function.py +0 -508
  102. camel/functions/retrieval_functions.py +0 -61
  103. camel/functions/search_functions.py +0 -298
  104. camel/functions/slack_functions.py +0 -286
  105. camel/functions/twitter_function.py +0 -479
  106. camel/functions/weather_functions.py +0 -144
  107. camel_ai-0.1.5.6.dist-info/RECORD +0 -157
  108. /camel/{functions → toolkits}/open_api_specs/biztoc/__init__.py +0 -0
  109. /camel/{functions → toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
  110. /camel/{functions → toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
  111. /camel/{functions → toolkits}/open_api_specs/coursera/__init__.py +0 -0
  112. /camel/{functions → toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
  113. /camel/{functions → toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
  114. /camel/{functions → toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
  115. /camel/{functions → toolkits}/open_api_specs/klarna/__init__.py +0 -0
  116. /camel/{functions → toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
  117. /camel/{functions → toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
  118. /camel/{functions → toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
  119. /camel/{functions → toolkits}/open_api_specs/outschool/__init__.py +0 -0
  120. /camel/{functions → toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
  121. /camel/{functions → toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
  122. /camel/{functions → toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
  123. /camel/{functions → toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
  124. /camel/{functions → toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
  125. /camel/{functions → toolkits}/open_api_specs/security_config.py +0 -0
  126. /camel/{functions → toolkits}/open_api_specs/speak/__init__.py +0 -0
  127. /camel/{functions → toolkits}/open_api_specs/speak/openapi.yaml +0 -0
  128. /camel/{functions → toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
  129. /camel/{functions → toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
  130. /camel/{functions → toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
  131. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
  132. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
  133. {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.6.1.dist-info}/WHEEL +0 -0
camel/agents/role_assignment_agent.py CHANGED
@@ -20,14 +20,26 @@ from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
 from camel.types import RoleType
 
+# AgentOps decorator setting
+try:
+    from agentops import track_agent
+except ImportError:
 
+    def track_agent(name):
+        def noop(f):
+            return f
+
+        return noop
+
+
+@track_agent(name="RoleAssignmentAgent")
 class RoleAssignmentAgent(ChatAgent):
     r"""An agent that generates role names based on the task prompt.
 
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
 
     Attributes:
         role_assignment_prompt (TextPrompt): A prompt for the agent to generate
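
The `try`/`except ImportError` shim above keeps AgentOps instrumentation optional. A minimal, runnable sketch of the same pattern (the `DemoAgent` class is illustrative, not from the package):

```python
# If agentops is installed, track_agent instruments the class; otherwise it
# degrades to a factory returning a no-op decorator, so the module still
# imports and `@track_agent(name=...)` still works.
try:
    from agentops import track_agent
except ImportError:

    def track_agent(name):
        def noop(f):
            return f

        return noop


@track_agent(name="DemoAgent")  # hypothetical agent, for illustration only
class DemoAgent:
    pass


print(DemoAgent)  # the class comes back unchanged when agentops is absent
```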
camel/agents/search_agent.py CHANGED
@@ -20,16 +20,27 @@ from camel.prompts import TextPrompt
 from camel.types import RoleType
 from camel.utils import create_chunks
 
+# AgentOps decorator setting
+try:
+    from agentops import track_agent
+except ImportError:
 
+    def track_agent(name):
+        def noop(f):
+            return f
+
+        return noop
+
+
+@track_agent(name="SearchAgent")
 class SearchAgent(ChatAgent):
     r"""An agent that summarizes text based on a query and evaluates the
     relevance of an answer.
 
     Args:
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): The configuration for the model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_4O_MINI`)
     """
 
     def __init__(
@@ -76,7 +87,7 @@ class SearchAgent(ChatAgent):
             result = self.step(user_msg).msg.content
             results += result + "\n"
 
-        # Final summarise
+        # Final summarization
         final_prompt = TextPrompt(
             '''Here are some summarized texts which split from one text. Using
             the information to answer the question. If can't find the answer,
camel/agents/task_agent.py CHANGED
@@ -20,7 +20,19 @@ from camel.prompts import PromptTemplateGenerator, TextPrompt
 from camel.types import RoleType, TaskType
 from camel.utils import get_task_list
 
+# AgentOps decorator setting
+try:
+    from agentops import track_agent
+except ImportError:
 
+    def track_agent(name):
+        def noop(f):
+            return f
+
+        return noop
+
+
+@track_agent(name="TaskSpecifyAgent")
 class TaskSpecifyAgent(ChatAgent):
     r"""An agent that specifies a given task prompt by prompting the user to
     provide more details.
@@ -32,7 +44,7 @@ class TaskSpecifyAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         task_type (TaskType, optional): The type of task for which to generate
             a prompt. (default: :obj:`TaskType.AI_SOCIETY`)
         task_specify_prompt (Union[str, TextPrompt], optional): The prompt for
@@ -115,6 +127,7 @@ class TaskSpecifyAgent(ChatAgent):
         return TextPrompt(specified_task_msg.content)
 
 
+@track_agent(name="TaskPlannerAgent")
 class TaskPlannerAgent(ChatAgent):
     r"""An agent that helps divide a task into subtasks based on the input
     task prompt.
@@ -126,7 +139,7 @@ class TaskPlannerAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
     """
@@ -184,6 +197,7 @@ class TaskPlannerAgent(ChatAgent):
         return TextPrompt(sub_tasks_msg.content)
 
 
+@track_agent(name="TaskCreationAgent")
 class TaskCreationAgent(ChatAgent):
     r"""An agent that helps create new tasks based on the objective
     and last completed task. Compared to :obj:`TaskPlannerAgent`,
@@ -201,7 +215,7 @@ class TaskCreationAgent(ChatAgent):
             perform the task.
         model (BaseModelBackend, optional): The LLM backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
         message_window_size (int, optional): The maximum number of previous
@@ -233,7 +247,7 @@ The result must be a numbered list in the format:
 #. Third Task
 
 You can only give me up to {max_task_num} tasks at a time. \
-Each task shoud be concise, concrete and doable for a {role_name}.
+Each task should be concise, concrete and doable for a {role_name}.
 You should make task plan and not ask me questions.
 If you think no new tasks are needed right now, write "No tasks to add."
 Now start to give me new tasks one by one. No more than three tasks.
@@ -298,6 +312,7 @@ Be concrete.
         return get_task_list(sub_tasks_msg.content)
 
 
+@track_agent(name="TaskPrioritizationAgent")
 class TaskPrioritizationAgent(ChatAgent):
     r"""An agent that helps re-prioritize the task list and
     returns numbered prioritized list. Modified from
@@ -312,7 +327,7 @@ class TaskPrioritizationAgent(ChatAgent):
             perform the task.
         model (BaseModelBackend, optional): The LLM backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `GPT_3_5_TURBO`)
+            `GPT_4O_MINI`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
         message_window_size (int, optional): The maximum number of previous
camel/configs/__init__.py CHANGED
@@ -13,17 +13,13 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
-from .gemini_config import (
-    Gemini_API_PARAMS,
-    GeminiConfig,
-)
+from .gemini_config import Gemini_API_PARAMS, GeminiConfig
+from .groq_config import GROQ_API_PARAMS, GroqConfig
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
-from .openai_config import (
-    OPENAI_API_PARAMS,
-    ChatGPTConfig,
-    OpenSourceConfig,
-)
+from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
+from .vllm_config import VLLM_API_PARAMS, VLLMConfig
 from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
 
 __all__ = [
@@ -32,6 +28,8 @@ __all__ = [
     'OPENAI_API_PARAMS',
     'AnthropicConfig',
     'ANTHROPIC_API_PARAMS',
+    'GROQ_API_PARAMS',
+    'GroqConfig',
     'OpenSourceConfig',
     'LiteLLMConfig',
     'LITELLM_API_PARAMS',
@@ -41,4 +39,8 @@ __all__ = [
     'ZHIPUAI_API_PARAMS',
     'GeminiConfig',
     'Gemini_API_PARAMS',
+    'VLLMConfig',
+    'VLLM_API_PARAMS',
+    'MistralConfig',
+    'MISTRAL_API_PARAMS',
 ]
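
After this change, the configs for the three backends added in 0.1.6.1 are re-exported from the package root. A quick smoke check, assuming camel-ai 0.1.6.1 is installed:

```python
# Each new backend ships a Config class plus a set of accepted API params.
from camel.configs import (
    GROQ_API_PARAMS,
    MISTRAL_API_PARAMS,
    VLLM_API_PARAMS,
    GroqConfig,
    MistralConfig,
    VLLMConfig,
)

for params in (GROQ_API_PARAMS, MISTRAL_API_PARAMS, VLLM_API_PARAMS):
    assert isinstance(params, set)  # derived from pydantic model_fields
```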
camel/configs/anthropic_config.py CHANGED
@@ -13,14 +13,13 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from __future__ import annotations
 
-from dataclasses import asdict, dataclass
+from typing import List, Union
 
 from anthropic import NOT_GIVEN, NotGiven
 
 from camel.configs.base_config import BaseConfig
 
 
-@dataclass(frozen=True)
 class AnthropicConfig(BaseConfig):
     r"""Defines the parameters for generating chat completions using the
     Anthropic API.
@@ -62,12 +61,12 @@ class AnthropicConfig(BaseConfig):
     """
 
     max_tokens: int = 256
-    stop_sequences: list[str] | NotGiven = NOT_GIVEN
+    stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
     temperature: float = 1
-    top_p: float | NotGiven = NOT_GIVEN
-    top_k: int | NotGiven = NOT_GIVEN
+    top_p: Union[float, NotGiven] = NOT_GIVEN
+    top_k: Union[int, NotGiven] = NOT_GIVEN
     metadata: NotGiven = NOT_GIVEN
     stream: bool = False
 
 
-ANTHROPIC_API_PARAMS = {param for param in asdict(AnthropicConfig()).keys()}
+ANTHROPIC_API_PARAMS = {param for param in AnthropicConfig.model_fields.keys()}
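
The old `asdict(AnthropicConfig())` idiom needed a dataclass instance; pydantic's `model_fields` is a class attribute, so the param set is now read without constructing anything. A minimal sketch, assuming camel-ai 0.1.6.1 and the `anthropic` package are installed:

```python
from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig

# The param set is derived from the class itself...
assert ANTHROPIC_API_PARAMS == set(AnthropicConfig.model_fields.keys())

# ...and instances are frozen pydantic models rather than frozen dataclasses.
config = AnthropicConfig(max_tokens=512, temperature=0.7)
print(config.as_dict()["max_tokens"])  # 512
```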
camel/configs/base_config.py CHANGED
@@ -14,9 +14,55 @@
 from __future__ import annotations
 
 from abc import ABC
-from dataclasses import dataclass
+from typing import Any, List, Optional
 
+from pydantic import BaseModel, ConfigDict, field_validator
 
-@dataclass(frozen=True)
-class BaseConfig(ABC):  # noqa: B024
-    pass
+
+
+class BaseConfig(ABC, BaseModel):
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+        frozen=True,
+        # UserWarning: conflict with protected namespace "model_"
+        protected_namespaces=(),
+    )
+
+    tools: Optional[List[Any]] = None
+    """A list of tools the model may
+    call. Currently, only functions are supported as a tool. Use this
+    to provide a list of functions the model may generate JSON inputs
+    for. A max of 128 functions are supported.
+    """
+
+    @field_validator("tools", mode="before")
+    @classmethod
+    def fields_type_checking(cls, tools):
+        if tools is not None:
+            from camel.toolkits import OpenAIFunction
+
+            for tool in tools:
+                if not isinstance(tool, OpenAIFunction):
+                    raise ValueError(
+                        f"The tool {tool} should "
+                        "be an instance of `OpenAIFunction`."
+                    )
+        return tools
+
+    def as_dict(self) -> dict[str, Any]:
+        config_dict = self.model_dump()
+
+        tools_schema = None
+        if self.tools:
+            from camel.toolkits import OpenAIFunction
+
+            tools_schema = []
+            for tool in self.tools:
+                if not isinstance(tool, OpenAIFunction):
+                    raise ValueError(
+                        f"The tool {tool} should "
+                        "be an instance of `OpenAIFunction`."
+                    )
+                tools_schema.append(tool.get_openai_tool_schema())
+        config_dict["tools"] = tools_schema
+        return config_dict
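
With `BaseConfig` now a frozen pydantic model, every backend config inherits a validated `tools` field and an `as_dict()` that serializes tools into OpenAI tool schemas. A hedged sketch of that behavior, assuming camel-ai 0.1.6.1 (the `add` helper is illustrative):

```python
from camel.configs import ChatGPTConfig
from camel.toolkits import OpenAIFunction


def add(a: int, b: int) -> int:
    r"""Adds two numbers.

    Args:
        a (int): The first number.
        b (int): The second number.
    """
    return a + b


# Valid: tools must be OpenAIFunction instances (enforced by the validator).
config = ChatGPTConfig(temperature=0.0, tools=[OpenAIFunction(add)])
print(config.as_dict()["tools"])  # list of OpenAI tool JSON schemas

# Invalid: a bare callable is rejected with a ValueError at construction time.
# ChatGPTConfig(tools=[add])
```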
camel/configs/gemini_config.py CHANGED
@@ -14,13 +14,13 @@
 
 
 from collections.abc import Iterable
-from dataclasses import asdict, dataclass
-from typing import Optional
+from typing import Any, Optional
+
+from pydantic import model_validator
 
 from camel.configs.base_config import BaseConfig
 
 
-@dataclass(frozen=True)
 class GeminiConfig(BaseConfig):
     r"""A simple dataclass used to configure the generation parameters of
     `GenerativeModel.generate_content`.
@@ -72,14 +72,6 @@ class GeminiConfig(BaseConfig):
         Options for the request.
     """
 
-    from google.generativeai.protos import Schema
-    from google.generativeai.types.content_types import (
-        FunctionLibraryType,
-        ToolConfigType,
-    )
-    from google.generativeai.types.helper_types import RequestOptionsType
-    from google.generativeai.types.safety_types import SafetySettingOptions
-
     candidate_count: Optional[int] = None
     stop_sequences: Optional[Iterable[str]] = None
     max_output_tokens: Optional[int] = None
@@ -87,11 +79,71 @@ class GeminiConfig(BaseConfig):
     top_p: Optional[float] = None
     top_k: Optional[int] = None
     response_mime_type: Optional[str] = None
-    response_schema: Optional[Schema] = None
-    safety_settings: Optional[SafetySettingOptions] = None
-    tools: Optional[FunctionLibraryType] = None
-    tool_config: Optional[ToolConfigType] = None
-    request_options: Optional[RequestOptionsType] = None
+    response_schema: Optional[Any] = None
+    safety_settings: Optional[Any] = None
+    tools: Optional[Any] = None
+    tool_config: Optional[Any] = None
+    request_options: Optional[Any] = None
+
+    @model_validator(mode="before")
+    @classmethod
+    def fields_type_checking(cls, data: Any):
+        if isinstance(data, dict):
+            response_schema = data.get("response_schema")
+            safety_settings = data.get("safety_settings")
+            tools = data.get("tools")
+            tool_config = data.get("tool_config")
+            request_options = data.get("request_options")
+
+            if response_schema:
+                from google.generativeai.protos import Schema
+                from google.generativeai.types.content_types import (
+                    FunctionLibraryType,
+                    ToolConfigType,
+                )
+                from google.generativeai.types.helper_types import (
+                    RequestOptionsType,
+                )
+                from google.generativeai.types.safety_types import (
+                    SafetySettingOptions,
+                )
+            else:
+                return data
+
+            if response_schema and not isinstance(response_schema, Schema):
+                raise ValueError(
+                    "The response_schema should be "
+                    "an instance of `google.generativeai.protos.Schema`."
+                )
+
+            if safety_settings and not isinstance(
+                safety_settings, SafetySettingOptions
+            ):
+                raise ValueError(
+                    "The safety_settings should be an instance of "
+                    "`google.generativeai.types.safety_types.SafetySettingOptions`."
+                )
+
+            if tools is not None:
+                for tool in tools:
+                    if not isinstance(tool, FunctionLibraryType):
+                        raise ValueError(
+                            "The tool should be an instance of "
+                            "`google.generativeai.types.content_types.FunctionLibraryType`."
+                        )
+            if tool_config and not isinstance(tool_config, ToolConfigType):
+                raise ValueError(
+                    "The tool_config should be an instance of "
+                    "`google.generativeai.types.content_types.ToolConfigType`."
+                )
+            if request_options and not isinstance(
+                request_options, RequestOptionsType
+            ):
+                raise ValueError(
+                    "The request_options should be an instance of "
+                    "`google.generativeai.types.helper_types.RequestOptionsType`."
+                )
+        return data
 
 
-Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
+Gemini_API_PARAMS = {param for param in GeminiConfig().model_fields.keys()}
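
The removed class-body imports meant `GeminiConfig` pulled in `google-generativeai` just to be defined; the validator above defers those imports until a schema/tool field is actually supplied. A sketch, assuming camel-ai 0.1.6.1:

```python
from camel.configs import Gemini_API_PARAMS, GeminiConfig

# Plain generation parameters never touch google-generativeai:
config = GeminiConfig(top_p=0.95, max_output_tokens=256)
assert "max_output_tokens" in Gemini_API_PARAMS

# Only setting response_schema (and friends) triggers the validator, which
# then imports the google.generativeai types and type-checks the values.
```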
camel/configs/groq_config.py ADDED
@@ -0,0 +1,105 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from typing import Optional, Sequence, Union
+
+from openai._types import NOT_GIVEN, NotGiven
+
+from camel.configs.base_config import BaseConfig
+
+
+class GroqConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility.
+
+    Reference: https://console.groq.com/docs/openai
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`1.0`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`1`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`""`)
+        tools (list[OpenAIFunction], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: float = 0.2  # openai default: 1.0
+    top_p: float = 1.0
+    n: int = 1
+    stream: bool = False
+    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
+    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    presence_penalty: float = 0.0
+    response_format: Union[dict, NotGiven] = NOT_GIVEN
+    frequency_penalty: float = 0.0
+    user: str = ""
+    tool_choice: Optional[Union[dict[str, str], str]] = "none"
+
+
+GROQ_API_PARAMS = {param for param in GroqConfig.model_fields.keys()}
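
Since Groq exposes an OpenAI-compatible endpoint, `GroqConfig` mirrors `ChatGPTConfig`'s fields. A short usage sketch, assuming camel-ai 0.1.6.1:

```python
from camel.configs import GROQ_API_PARAMS, GroqConfig

config = GroqConfig(temperature=0.0, stream=True)
assert GROQ_API_PARAMS == set(GroqConfig.model_fields.keys())

# BaseConfig sets extra="forbid", so misspelled parameters fail fast:
# GroqConfig(max_token=1024)  # -> pydantic ValidationError
```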
camel/configs/litellm_config.py CHANGED
@@ -13,16 +13,11 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from __future__ import annotations
 
-from dataclasses import asdict, dataclass
-from typing import TYPE_CHECKING, List, Optional, Union
+from typing import List, Optional, Union
 
 from camel.configs.base_config import BaseConfig
 
-if TYPE_CHECKING:
-    from camel.functions import OpenAIFunction
 
-
-@dataclass(frozen=True)
 class LiteLLMConfig(BaseConfig):
     r"""Defines the parameters for generating chat completions using the
     LiteLLM API.
@@ -88,7 +83,6 @@ class LiteLLMConfig(BaseConfig):
     user: Optional[str] = None
     response_format: Optional[dict] = None
     seed: Optional[int] = None
-    tools: Optional[list[OpenAIFunction]] = None
     tool_choice: Optional[Union[str, dict]] = None
     logprobs: Optional[bool] = None
     top_logprobs: Optional[int] = None
@@ -100,4 +94,4 @@ class LiteLLMConfig(BaseConfig):
     max_retries: Optional[int] = None
 
 
-LITELLM_API_PARAMS = {param for param in asdict(LiteLLMConfig()).keys()}
+LITELLM_API_PARAMS = {param for param in LiteLLMConfig.model_fields.keys()}
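
Note that `tools` is not dropped from `LiteLLMConfig`'s API surface; it now comes from the shared pydantic `BaseConfig` (see `camel/configs/base_config.py` above), which also removes the `TYPE_CHECKING` import dance. A quick check, assuming camel-ai 0.1.6.1:

```python
from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig

assert "tools" in LiteLLMConfig.model_fields  # inherited from BaseConfig
assert LITELLM_API_PARAMS == set(LiteLLMConfig.model_fields.keys())
```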
camel/configs/mistral_config.py ADDED
@@ -0,0 +1,78 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from typing import Any, Dict, Optional, Union
+
+from pydantic import field_validator
+
+from camel.configs.base_config import BaseConfig
+
+
+class MistralConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    Mistral API.
+
+    Reference: https://github.com/mistralai/client-python/blob/9d238f88c41689821d7b08570f13b43426f97fd6/src/mistralai/client.py#L195
+
+    Args:
+        temperature (Optional[float], optional): the temperature to use for
+            sampling, e.g. 0.5.
+        max_tokens (Optional[int], optional): the maximum number of tokens to
+            generate, e.g. 100. Defaults to None.
+        top_p (Optional[float], optional): the cumulative probability of
+            tokens to generate, e.g. 0.9. Defaults to None.
+        random_seed (Optional[int], optional): the random seed to use for
+            sampling, e.g. 42. Defaults to None.
+        safe_mode (bool, optional): deprecated, use safe_prompt instead.
+            Defaults to False.
+        safe_prompt (bool, optional): whether to use safe prompt, e.g. true.
+            Defaults to False.
+        response_format (Union[Dict[str, str], ResponseFormat], optional):
+            format of the response.
+        tools (Optional[list[OpenAIFunction]], optional): a list of tools to
+            use.
+        tool_choice (str, optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"any"` means the
+            model must call one or more tools. :obj:`"auto"` is the default
+            value.
+    """
+
+    temperature: Optional[float] = None
+    max_tokens: Optional[int] = None
+    top_p: Optional[float] = None
+    random_seed: Optional[int] = None
+    safe_mode: bool = False
+    safe_prompt: bool = False
+    response_format: Optional[Union[Dict[str, str], Any]] = None
+    tool_choice: Optional[str] = "auto"
+
+    @field_validator("response_format", mode="before")
+    @classmethod
+    def fields_type_checking(cls, response_format):
+        if response_format and not isinstance(response_format, dict):
+            from mistralai.models.chat_completion import ResponseFormat
+
+            if not isinstance(response_format, ResponseFormat):
+                raise ValueError(
+                    f"The response_format {response_format} should be an "
+                    "instance of `mistralai.models.chat_completion.ResponseFormat`."
+                )
+        return response_format
+
+
+MISTRAL_API_PARAMS = {param for param in MistralConfig().model_fields.keys()}
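
A usage sketch for the new `MistralConfig`, assuming camel-ai 0.1.6.1 is installed: a plain dict `response_format` passes the validator untouched, so the `mistralai` package is only imported when some other type is supplied:

```python
from camel.configs import MISTRAL_API_PARAMS, MistralConfig

config = MistralConfig(
    temperature=0.5,
    response_format={"type": "json_object"},  # dict form needs no mistralai import
)
assert "safe_prompt" in MISTRAL_API_PARAMS
```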
camel/configs/ollama_config.py CHANGED
@@ -13,15 +13,13 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from __future__ import annotations
 
-from dataclasses import asdict, dataclass
-from typing import Sequence
+from typing import Sequence, Union
 
 from openai._types import NOT_GIVEN, NotGiven
 
 from camel.configs.base_config import BaseConfig
 
 
-@dataclass(frozen=True)
 class OllamaConfig(BaseConfig):
     r"""Defines the parameters for generating chat completions using OpenAI
     compatibility
@@ -75,11 +73,11 @@ class OllamaConfig(BaseConfig):
     temperature: float = 0.2
     top_p: float = 1.0
     stream: bool = False
-    stop: str | Sequence[str] | NotGiven = NOT_GIVEN
-    max_tokens: int | NotGiven = NOT_GIVEN
+    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
+    max_tokens: Union[int, NotGiven] = NOT_GIVEN
     presence_penalty: float = 0.0
-    response_format: dict | NotGiven = NOT_GIVEN
+    response_format: Union[dict, NotGiven] = NOT_GIVEN
     frequency_penalty: float = 0.0
 
 
-OLLAMA_API_PARAMS = {param for param in asdict(OllamaConfig()).keys()}
+OLLAMA_API_PARAMS = {param for param in OllamaConfig.model_fields.keys()}
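
The `str | Sequence[str]` to `Union[str, Sequence[str]]` rewrite here (and in `AnthropicConfig`) is likely a compatibility fix: pydantic evaluates field annotations at class-creation time, and PEP 604 unions raise `TypeError` on Python < 3.10 even under `from __future__ import annotations` (an inference from the code, not stated in the release). The fields behave the same:

```python
from camel.configs import OLLAMA_API_PARAMS, OllamaConfig

config = OllamaConfig(temperature=0.2, stop=["\n\n"])  # a str or a sequence both validate
assert "stop" in OLLAMA_API_PARAMS
```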