camel-ai 0.1.1__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic; see the advisory on the package registry page for more details.

Files changed (117)
  1. camel/__init__.py +1 -11
  2. camel/agents/__init__.py +7 -5
  3. camel/agents/chat_agent.py +134 -86
  4. camel/agents/critic_agent.py +28 -17
  5. camel/agents/deductive_reasoner_agent.py +235 -0
  6. camel/agents/embodied_agent.py +92 -40
  7. camel/agents/knowledge_graph_agent.py +221 -0
  8. camel/agents/role_assignment_agent.py +27 -17
  9. camel/agents/task_agent.py +60 -34
  10. camel/agents/tool_agents/base.py +0 -1
  11. camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
  12. camel/configs/__init__.py +29 -0
  13. camel/configs/anthropic_config.py +73 -0
  14. camel/configs/base_config.py +22 -0
  15. camel/{configs.py → configs/openai_config.py} +37 -64
  16. camel/embeddings/__init__.py +2 -0
  17. camel/embeddings/base.py +3 -2
  18. camel/embeddings/openai_embedding.py +10 -5
  19. camel/embeddings/sentence_transformers_embeddings.py +65 -0
  20. camel/functions/__init__.py +18 -3
  21. camel/functions/google_maps_function.py +335 -0
  22. camel/functions/math_functions.py +7 -7
  23. camel/functions/open_api_function.py +380 -0
  24. camel/functions/open_api_specs/coursera/__init__.py +13 -0
  25. camel/functions/open_api_specs/coursera/openapi.yaml +82 -0
  26. camel/functions/open_api_specs/klarna/__init__.py +13 -0
  27. camel/functions/open_api_specs/klarna/openapi.yaml +87 -0
  28. camel/functions/open_api_specs/speak/__init__.py +13 -0
  29. camel/functions/open_api_specs/speak/openapi.yaml +151 -0
  30. camel/functions/openai_function.py +346 -42
  31. camel/functions/retrieval_functions.py +61 -0
  32. camel/functions/search_functions.py +100 -35
  33. camel/functions/slack_functions.py +275 -0
  34. camel/functions/twitter_function.py +484 -0
  35. camel/functions/weather_functions.py +36 -23
  36. camel/generators.py +65 -46
  37. camel/human.py +17 -11
  38. camel/interpreters/__init__.py +25 -0
  39. camel/interpreters/base.py +49 -0
  40. camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
  41. camel/interpreters/interpreter_error.py +19 -0
  42. camel/interpreters/subprocess_interpreter.py +190 -0
  43. camel/loaders/__init__.py +22 -0
  44. camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
  45. camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
  46. camel/memories/__init__.py +17 -7
  47. camel/memories/agent_memories.py +156 -0
  48. camel/memories/base.py +97 -32
  49. camel/memories/blocks/__init__.py +21 -0
  50. camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
  51. camel/memories/blocks/vectordb_block.py +101 -0
  52. camel/memories/context_creators/__init__.py +3 -2
  53. camel/memories/context_creators/score_based.py +32 -20
  54. camel/memories/records.py +6 -5
  55. camel/messages/__init__.py +2 -2
  56. camel/messages/base.py +99 -16
  57. camel/messages/func_message.py +7 -4
  58. camel/models/__init__.py +6 -2
  59. camel/models/anthropic_model.py +146 -0
  60. camel/models/base_model.py +10 -3
  61. camel/models/model_factory.py +17 -11
  62. camel/models/open_source_model.py +25 -13
  63. camel/models/openai_audio_models.py +251 -0
  64. camel/models/openai_model.py +20 -13
  65. camel/models/stub_model.py +10 -5
  66. camel/prompts/__init__.py +7 -5
  67. camel/prompts/ai_society.py +21 -14
  68. camel/prompts/base.py +54 -47
  69. camel/prompts/code.py +22 -14
  70. camel/prompts/evaluation.py +8 -5
  71. camel/prompts/misalignment.py +26 -19
  72. camel/prompts/object_recognition.py +35 -0
  73. camel/prompts/prompt_templates.py +14 -8
  74. camel/prompts/role_description_prompt_template.py +16 -10
  75. camel/prompts/solution_extraction.py +9 -5
  76. camel/prompts/task_prompt_template.py +24 -21
  77. camel/prompts/translation.py +9 -5
  78. camel/responses/agent_responses.py +5 -2
  79. camel/retrievers/__init__.py +26 -0
  80. camel/retrievers/auto_retriever.py +330 -0
  81. camel/retrievers/base.py +69 -0
  82. camel/retrievers/bm25_retriever.py +140 -0
  83. camel/retrievers/cohere_rerank_retriever.py +108 -0
  84. camel/retrievers/vector_retriever.py +183 -0
  85. camel/societies/__init__.py +1 -1
  86. camel/societies/babyagi_playing.py +56 -32
  87. camel/societies/role_playing.py +188 -133
  88. camel/storages/__init__.py +18 -0
  89. camel/storages/graph_storages/__init__.py +23 -0
  90. camel/storages/graph_storages/base.py +82 -0
  91. camel/storages/graph_storages/graph_element.py +74 -0
  92. camel/storages/graph_storages/neo4j_graph.py +582 -0
  93. camel/storages/key_value_storages/base.py +1 -2
  94. camel/storages/key_value_storages/in_memory.py +1 -2
  95. camel/storages/key_value_storages/json.py +8 -13
  96. camel/storages/vectordb_storages/__init__.py +33 -0
  97. camel/storages/vectordb_storages/base.py +202 -0
  98. camel/storages/vectordb_storages/milvus.py +396 -0
  99. camel/storages/vectordb_storages/qdrant.py +373 -0
  100. camel/terminators/__init__.py +1 -1
  101. camel/terminators/base.py +2 -3
  102. camel/terminators/response_terminator.py +21 -12
  103. camel/terminators/token_limit_terminator.py +5 -3
  104. camel/toolkits/__init__.py +21 -0
  105. camel/toolkits/base.py +22 -0
  106. camel/toolkits/github_toolkit.py +245 -0
  107. camel/types/__init__.py +18 -6
  108. camel/types/enums.py +129 -15
  109. camel/types/openai_types.py +10 -5
  110. camel/utils/__init__.py +20 -13
  111. camel/utils/commons.py +170 -85
  112. camel/utils/token_counting.py +135 -15
  113. {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/METADATA +123 -75
  114. camel_ai-0.1.4.dist-info/RECORD +119 -0
  115. {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/WHEEL +1 -1
  116. camel/memories/context_creators/base.py +0 -72
  117. camel_ai-0.1.1.dist-info/RECORD +0 -75
@@ -14,7 +14,7 @@
14
14
  import re
15
15
  from typing import Any, Dict, Optional, Union
16
16
 
17
- from camel.agents import ChatAgent
17
+ from camel.agents.chat_agent import ChatAgent
18
18
  from camel.messages import BaseMessage
19
19
  from camel.prompts import TextPrompt
20
20
  from camel.types import ModelType, RoleType
@@ -68,21 +68,27 @@ class RoleAssignmentAgent(ChatAgent):
68
68
  expert_prompt = "===== ANSWER PROMPT =====\n" + "\n".join(
69
69
  f"Domain expert {i + 1}: <BLANK>\n"
70
70
  f"Associated competencies, characteristics, duties "
71
- f"and workflows: <BLANK>. End." for i in range(num_roles or 0))
71
+ f"and workflows: <BLANK>. End."
72
+ for i in range(num_roles or 0)
73
+ )
72
74
  role_assignment_generation_prompt = TextPrompt(
73
- "You are a role assignment agent, and you're in charge of " +
74
- "recruiting {num_roles} experts for the following task." +
75
- "\n==== TASK =====\n {task}\n\n" +
76
- "Identify the domain experts you'd recruit and detail their " +
77
- "associated competencies, characteristics, duties and workflows " +
78
- "to complete the task.\n " +
79
- "Your answer MUST adhere to the format of ANSWER PROMPT, and " +
80
- "ONLY answer the BLANKs.\n" + expert_prompt)
75
+ "You are a role assignment agent, and you're in charge of "
76
+ + "recruiting {num_roles} experts for the following task."
77
+ + "\n==== TASK =====\n {task}\n\n"
78
+ + "Identify the domain experts you'd recruit and detail their "
79
+ + "associated competencies, characteristics, duties and workflows "
80
+ + "to complete the task.\n "
81
+ + "Your answer MUST adhere to the format of ANSWER PROMPT, and "
82
+ + "ONLY answer the BLANKs.\n"
83
+ + expert_prompt
84
+ )
81
85
  role_assignment_generation = role_assignment_generation_prompt.format(
82
- num_roles=num_roles, task=task_prompt)
86
+ num_roles=num_roles, task=task_prompt
87
+ )
83
88
 
84
89
  role_assignment_generation_msg = BaseMessage.make_user_message(
85
- role_name="Role Assigner", content=role_assignment_generation)
90
+ role_name="Role Assigner", content=role_assignment_generation
91
+ )
86
92
 
87
93
  response = self.step(input_message=role_assignment_generation_msg)
88
94
 
@@ -91,21 +97,25 @@ class RoleAssignmentAgent(ChatAgent):
91
97
 
92
98
  # Distribute the output completions into role names and descriptions
93
99
  role_names = [
94
- desc.replace("<|", "").replace("|>", "") for desc in re.findall(
100
+ desc.replace("<|", "").replace("|>", "")
101
+ for desc in re.findall(
95
102
  r"Domain expert \d: (.+?)\nAssociated competencies,",
96
103
  msg.content,
97
104
  re.DOTALL,
98
105
  )
99
106
  ]
100
107
  role_descriptions = [
101
- desc.replace("<|", "").replace("|>", "") for desc in re.findall(
108
+ desc.replace("<|", "").replace("|>", "")
109
+ for desc in re.findall(
102
110
  r"Associated competencies, characteristics, "
103
- r"duties and workflows: (.+?) End.", msg.content, re.DOTALL)
111
+ r"duties and workflows: (.+?) End.",
112
+ msg.content,
113
+ re.DOTALL,
114
+ )
104
115
  ]
105
116
 
106
117
  if len(role_names) != num_roles or len(role_descriptions) != num_roles:
107
- raise RuntimeError(
108
- "Got None or insufficient information of roles.")
118
+ raise RuntimeError("Got None or insufficient information of roles.")
109
119
  if terminated:
110
120
  raise RuntimeError("Role assignment failed.")
111
121
 
@@ -13,7 +13,7 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  from typing import Any, Dict, List, Optional, Union
15
15
 
16
- from camel.agents import ChatAgent
16
+ from camel.agents.chat_agent import ChatAgent
17
17
  from camel.configs import ChatGPTConfig
18
18
  from camel.messages import BaseMessage
19
19
  from camel.prompts import PromptTemplateGenerator, TextPrompt
@@ -43,6 +43,7 @@ class TaskSpecifyAgent(ChatAgent):
43
43
  output_language (str, optional): The language to be output by the
44
44
  agent. (default: :obj:`None`)
45
45
  """
46
+
46
47
  DEFAULT_WORD_LIMIT = 50
47
48
 
48
49
  def __init__(
@@ -54,14 +55,15 @@ class TaskSpecifyAgent(ChatAgent):
54
55
  word_limit: int = DEFAULT_WORD_LIMIT,
55
56
  output_language: Optional[str] = None,
56
57
  ) -> None:
57
-
58
58
  self.task_specify_prompt: Union[str, TextPrompt]
59
59
  if task_specify_prompt is None:
60
- task_specify_prompt_template = PromptTemplateGenerator(
61
- ).get_task_specify_prompt(task_type)
60
+ task_specify_prompt_template = (
61
+ PromptTemplateGenerator().get_task_specify_prompt(task_type)
62
+ )
62
63
 
63
64
  self.task_specify_prompt = task_specify_prompt_template.format(
64
- word_limit=word_limit)
65
+ word_limit=word_limit
66
+ )
65
67
  else:
66
68
  self.task_specify_prompt = TextPrompt(task_specify_prompt)
67
69
 
@@ -74,9 +76,12 @@ class TaskSpecifyAgent(ChatAgent):
74
76
  content="You can make a task more specific.",
75
77
  )
76
78
 
77
- super().__init__(system_message, model_type=model_type,
78
- model_config=model_config,
79
- output_language=output_language)
79
+ super().__init__(
80
+ system_message,
81
+ model_type=model_type,
82
+ model_config=model_config,
83
+ output_language=output_language,
84
+ )
80
85
 
81
86
  def run(
82
87
  self,
@@ -101,8 +106,9 @@ class TaskSpecifyAgent(ChatAgent):
101
106
  if meta_dict is not None:
102
107
  task_specify_prompt = task_specify_prompt.format(**meta_dict)
103
108
 
104
- task_msg = BaseMessage.make_user_message(role_name="Task Specifier",
105
- content=task_specify_prompt)
109
+ task_msg = BaseMessage.make_user_message(
110
+ role_name="Task Specifier", content=task_specify_prompt
111
+ )
106
112
  specifier_response = self.step(task_msg)
107
113
 
108
114
  if specifier_response.terminated:
@@ -138,9 +144,9 @@ class TaskPlannerAgent(ChatAgent):
138
144
  model_config: Optional[Any] = None,
139
145
  output_language: Optional[str] = None,
140
146
  ) -> None:
141
-
142
147
  self.task_planner_prompt = TextPrompt(
143
- "Divide this task into subtasks: {task}. Be concise.")
148
+ "Divide this task into subtasks: {task}. Be concise."
149
+ )
144
150
  system_message = BaseMessage(
145
151
  role_name="Task Planner",
146
152
  role_type=RoleType.ASSISTANT,
@@ -148,8 +154,12 @@ class TaskPlannerAgent(ChatAgent):
148
154
  content="You are a helpful task planner.",
149
155
  )
150
156
 
151
- super().__init__(system_message, model_type, model_config,
152
- output_language=output_language)
157
+ super().__init__(
158
+ system_message,
159
+ model_type,
160
+ model_config,
161
+ output_language=output_language,
162
+ )
153
163
 
154
164
  def run(
155
165
  self,
@@ -168,8 +178,9 @@ class TaskPlannerAgent(ChatAgent):
168
178
  self.reset()
169
179
  task_planner_prompt = self.task_planner_prompt.format(task=task_prompt)
170
180
 
171
- task_msg = BaseMessage.make_user_message(role_name="Task Planner",
172
- content=task_planner_prompt)
181
+ task_msg = BaseMessage.make_user_message(
182
+ role_name="Task Planner", content=task_planner_prompt
183
+ )
173
184
 
174
185
  task_response = self.step(task_msg)
175
186
 
@@ -220,7 +231,6 @@ class TaskCreationAgent(ChatAgent):
220
231
  message_window_size: Optional[int] = None,
221
232
  max_task_num: Optional[int] = 3,
222
233
  ) -> None:
223
-
224
234
  task_creation_prompt = TextPrompt(
225
235
  """Create new a task with the following objective: {objective}.
226
236
  Never forget you are a Task Creator of {role_name}.
@@ -239,11 +249,12 @@ You should make task plan and not ask me questions.
239
249
  If you think no new tasks are needed right now, write "No tasks to add."
240
250
  Now start to give me new tasks one by one. No more than three tasks.
241
251
  Be concrete.
242
- """)
252
+ """
253
+ )
243
254
 
244
255
  self.task_creation_prompt = task_creation_prompt.format(
245
- objective=objective, role_name=role_name,
246
- max_task_num=max_task_num)
256
+ objective=objective, role_name=role_name, max_task_num=max_task_num
257
+ )
247
258
  self.objective = objective
248
259
 
249
260
  system_message = BaseMessage(
@@ -253,9 +264,13 @@ Be concrete.
253
264
  content="You are a helpful task creator.",
254
265
  )
255
266
 
256
- super().__init__(system_message, model_type, model_config,
257
- output_language=output_language,
258
- message_window_size=message_window_size)
267
+ super().__init__(
268
+ system_message,
269
+ model_type,
270
+ model_config,
271
+ output_language=output_language,
272
+ message_window_size=message_window_size,
273
+ )
259
274
 
260
275
  def run(
261
276
  self,
@@ -273,13 +288,16 @@ Be concrete.
273
288
 
274
289
  if len(task_list) > 0:
275
290
  task_creation_prompt = self.task_creation_prompt.format(
276
- task_list=task_list)
291
+ task_list=task_list
292
+ )
277
293
  else:
278
294
  task_creation_prompt = self.task_creation_prompt.format(
279
- task_list="")
295
+ task_list=""
296
+ )
280
297
 
281
- task_msg = BaseMessage.make_user_message(role_name="Task Creator",
282
- content=task_creation_prompt)
298
+ task_msg = BaseMessage.make_user_message(
299
+ role_name="Task Creator", content=task_creation_prompt
300
+ )
283
301
  task_response = self.step(task_msg)
284
302
 
285
303
  if task_response.terminated:
@@ -337,10 +355,12 @@ The result must be a numbered list in the format:
337
355
  The entries must be consecutively numbered, starting with 1.
338
356
  The number of each entry must be followed by a period.
339
357
  Do not include any headers before your ranked list or follow your list \
340
- with any other output.""")
358
+ with any other output."""
359
+ )
341
360
 
342
361
  self.task_prioritization_prompt = task_prioritization_prompt.format(
343
- objective=objective)
362
+ objective=objective
363
+ )
344
364
  self.objective = objective
345
365
 
346
366
  system_message = BaseMessage(
@@ -350,9 +370,13 @@ with any other output.""")
350
370
  content="You are a helpful task prioritizer.",
351
371
  )
352
372
 
353
- super().__init__(system_message, model_type, model_config,
354
- output_language=output_language,
355
- message_window_size=message_window_size)
373
+ super().__init__(
374
+ system_message,
375
+ model_type,
376
+ model_config,
377
+ output_language=output_language,
378
+ message_window_size=message_window_size,
379
+ )
356
380
 
357
381
  def run(
358
382
  self,
@@ -366,10 +390,12 @@ with any other output.""")
366
390
  List[str]: The new prioritized task list generated by the Agent.
367
391
  """
368
392
  task_prioritization_prompt = self.task_prioritization_prompt.format(
369
- task_list=task_list)
393
+ task_list=task_list
394
+ )
370
395
 
371
396
  task_msg = BaseMessage.make_user_message(
372
- role_name="Task Prioritizer", content=task_prioritization_prompt)
397
+ role_name="Task Prioritizer", content=task_prioritization_prompt
398
+ )
373
399
 
374
400
  task_response = self.step(task_msg)
375
401
 
@@ -24,7 +24,6 @@ class BaseToolAgent(BaseAgent):
24
24
  """
25
25
 
26
26
  def __init__(self, name: str, description: str) -> None:
27
-
28
27
  self.name = name
29
28
  self.description = description
30
29
 
@@ -13,7 +13,7 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  from typing import Any, Optional
15
15
 
16
- from camel.agents.tool_agents import BaseToolAgent
16
+ from camel.agents.tool_agents.base import BaseToolAgent
17
17
 
18
18
 
19
19
  # flake8: noqa :E501
@@ -44,10 +44,13 @@ class HuggingFaceToolAgent(BaseToolAgent):
44
44
  # TODO: Support other tool agents
45
45
  import transformers
46
46
  from packaging import version
47
- if version.parse(
48
- transformers.__version__) < version.parse("4.31.0"):
47
+
48
+ if version.parse(transformers.__version__) < version.parse(
49
+ "4.31.0"
50
+ ):
49
51
  raise ValueError(
50
- "The version of \"transformers\" package should >= 4.31.0")
52
+ "The version of \"transformers\" package should >= 4.31.0"
53
+ )
51
54
 
52
55
  from transformers.tools import OpenAiAgent
53
56
  from transformers.tools.agent_types import AgentImage
@@ -0,0 +1,29 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
15
+ from .base_config import BaseConfig
16
+ from .openai_config import (
17
+ OPENAI_API_PARAMS,
18
+ ChatGPTConfig,
19
+ OpenSourceConfig,
20
+ )
21
+
22
+ __all__ = [
23
+ 'BaseConfig',
24
+ 'ChatGPTConfig',
25
+ 'OPENAI_API_PARAMS',
26
+ 'AnthropicConfig',
27
+ 'ANTHROPIC_API_PARAMS',
28
+ 'OpenSourceConfig',
29
+ ]
@@ -0,0 +1,73 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from dataclasses import asdict, dataclass
17
+
18
+ from anthropic._types import NOT_GIVEN, NotGiven
19
+
20
+ from camel.configs.base_config import BaseConfig
21
+
22
+
23
+ @dataclass(frozen=True)
24
+ class AnthropicConfig(BaseConfig):
25
+ r"""Defines the parameters for generating chat completions using the
26
+ Anthropic API.
27
+
28
+ See: https://docs.anthropic.com/claude/reference/complete_post
29
+ Args:
30
+ max_tokens (int, optional): The maximum number of tokens to
31
+ generate before stopping. Note that Anthropic models may stop
32
+ before reaching this maximum. This parameter only specifies the
33
+ absolute maximum number of tokens to generate.
34
+ (default: :obj:`256`)
35
+ stop_sequences (List[str], optional): Sequences that will cause the
36
+ model to stop generating completion text. Anthropic models stop
37
+ on "\n\nHuman:", and may include additional built-in stop sequences
38
+ in the future. By providing the stop_sequences parameter, you may
39
+ include additional strings that will cause the model to stop
40
+ generating.
41
+ temperature (float, optional): Amount of randomness injected into the
42
+ response. Defaults to 1. Ranges from 0 to 1. Use temp closer to 0
43
+ for analytical / multiple choice, and closer to 1 for creative
44
+ and generative tasks.
45
+ (default: :obj:`1`)
46
+ top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
47
+ compute the cumulative distribution over all the options for each
48
+ subsequent token in decreasing probability order and cut it off
49
+ once it reaches a particular probability specified by `top_p`.
50
+ You should either alter `temperature` or `top_p`,
51
+ but not both.
52
+ (default: :obj:`0.7`)
53
+ top_k (int, optional): Only sample from the top K options for each
54
+ subsequent token. Used to remove "long tail" low probability
55
+ responses.
56
+ (default: :obj:`5`)
57
+ metadata: An object describing metadata about the request.
58
+ stream (bool, optional): Whether to incrementally stream the response
59
+ using server-sent events.
60
+ (default: :obj:`False`)
61
+
62
+ """
63
+
64
+ max_tokens: int = 256
65
+ stop_sequences: list[str] | NotGiven = NOT_GIVEN
66
+ temperature: float = 1
67
+ top_p: float | NotGiven = NOT_GIVEN
68
+ top_k: int | NotGiven = NOT_GIVEN
69
+ metadata: NotGiven = NOT_GIVEN
70
+ stream: bool = False
71
+
72
+
73
+ ANTHROPIC_API_PARAMS = {param for param in asdict(AnthropicConfig()).keys()}
@@ -0,0 +1,22 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from abc import ABC
17
+ from dataclasses import dataclass
18
+
19
+
20
+ @dataclass(frozen=True)
21
+ class BaseConfig(ABC): # noqa: B024
22
+ pass
@@ -11,16 +11,17 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from abc import ABC
14
+ from __future__ import annotations
15
+
15
16
  from dataclasses import asdict, dataclass, field
16
- from typing import Any, Dict, List, Optional, Sequence, Union
17
+ from typing import TYPE_CHECKING, Optional, Sequence
17
18
 
18
- from camel.functions import OpenAIFunction
19
+ from openai._types import NOT_GIVEN, NotGiven
19
20
 
21
+ from camel.configs.base_config import BaseConfig
20
22
 
21
- @dataclass(frozen=True)
22
- class BaseConfig(ABC):
23
- pass
23
+ if TYPE_CHECKING:
24
+ from camel.functions import OpenAIFunction
24
25
 
25
26
 
26
27
  @dataclass(frozen=True)
@@ -71,67 +72,45 @@ class ChatGPTConfig(BaseConfig):
71
72
  user (str, optional): A unique identifier representing your end-user,
72
73
  which can help OpenAI to monitor and detect abuse.
73
74
  (default: :obj:`""`)
75
+ tools (list[OpenAIFunction], optional): A list of tools the model may
76
+ call. Currently, only functions are supported as a tool. Use this
77
+ to provide a list of functions the model may generate JSON inputs
78
+ for. A max of 128 functions are supported.
79
+ tool_choice (Union[dict[str, str], str], optional): Controls which (if
80
+ any) tool is called by the model. :obj:`"none"` means the model
81
+ will not call any tool and instead generates a message.
82
+ :obj:`"auto"` means the model can pick between generating a
83
+ message or calling one or more tools. :obj:`"required"` means the
84
+ model must call one or more tools. Specifying a particular tool
85
+ via {"type": "function", "function": {"name": "my_function"}}
86
+ forces the model to call that tool. :obj:`"none"` is the default
87
+ when no tools are present. :obj:`"auto"` is the default if tools
88
+ are present.
74
89
  """
90
+
75
91
  temperature: float = 0.2 # openai default: 1.0
76
92
  top_p: float = 1.0
77
93
  n: int = 1
78
94
  stream: bool = False
79
- stop: Optional[Union[str, Sequence[str]]] = None
80
- max_tokens: Optional[int] = None
95
+ stop: str | Sequence[str] | NotGiven = NOT_GIVEN
96
+ max_tokens: int | NotGiven = NOT_GIVEN
81
97
  presence_penalty: float = 0.0
82
98
  frequency_penalty: float = 0.0
83
- logit_bias: Dict = field(default_factory=dict)
99
+ logit_bias: dict = field(default_factory=dict)
84
100
  user: str = ""
101
+ tools: Optional[list[OpenAIFunction]] = None
102
+ tool_choice: Optional[dict[str, str] | str] = None
85
103
 
104
+ def __post_init__(self):
105
+ if self.tools is not None:
106
+ object.__setattr__(
107
+ self,
108
+ 'tools',
109
+ [tool.get_openai_tool_schema() for tool in self.tools],
110
+ )
86
111
 
87
- @dataclass(frozen=True)
88
- class FunctionCallingConfig(ChatGPTConfig):
89
- r"""Defines the parameters for generating chat completions using the
90
- OpenAI API with functions included.
91
-
92
- Args:
93
- functions (List[Dict[str, Any]]): A list of functions the model may
94
- generate JSON inputs for.
95
- function_call (Union[Dict[str, str], str], optional): Controls how the
96
- model responds to function calls. :obj:`"none"` means the model
97
- does not call a function, and responds to the end-user.
98
- :obj:`"auto"` means the model can pick between an end-user or
99
- calling a function. Specifying a particular function via
100
- :obj:`{"name": "my_function"}` forces the model to call that
101
- function. (default: :obj:`"auto"`)
102
- """
103
- functions: List[Dict[str, Any]] = field(default_factory=list)
104
- function_call: Union[Dict[str, str], str] = "auto"
105
112
 
106
- @classmethod
107
- def from_openai_function_list(
108
- cls,
109
- function_list: List[OpenAIFunction],
110
- function_call: Union[Dict[str, str], str] = "auto",
111
- kwargs: Optional[Dict[str, Any]] = None,
112
- ):
113
- r"""Class method for creating an instance given the function-related
114
- arguments.
115
-
116
- Args:
117
- function_list (List[OpenAIFunction]): The list of function objects
118
- to be loaded into this configuration and passed to the model.
119
- function_call (Union[Dict[str, str], str], optional): Controls how
120
- the model responds to function calls, as specified in the
121
- creator's documentation.
122
- kwargs (Optional[Dict[str, Any]]): The extra modifications to be
123
- made on the original settings defined in :obj:`ChatGPTConfig`.
124
-
125
- Return:
126
- FunctionCallingConfig: A new instance which loads the given
127
- function list into a list of dictionaries and the input
128
- :obj:`function_call` argument.
129
- """
130
- return cls(
131
- functions=[func.as_dict() for func in function_list],
132
- function_call=function_call,
133
- **(kwargs or {}),
134
- )
113
+ OPENAI_API_PARAMS = {param for param in asdict(ChatGPTConfig()).keys()}
135
114
 
136
115
 
137
116
  @dataclass(frozen=True)
@@ -147,13 +126,7 @@ class OpenSourceConfig(BaseConfig):
147
126
  api_params (ChatGPTConfig): An instance of :obj:ChatGPTConfig to
148
127
  contain the arguments to be passed to OpenAI API.
149
128
  """
129
+
150
130
  model_path: str
151
131
  server_url: str
152
- api_params: ChatGPTConfig = ChatGPTConfig()
153
-
154
-
155
- OPENAI_API_PARAMS = {param for param in asdict(ChatGPTConfig()).keys()}
156
- OPENAI_API_PARAMS_WITH_FUNCTIONS = {
157
- param
158
- for param in asdict(FunctionCallingConfig()).keys()
159
- }
132
+ api_params: ChatGPTConfig = field(default_factory=ChatGPTConfig)
@@ -13,8 +13,10 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  from .base import BaseEmbedding
15
15
  from .openai_embedding import OpenAIEmbedding
16
+ from .sentence_transformers_embeddings import SentenceTransformerEncoder
16
17
 
17
18
  __all__ = [
18
19
  "BaseEmbedding",
19
20
  "OpenAIEmbedding",
21
+ "SentenceTransformerEncoder",
20
22
  ]
camel/embeddings/base.py CHANGED
@@ -33,8 +33,9 @@ class BaseEmbedding(ABC, Generic[T]):
33
33
  **kwargs (Any): Extra kwargs passed to the embedding API.
34
34
 
35
35
  Returns:
36
- List[List[float]]: A list that represents the generated embedding
37
- as a list of floating-point numbers.
36
+ List[List[float]]: A list that represents the
37
+ generated embedding as a list of floating-point numbers or a
38
+ numpy matrix with embeddings.
38
39
  """
39
40
  pass
40
41
 
@@ -11,13 +11,14 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from typing import Any, List
14
+ import os
15
+ from typing import Any, List, Optional
15
16
 
16
17
  from openai import OpenAI
17
18
 
18
- from camel.embeddings import BaseEmbedding
19
+ from camel.embeddings.base import BaseEmbedding
19
20
  from camel.types import EmbeddingModelType
20
- from camel.utils import openai_api_key_required
21
+ from camel.utils import api_key_required
21
22
 
22
23
 
23
24
  class OpenAIEmbedding(BaseEmbedding[str]):
@@ -26,6 +27,8 @@ class OpenAIEmbedding(BaseEmbedding[str]):
26
27
  Args:
27
28
  model (OpenAiEmbeddingModel, optional): The model type to be used for
28
29
  generating embeddings. (default: :obj:`ModelType.ADA_2`)
30
+ api_key (Optional[str]): The API key for authenticating with the
31
+ OpenAI service. (default: :obj:`None`)
29
32
 
30
33
  Raises:
31
34
  RuntimeError: If an unsupported model type is specified.
@@ -34,14 +37,16 @@ class OpenAIEmbedding(BaseEmbedding[str]):
34
37
  def __init__(
35
38
  self,
36
39
  model_type: EmbeddingModelType = EmbeddingModelType.ADA_2,
40
+ api_key: Optional[str] = None,
37
41
  ) -> None:
38
42
  if not model_type.is_openai:
39
43
  raise ValueError("Invalid OpenAI embedding model type.")
40
44
  self.model_type = model_type
41
45
  self.output_dim = model_type.output_dim
42
- self.client = OpenAI()
46
+ self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
47
+ self.client = OpenAI(timeout=60, max_retries=3, api_key=self._api_key)
43
48
 
44
- @openai_api_key_required
49
+ @api_key_required
45
50
  def embed_list(
46
51
  self,
47
52
  objs: List[str],