camel-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai has been flagged by the registry.
Files changed (99):
  1. camel/__init__.py +1 -11
  2. camel/agents/__init__.py +5 -5
  3. camel/agents/chat_agent.py +124 -63
  4. camel/agents/critic_agent.py +28 -17
  5. camel/agents/deductive_reasoner_agent.py +235 -0
  6. camel/agents/embodied_agent.py +92 -40
  7. camel/agents/role_assignment_agent.py +27 -17
  8. camel/agents/task_agent.py +60 -34
  9. camel/agents/tool_agents/base.py +0 -1
  10. camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
  11. camel/configs.py +119 -7
  12. camel/embeddings/__init__.py +2 -0
  13. camel/embeddings/base.py +3 -2
  14. camel/embeddings/openai_embedding.py +3 -3
  15. camel/embeddings/sentence_transformers_embeddings.py +65 -0
  16. camel/functions/__init__.py +13 -3
  17. camel/functions/google_maps_function.py +335 -0
  18. camel/functions/math_functions.py +7 -7
  19. camel/functions/openai_function.py +344 -42
  20. camel/functions/search_functions.py +100 -35
  21. camel/functions/twitter_function.py +484 -0
  22. camel/functions/weather_functions.py +36 -23
  23. camel/generators.py +65 -46
  24. camel/human.py +17 -11
  25. camel/interpreters/__init__.py +25 -0
  26. camel/interpreters/base.py +49 -0
  27. camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
  28. camel/interpreters/interpreter_error.py +19 -0
  29. camel/interpreters/subprocess_interpreter.py +190 -0
  30. camel/loaders/__init__.py +22 -0
  31. camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
  32. camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
  33. camel/memories/__init__.py +17 -7
  34. camel/memories/agent_memories.py +156 -0
  35. camel/memories/base.py +97 -32
  36. camel/memories/blocks/__init__.py +21 -0
  37. camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
  38. camel/memories/blocks/vectordb_block.py +101 -0
  39. camel/memories/context_creators/__init__.py +3 -2
  40. camel/memories/context_creators/score_based.py +32 -20
  41. camel/memories/records.py +6 -5
  42. camel/messages/__init__.py +2 -2
  43. camel/messages/base.py +99 -16
  44. camel/messages/func_message.py +7 -4
  45. camel/models/__init__.py +4 -2
  46. camel/models/anthropic_model.py +132 -0
  47. camel/models/base_model.py +3 -2
  48. camel/models/model_factory.py +10 -8
  49. camel/models/open_source_model.py +25 -13
  50. camel/models/openai_model.py +9 -10
  51. camel/models/stub_model.py +6 -5
  52. camel/prompts/__init__.py +7 -5
  53. camel/prompts/ai_society.py +21 -14
  54. camel/prompts/base.py +54 -47
  55. camel/prompts/code.py +22 -14
  56. camel/prompts/evaluation.py +8 -5
  57. camel/prompts/misalignment.py +26 -19
  58. camel/prompts/object_recognition.py +35 -0
  59. camel/prompts/prompt_templates.py +14 -8
  60. camel/prompts/role_description_prompt_template.py +16 -10
  61. camel/prompts/solution_extraction.py +9 -5
  62. camel/prompts/task_prompt_template.py +24 -21
  63. camel/prompts/translation.py +9 -5
  64. camel/responses/agent_responses.py +5 -2
  65. camel/retrievers/__init__.py +24 -0
  66. camel/retrievers/auto_retriever.py +319 -0
  67. camel/retrievers/base.py +64 -0
  68. camel/retrievers/bm25_retriever.py +149 -0
  69. camel/retrievers/vector_retriever.py +166 -0
  70. camel/societies/__init__.py +1 -1
  71. camel/societies/babyagi_playing.py +56 -32
  72. camel/societies/role_playing.py +188 -133
  73. camel/storages/__init__.py +18 -0
  74. camel/storages/graph_storages/__init__.py +23 -0
  75. camel/storages/graph_storages/base.py +82 -0
  76. camel/storages/graph_storages/graph_element.py +74 -0
  77. camel/storages/graph_storages/neo4j_graph.py +582 -0
  78. camel/storages/key_value_storages/base.py +1 -2
  79. camel/storages/key_value_storages/in_memory.py +1 -2
  80. camel/storages/key_value_storages/json.py +8 -13
  81. camel/storages/vectordb_storages/__init__.py +33 -0
  82. camel/storages/vectordb_storages/base.py +202 -0
  83. camel/storages/vectordb_storages/milvus.py +396 -0
  84. camel/storages/vectordb_storages/qdrant.py +371 -0
  85. camel/terminators/__init__.py +1 -1
  86. camel/terminators/base.py +2 -3
  87. camel/terminators/response_terminator.py +21 -12
  88. camel/terminators/token_limit_terminator.py +5 -3
  89. camel/types/__init__.py +12 -6
  90. camel/types/enums.py +86 -13
  91. camel/types/openai_types.py +10 -5
  92. camel/utils/__init__.py +18 -13
  93. camel/utils/commons.py +242 -81
  94. camel/utils/token_counting.py +135 -15
  95. {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/METADATA +116 -74
  96. camel_ai-0.1.3.dist-info/RECORD +101 -0
  97. {camel_ai-0.1.1.dist-info → camel_ai-0.1.3.dist-info}/WHEEL +1 -1
  98. camel/memories/context_creators/base.py +0 -72
  99. camel_ai-0.1.1.dist-info/RECORD +0 -75
camel/agents/task_agent.py CHANGED

@@ -13,7 +13,7 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from typing import Any, Dict, List, Optional, Union
 
-from camel.agents import ChatAgent
+from camel.agents.chat_agent import ChatAgent
 from camel.configs import ChatGPTConfig
 from camel.messages import BaseMessage
 from camel.prompts import PromptTemplateGenerator, TextPrompt
@@ -43,6 +43,7 @@ class TaskSpecifyAgent(ChatAgent):
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
     """
+
     DEFAULT_WORD_LIMIT = 50
 
     def __init__(
@@ -54,14 +55,15 @@ class TaskSpecifyAgent(ChatAgent):
         word_limit: int = DEFAULT_WORD_LIMIT,
         output_language: Optional[str] = None,
     ) -> None:
-
         self.task_specify_prompt: Union[str, TextPrompt]
         if task_specify_prompt is None:
-            task_specify_prompt_template = PromptTemplateGenerator(
-            ).get_task_specify_prompt(task_type)
+            task_specify_prompt_template = (
+                PromptTemplateGenerator().get_task_specify_prompt(task_type)
+            )
 
             self.task_specify_prompt = task_specify_prompt_template.format(
-                word_limit=word_limit)
+                word_limit=word_limit
+            )
         else:
             self.task_specify_prompt = TextPrompt(task_specify_prompt)
 
@@ -74,9 +76,12 @@ class TaskSpecifyAgent(ChatAgent):
             content="You can make a task more specific.",
         )
 
-        super().__init__(system_message, model_type=model_type,
-                         model_config=model_config,
-                         output_language=output_language)
+        super().__init__(
+            system_message,
+            model_type=model_type,
+            model_config=model_config,
+            output_language=output_language,
+        )
 
     def run(
         self,
@@ -101,8 +106,9 @@ class TaskSpecifyAgent(ChatAgent):
         if meta_dict is not None:
             task_specify_prompt = task_specify_prompt.format(**meta_dict)
 
-        task_msg = BaseMessage.make_user_message(role_name="Task Specifier",
-                                                 content=task_specify_prompt)
+        task_msg = BaseMessage.make_user_message(
+            role_name="Task Specifier", content=task_specify_prompt
+        )
         specifier_response = self.step(task_msg)
 
         if specifier_response.terminated:
@@ -138,9 +144,9 @@ class TaskPlannerAgent(ChatAgent):
         model_config: Optional[Any] = None,
         output_language: Optional[str] = None,
     ) -> None:
-
         self.task_planner_prompt = TextPrompt(
-            "Divide this task into subtasks: {task}. Be concise.")
+            "Divide this task into subtasks: {task}. Be concise."
+        )
         system_message = BaseMessage(
             role_name="Task Planner",
             role_type=RoleType.ASSISTANT,
@@ -148,8 +154,12 @@ class TaskPlannerAgent(ChatAgent):
             content="You are a helpful task planner.",
         )
 
-        super().__init__(system_message, model_type, model_config,
-                         output_language=output_language)
+        super().__init__(
+            system_message,
+            model_type,
+            model_config,
+            output_language=output_language,
+        )
 
     def run(
         self,
@@ -168,8 +178,9 @@ class TaskPlannerAgent(ChatAgent):
         self.reset()
         task_planner_prompt = self.task_planner_prompt.format(task=task_prompt)
 
-        task_msg = BaseMessage.make_user_message(role_name="Task Planner",
-                                                 content=task_planner_prompt)
+        task_msg = BaseMessage.make_user_message(
+            role_name="Task Planner", content=task_planner_prompt
+        )
 
         task_response = self.step(task_msg)
 
@@ -220,7 +231,6 @@ class TaskCreationAgent(ChatAgent):
         message_window_size: Optional[int] = None,
         max_task_num: Optional[int] = 3,
     ) -> None:
-
         task_creation_prompt = TextPrompt(
             """Create new a task with the following objective: {objective}.
 Never forget you are a Task Creator of {role_name}.
@@ -239,11 +249,12 @@ You should make task plan and not ask me questions.
 If you think no new tasks are needed right now, write "No tasks to add."
 Now start to give me new tasks one by one. No more than three tasks.
 Be concrete.
-""")
+"""
+        )
 
         self.task_creation_prompt = task_creation_prompt.format(
-            objective=objective, role_name=role_name,
-            max_task_num=max_task_num)
+            objective=objective, role_name=role_name, max_task_num=max_task_num
+        )
         self.objective = objective
 
         system_message = BaseMessage(
@@ -253,9 +264,13 @@ Be concrete.
             content="You are a helpful task creator.",
         )
 
-        super().__init__(system_message, model_type, model_config,
-                         output_language=output_language,
-                         message_window_size=message_window_size)
+        super().__init__(
+            system_message,
+            model_type,
+            model_config,
+            output_language=output_language,
+            message_window_size=message_window_size,
+        )
 
     def run(
         self,
@@ -273,13 +288,16 @@ Be concrete.
 
         if len(task_list) > 0:
             task_creation_prompt = self.task_creation_prompt.format(
-                task_list=task_list)
+                task_list=task_list
+            )
         else:
             task_creation_prompt = self.task_creation_prompt.format(
-                task_list="")
+                task_list=""
+            )
 
-        task_msg = BaseMessage.make_user_message(role_name="Task Creator",
-                                                 content=task_creation_prompt)
+        task_msg = BaseMessage.make_user_message(
+            role_name="Task Creator", content=task_creation_prompt
+        )
         task_response = self.step(task_msg)
 
         if task_response.terminated:
@@ -337,10 +355,12 @@ The result must be a numbered list in the format:
 The entries must be consecutively numbered, starting with 1.
 The number of each entry must be followed by a period.
 Do not include any headers before your ranked list or follow your list \
-with any other output.""")
+with any other output."""
+        )
 
         self.task_prioritization_prompt = task_prioritization_prompt.format(
-            objective=objective)
+            objective=objective
+        )
         self.objective = objective
 
         system_message = BaseMessage(
@@ -350,9 +370,13 @@ with any other output.""")
             content="You are a helpful task prioritizer.",
        )
 
-        super().__init__(system_message, model_type, model_config,
-                         output_language=output_language,
-                         message_window_size=message_window_size)
+        super().__init__(
+            system_message,
+            model_type,
+            model_config,
+            output_language=output_language,
+            message_window_size=message_window_size,
+        )
 
     def run(
         self,
@@ -366,10 +390,12 @@ with any other output.""")
             List[str]: The new prioritized task list generated by the Agent.
         """
         task_prioritization_prompt = self.task_prioritization_prompt.format(
-            task_list=task_list)
+            task_list=task_list
+        )
 
         task_msg = BaseMessage.make_user_message(
-            role_name="Task Prioritizer", content=task_prioritization_prompt)
+            role_name="Task Prioritizer", content=task_prioritization_prompt
+        )
 
         task_response = self.step(task_msg)
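These hunks are formatting-only. For context, a minimal usage sketch of TaskSpecifyAgent; the task_type value and the meta_dict keys are illustrative assumptions, not taken from this diff, and running it requires an OpenAI API key:

    from camel.agents.task_agent import TaskSpecifyAgent
    from camel.types import TaskType

    agent = TaskSpecifyAgent(task_type=TaskType.AI_SOCIETY, word_limit=50)
    # meta_dict fills any extra placeholders in the task-specify template.
    specified_task = agent.run(
        "Improve my chess skills",
        meta_dict={"assistant_role": "Chess Coach", "user_role": "Student"},
    )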
 
camel/agents/tool_agents/base.py CHANGED

@@ -24,7 +24,6 @@ class BaseToolAgent(BaseAgent):
     """
 
     def __init__(self, name: str, description: str) -> None:
-
         self.name = name
         self.description = description
 
camel/agents/tool_agents/hugging_face_tool_agent.py CHANGED

@@ -13,7 +13,7 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from typing import Any, Optional
 
-from camel.agents.tool_agents import BaseToolAgent
+from camel.agents.tool_agents.base import BaseToolAgent
 
 
 # flake8: noqa :E501
@@ -44,10 +44,13 @@ class HuggingFaceToolAgent(BaseToolAgent):
         # TODO: Support other tool agents
         import transformers
         from packaging import version
-        if version.parse(
-                transformers.__version__) < version.parse("4.31.0"):
+
+        if version.parse(transformers.__version__) < version.parse(
+            "4.31.0"
+        ):
             raise ValueError(
-                "The version of \"transformers\" package should >= 4.31.0")
+                "The version of \"transformers\" package should >= 4.31.0"
+            )
 
         from transformers.tools import OpenAiAgent
         from transformers.tools.agent_types import AgentImage
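The guard above relies on packaging's semantic version comparison; a standalone sketch of the same check, outside the class:

    import transformers
    from packaging import version

    # version.parse compares release segments numerically, so "4.9.0" is
    # correctly treated as older than "4.31.0", unlike a string comparison.
    if version.parse(transformers.__version__) < version.parse("4.31.0"):
        raise ValueError('The version of "transformers" package should >= 4.31.0')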
camel/configs.py CHANGED
@@ -11,15 +11,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
 from abc import ABC
 from dataclasses import asdict, dataclass, field
-from typing import Any, Dict, List, Optional, Sequence, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
+
+from anthropic._types import NOT_GIVEN, NotGiven
 
-from camel.functions import OpenAIFunction
+if TYPE_CHECKING:
+    from camel.functions import OpenAIFunction
 
 
 @dataclass(frozen=True)
-class BaseConfig(ABC):
+class BaseConfig(ABC):  # noqa: B024
     pass
 
 
@@ -72,6 +77,7 @@ class ChatGPTConfig(BaseConfig):
             which can help OpenAI to monitor and detect abuse.
             (default: :obj:`""`)
     """
+
     temperature: float = 0.2  # openai default: 1.0
     top_p: float = 1.0
     n: int = 1
@@ -84,6 +90,56 @@ class ChatGPTConfig(BaseConfig):
     user: str = ""
 
 
+@dataclass(frozen=True)
+class ChatGPTVisionConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions with
+    vision model using the OpenAI API. The vision config here is the
+    subset of the :class:`ChatGPTConfig`.
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`1.0`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`1`)
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`4096`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`""`)
+    """
+
+    temperature: float = 0.2  # openai default: 1.0
+    top_p: float = 1.0
+    n: int = 1
+    stream: bool = False
+    max_tokens: int = 4096
+    presence_penalty: float = 0.0
+    frequency_penalty: float = 0.0
+    user: str = ""
+
+
 @dataclass(frozen=True)
 class FunctionCallingConfig(ChatGPTConfig):
     r"""Defines the parameters for generating chat completions using the
@@ -100,6 +156,7 @@ class FunctionCallingConfig(ChatGPTConfig):
             :obj:`{"name": "my_function"}` forces the model to call that
             function. (default: :obj:`"auto"`)
     """
+
     functions: List[Dict[str, Any]] = field(default_factory=list)
     function_call: Union[Dict[str, str], str] = "auto"
 
@@ -128,7 +185,9 @@ class FunctionCallingConfig(ChatGPTConfig):
             :obj:`function_call` argument.
         """
         return cls(
-            functions=[func.as_dict() for func in function_list],
+            functions=[
+                func.get_openai_function_schema() for func in function_list
+            ],
             function_call=function_call,
             **(kwargs or {}),
         )
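A minimal sketch of how the renamed schema accessor flows through this classmethod, assuming (as the hunk implies) that from_openai_function_list takes a list of OpenAIFunction wrappers plus an optional kwargs dict:

    from camel.configs import FunctionCallingConfig
    from camel.functions import MATH_FUNCS  # a list of OpenAIFunction wrappers

    # Each function's schema is now pulled via get_openai_function_schema()
    # rather than the old as_dict().
    config = FunctionCallingConfig.from_openai_function_list(
        function_list=MATH_FUNCS,
        kwargs=dict(temperature=0.0),
    )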
@@ -147,13 +206,66 @@ class OpenSourceConfig(BaseConfig):
         api_params (ChatGPTConfig): An instance of :obj:ChatGPTConfig to
             contain the arguments to be passed to OpenAI API.
     """
+
     model_path: str
     server_url: str
-    api_params: ChatGPTConfig = ChatGPTConfig()
+    api_params: ChatGPTConfig = field(default_factory=ChatGPTConfig)
 
 
 OPENAI_API_PARAMS = {param for param in asdict(ChatGPTConfig()).keys()}
 OPENAI_API_PARAMS_WITH_FUNCTIONS = {
-    param
-    for param in asdict(FunctionCallingConfig()).keys()
+    param for param in asdict(FunctionCallingConfig()).keys()
 }
+
+
+@dataclass(frozen=True)
+class AnthropicConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    Anthropic API.
+
+    See: https://docs.anthropic.com/claude/reference/complete_post
+    Args:
+        max_tokens_to_sample (int, optional): The maximum number of tokens to
+            generate before stopping. Note that Anthropic models may stop
+            before reaching this maximum. This parameter only specifies the
+            absolute maximum number of tokens to generate.
+            (default: :obj:`256`)
+        stop_sequences (List[str], optional): Sequences that will cause the
+            model to stop generating completion text. Anthropic models stop
+            on "\n\nHuman:", and may include additional built-in stop sequences
+            in the future. By providing the stop_sequences parameter, you may
+            include additional strings that will cause the model to stop
+            generating.
+        temperature (float, optional): Amount of randomness injected into the
+            response. Defaults to 1. Ranges from 0 to 1. Use temp closer to 0
+            for analytical / multiple choice, and closer to 1 for creative
+            and generative tasks.
+            (default: :obj:`1`)
+        top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
+            compute the cumulative distribution over all the options for each
+            subsequent token in decreasing probability order and cut it off
+            once it reaches a particular probability specified by `top_p`.
+            You should either alter `temperature` or `top_p`,
+            but not both.
+            (default: :obj:`0.7`)
+        top_k (int, optional): Only sample from the top K options for each
+            subsequent token. Used to remove "long tail" low probability
+            responses.
+            (default: :obj:`5`)
+        metadata: An object describing metadata about the request.
+        stream (bool, optional): Whether to incrementally stream the response
+            using server-sent events.
+            (default: :obj:`False`)
+    """
+
+    max_tokens: int = 256
+    stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
+    temperature: float = 1
+    top_p: Union[float, NotGiven] = NOT_GIVEN
+    top_k: Union[int, NotGiven] = NOT_GIVEN
+    metadata: NotGiven = NOT_GIVEN
+    stream: bool = False
+
+
+ANTHROPIC_API_PARAMS = {param for param in asdict(AnthropicConfig()).keys()}
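A sketch of how a model backend might flatten this config; the filtering step is an illustration, not code from this diff. Fields left at NOT_GIVEN are sentinels the anthropic client understands as "omit this argument":

    from dataclasses import asdict

    from anthropic._types import NotGiven

    from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig

    config = AnthropicConfig(temperature=0.0, max_tokens=512)
    # Drop unset sentinel fields so Anthropic's server-side defaults apply.
    request_kwargs = {
        k: v
        for k, v in asdict(config).items()
        if k in ANTHROPIC_API_PARAMS and not isinstance(v, NotGiven)
    }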
camel/embeddings/__init__.py CHANGED

@@ -13,8 +13,10 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .base import BaseEmbedding
 from .openai_embedding import OpenAIEmbedding
+from .sentence_transformers_embeddings import SentenceTransformerEncoder
 
 __all__ = [
     "BaseEmbedding",
     "OpenAIEmbedding",
+    "SentenceTransformerEncoder",
 ]
camel/embeddings/base.py CHANGED
@@ -33,8 +33,9 @@ class BaseEmbedding(ABC, Generic[T]):
             **kwargs (Any): Extra kwargs passed to the embedding API.
 
         Returns:
-            List[List[float]]: A list that represents the generated embedding
-                as a list of floating-point numbers.
+            List[List[float]]: A list that represents the
+                generated embedding as a list of floating-point numbers or a
+                numpy matrix with embeddings.
         """
         pass
 
camel/embeddings/openai_embedding.py CHANGED

@@ -15,9 +15,9 @@ from typing import Any, List
 
 from openai import OpenAI
 
-from camel.embeddings import BaseEmbedding
+from camel.embeddings.base import BaseEmbedding
 from camel.types import EmbeddingModelType
-from camel.utils import openai_api_key_required
+from camel.utils import api_key_required
 
 
 class OpenAIEmbedding(BaseEmbedding[str]):
@@ -41,7 +41,7 @@ class OpenAIEmbedding(BaseEmbedding[str]):
         self.output_dim = model_type.output_dim
         self.client = OpenAI()
 
-    @openai_api_key_required
+    @api_key_required
     def embed_list(
         self,
         objs: List[str],
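Following the decorator rename, a minimal usage sketch; running it requires OPENAI_API_KEY in the environment, which the generalized api_key_required decorator now enforces at call time:

    from camel.embeddings import OpenAIEmbedding

    embedding = OpenAIEmbedding()
    vectors = embedding.embed_list(["What is CAMEL-AI?"])
    # output_dim is set from the model type in __init__ (see hunk above).
    assert len(vectors[0]) == embedding.output_dim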
camel/embeddings/sentence_transformers_embeddings.py ADDED

@@ -0,0 +1,65 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import Any, List, Union
+
+from camel.embeddings.base import BaseEmbedding
+
+
+class SentenceTransformerEncoder(BaseEmbedding[str]):
+    r"""This class provides functionalities to generate embeddings
+    using a specified model from `Sentence Transformers`.
+
+    References:
+        https://www.sbert.net/
+    """
+
+    def __init__(self, model_name: str = 'intfloat/e5-large-v2'):
+        r"""Initializes the :obj:`SentenceTransformerEncoder` class
+        with the specified transformer model.
+
+        Args:
+            model_name (str, optional): The name of the model to use.
+                Defaults to `intfloat/e5-large-v2`.
+        """
+        from sentence_transformers import SentenceTransformer
+
+        self.model = SentenceTransformer(model_name)
+
+    def embed_list(
+        self,
+        objs: Union[str, List[str]],
+        **kwargs: Any,
+    ) -> list:
+        r"""Generates embeddings for the given texts using the model.
+
+        Args:
+            objs (str | List[str]): The texts for which to generate the
+                embeddings.
+
+        Returns:
+            list: A list of floats representing the embeddings.
+        """
+        if not objs:
+            raise ValueError("Input text list is empty")
+        return self.model.encode(
+            objs, normalize_embeddings=True, **kwargs
+        ).tolist()
+
+    def get_output_dim(self) -> int:
+        r"""Returns the output dimension of the embeddings.
+
+        Returns:
+            int: The dimensionality of the embeddings.
+        """
+        return self.model.get_sentence_embedding_dimension()
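A minimal usage sketch of the new encoder; the default model is downloaded on first use, and the 1024-dimension note applies to intfloat/e5-large-v2 specifically:

    from camel.embeddings import SentenceTransformerEncoder

    encoder = SentenceTransformerEncoder()  # loads intfloat/e5-large-v2
    vectors = encoder.embed_list(["What is CAMEL-AI?", "A multi-agent framework."])
    assert len(vectors[0]) == encoder.get_output_dim()  # 1024 for e5-large-v2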
camel/functions/__init__.py CHANGED

@@ -12,16 +12,26 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
+from ..loaders.unstructured_io import UnstructuredIO
+from .google_maps_function import MAP_FUNCS
 from .math_functions import MATH_FUNCS
-from .openai_function import OpenAIFunction
+from .openai_function import (
+    OpenAIFunction,
+    get_openai_function_schema,
+    get_openai_tool_schema,
+)
 from .search_functions import SEARCH_FUNCS
+from .twitter_function import TWITTER_FUNCS
 from .weather_functions import WEATHER_FUNCS
-from .unstructured_io_fuctions import UnstructuredModules
 
 __all__ = [
     'OpenAIFunction',
+    'get_openai_tool_schema',
+    'get_openai_function_schema',
     'MATH_FUNCS',
     'SEARCH_FUNCS',
     'WEATHER_FUNCS',
-    'UnstructuredModules',
+    'MAP_FUNCS',
+    'TWITTER_FUNCS',
+    'UnstructuredIO',
 ]
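A short, hedged sketch of the reshaped public surface; the import names come from the __all__ above, while the combined-toolkit usage is illustrative rather than prescribed by the diff:

    from typing import List

    from camel.functions import MATH_FUNCS, SEARCH_FUNCS, OpenAIFunction

    # Merge per-domain toolkits into one tool list, then pull the schema for
    # each function via the accessor this release standardizes on.
    tools: List[OpenAIFunction] = [*MATH_FUNCS, *SEARCH_FUNCS]
    schemas = [f.get_openai_function_schema() for f in tools]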