camel-ai 0.2.6__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (47)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +107 -22
  3. camel/configs/__init__.py +6 -0
  4. camel/configs/base_config.py +21 -0
  5. camel/configs/gemini_config.py +17 -9
  6. camel/configs/qwen_config.py +91 -0
  7. camel/configs/yi_config.py +58 -0
  8. camel/generators.py +93 -0
  9. camel/interpreters/docker_interpreter.py +5 -0
  10. camel/interpreters/ipython_interpreter.py +2 -1
  11. camel/loaders/__init__.py +2 -0
  12. camel/loaders/apify_reader.py +223 -0
  13. camel/memories/agent_memories.py +24 -1
  14. camel/messages/base.py +38 -0
  15. camel/models/__init__.py +4 -0
  16. camel/models/model_factory.py +6 -0
  17. camel/models/qwen_model.py +139 -0
  18. camel/models/yi_model.py +138 -0
  19. camel/prompts/image_craft.py +8 -0
  20. camel/prompts/video_description_prompt.py +8 -0
  21. camel/retrievers/vector_retriever.py +5 -1
  22. camel/societies/role_playing.py +29 -18
  23. camel/societies/workforce/base.py +7 -1
  24. camel/societies/workforce/task_channel.py +10 -0
  25. camel/societies/workforce/utils.py +6 -0
  26. camel/societies/workforce/worker.py +2 -0
  27. camel/storages/vectordb_storages/qdrant.py +147 -24
  28. camel/tasks/task.py +15 -0
  29. camel/terminators/base.py +4 -0
  30. camel/terminators/response_terminator.py +1 -0
  31. camel/terminators/token_limit_terminator.py +1 -0
  32. camel/toolkits/__init__.py +4 -1
  33. camel/toolkits/base.py +9 -0
  34. camel/toolkits/data_commons_toolkit.py +360 -0
  35. camel/toolkits/function_tool.py +174 -7
  36. camel/toolkits/github_toolkit.py +175 -176
  37. camel/toolkits/google_scholar_toolkit.py +36 -7
  38. camel/toolkits/notion_toolkit.py +279 -0
  39. camel/toolkits/search_toolkit.py +164 -36
  40. camel/types/enums.py +88 -0
  41. camel/types/unified_model_type.py +10 -0
  42. camel/utils/commons.py +2 -1
  43. camel/utils/constants.py +2 -0
  44. {camel_ai-0.2.6.dist-info → camel_ai-0.2.7.dist-info}/METADATA +129 -79
  45. {camel_ai-0.2.6.dist-info → camel_ai-0.2.7.dist-info}/RECORD +47 -40
  46. {camel_ai-0.2.6.dist-info → camel_ai-0.2.7.dist-info}/LICENSE +0 -0
  47. {camel_ai-0.2.6.dist-info → camel_ai-0.2.7.dist-info}/WHEEL +0 -0
camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
-__version__ = '0.2.6'
+__version__ = '0.2.7'
 
 __all__ = [
     '__version__',
camel/agents/chat_agent.py CHANGED
@@ -106,6 +106,11 @@ class FunctionCallingRecord(BaseModel):
     )
 
     def as_dict(self) -> dict[str, Any]:
+        r"""Returns the function calling record as a dictionary.
+
+        Returns:
+            dict[str, Any]: The function calling record as a dictionary.
+        """
        return self.model_dump()
 
 
@@ -175,9 +180,6 @@ class ChatAgent(BaseAgent):
                 model_type=ModelType.DEFAULT,
             )
         )
-        self.output_language: Optional[str] = output_language
-        if self.output_language is not None:
-            self.set_output_language(self.output_language)
 
         self.model_type = self.model_backend.model_type
 
@@ -213,6 +215,10 @@
             context_creator, window_size=message_window_size
         )
 
+        self.output_language: Optional[str] = output_language
+        if self.output_language is not None:
+            self.set_output_language(self.output_language)
+
         self.terminated: bool = False
         self.response_terminators = response_terminators or []
         self.init_messages()
@@ -221,6 +227,15 @@
 
     # ruff: noqa: E501
     def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str:
+        r"""Generates a tool prompt based on the provided tool schema list.
+
+        Args:
+            tool_schema_list (List[Dict]): A list of dictionaries, each
+                containing a tool schema.
+
+        Returns:
+            str: A string representing the tool prompt.
+        """
         tool_prompts = []
 
         for tool in tool_schema_list:
@@ -241,21 +256,36 @@ class ChatAgent(BaseAgent):
 
 {tool_prompt_str}
 
-If you choose to call a function ONLY reply in the following format with no prefix or suffix:
+If you choose to call a function ONLY reply in the following format with no
+prefix or suffix:
 
-<function=example_function_name>{{"example_name": "example_value"}}</function>
+<function=example_function_name>{{"example_name": "example_value"}}
+</function>
 
 Reminder:
-- Function calls MUST follow the specified format, start with <function= and end with </function>
+- Function calls MUST follow the specified format, start with <function=
+and end with </function>
 - Required parameters MUST be specified
 - Only call one function at a time
 - Put the entire function call reply on one line
-- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls
+- If there is no function call available, answer the question like normal
+with your current knowledge and do not tell the user about function calls
 """
         '''
         return final_prompt
 
     def _parse_tool_response(self, response: str):
+        r"""Parses the tool response to extract the function name and
+        arguments.
+
+        Args:
+            response (str): The response from the model containing the
+                function call.
+
+        Returns:
+            Optional[Dict[str, Any]]: The parsed function name and arguments
+                if found, otherwise :obj:`None`.
+        """
         function_regex = r"<function=(\w+)>(.*?)</function>"
         match = re.search(function_regex, response)
 
@@ -270,12 +300,7 @@ class ChatAgent(BaseAgent):
         return None
 
     def reset(self):
-        r"""Resets the :obj:`ChatAgent` to its initial state and returns the
-        stored messages.
-
-        Returns:
-            List[BaseMessage]: The stored messages.
-        """
+        r"""Resets the :obj:`ChatAgent` to its initial state."""
         self.terminated = False
         self.init_messages()
         for terminator in self.response_terminators:
@@ -292,7 +317,7 @@
         return self._system_message
 
     @system_message.setter
-    def system_message(self, message: BaseMessage):
+    def system_message(self, message: BaseMessage) -> None:
         r"""The setter method for the property :obj:`system_message`.
 
         Args:
@@ -347,13 +372,19 @@
             self._system_message = self.orig_sys_message.create_new_instance(
                 content
             )
-            return self._system_message
         else:
             self._system_message = BaseMessage.make_assistant_message(
                 role_name="Assistant",
                 content=language_prompt,
             )
-            return self._system_message
+
+        system_record = MemoryRecord(
+            message=self._system_message,
+            role_at_backend=OpenAIBackendRole.SYSTEM,
+        )
+        self.memory.clear()
+        self.memory.write_record(system_record)
+        return self._system_message
 
     def get_info(
         self,
@@ -827,6 +858,17 @@
     ]:
         r"""Internal function of structuring the output of the agent based on
         the given output schema.
+
+        Args:
+            response_format (Type[BaseModel]): The output schema to use for
+                structuring the output.
+
+        Returns:
+            Tuple[List[BaseMessage], List[str], Dict[str, int], str,
+                FunctionCallingRecord, int]:
+                A tuple containing the output messages, finish reasons, usage
+                dictionary, response ID, function calling record, and number of
+                tokens.
         """
         from camel.toolkits import FunctionTool
 
@@ -919,9 +961,39 @@
         num_tokens: int,
         external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
     ) -> Dict[str, Any]:
-        # Loop over responses terminators, get list of termination
-        # tuples with whether the terminator terminates the agent
-        # and termination reason
+        r"""Process the output of a chat step and gather information about the
+        step.
+
+        This method checks for termination conditions, updates the agent's
+        state, and collects information about the chat step, including tool
+        calls and termination reasons.
+
+        Args:
+            output_messages (List[BaseMessage]): The messages generated in
+                this step.
+            finish_reasons (List[str]): The reasons for finishing the
+                generation for each message.
+            usage_dict (Dict[str, int]): Dictionary containing token usage
+                information.
+            response_id (str): The ID of the response from the model.
+            tool_calls (List[FunctionCallingRecord]): Records of function calls
+                made during this step.
+            num_tokens (int): The number of tokens used in this step.
+            external_tool_request (Optional[ChatCompletionMessageToolCall]):
+                Any external tool request made during this step.
+                (default: :obj:`None`)
+
+        Returns:
+            Dict[str, Any]: A dictionary containing information about the chat
+                step, including termination status, reasons, and tool call
+                information.
+
+        Note:
+            This method iterates over all response terminators and checks if
+            any of them signal termination. If a terminator signals
+            termination, the agent's state is updated accordingly, and the
+            termination reason is recorded.
+        """
         termination = [
             terminator.is_terminated(output_messages)
             for terminator in self.response_terminators
@@ -952,7 +1024,8 @@
     def handle_batch_response(
         self, response: ChatCompletion
     ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
-        r"""
+        r"""Process a batch response from the model and extract the necessary
+        information.
 
         Args:
             response (dict): Model response.
@@ -985,7 +1058,18 @@
             response.id,
         )
 
-    def _safe_model_dump(self, obj):
+    def _safe_model_dump(self, obj) -> dict:
+        r"""Safely dump a Pydantic model to a dictionary.
+
+        This method attempts to use the `model_dump` method if available,
+        otherwise it falls back to the `dict` method.
+
+        Args:
+            obj: The Pydantic model instance to be dumped.
+
+        Returns:
+            dict: A dictionary representation of the Pydantic model.
+        """
         # Check if the `model_dump` method exists (Pydantic v2)
         if hasattr(obj, 'model_dump'):
             return obj.model_dump()
@@ -1000,7 +1084,8 @@
         response: Stream[ChatCompletionChunk],
         prompt_tokens: int,
     ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
-        r"""
+        r"""Process a stream response from the model and extract the necessary
+        information.
 
         Args:
             response (dict): Model response.
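
The reformatted prompt and the new `_parse_tool_response` docstring describe the same `<function=...>` reply format. Below is a minimal sketch of how such a reply is parsed; the regex is taken verbatim from the diff, while the standalone helper, the example tool name, and its arguments are illustrative rather than part of the package:

import json
import re

def parse_tool_response(response: str):
    # Regex copied from _parse_tool_response above.
    function_regex = r"<function=(\w+)>(.*?)</function>"
    match = re.search(function_regex, response)
    if match:
        function_name, args_string = match.group(1), match.group(2)
        try:
            # The prompt instructs the model to put a JSON object on one line.
            return {
                "function": function_name,
                "arguments": json.loads(args_string),
            }
        except json.JSONDecodeError:
            return None
    return None

reply = '<function=get_weather>{"city": "Paris"}</function>'
print(parse_tool_response(reply))
# -> {'function': 'get_weather', 'arguments': {'city': 'Paris'}}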
camel/configs/__init__.py CHANGED
@@ -19,6 +19,7 @@ from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
 from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig
+from .qwen_config import QWEN_API_PARAMS, QwenConfig
 from .reka_config import REKA_API_PARAMS, RekaConfig
 from .samba_config import (
     SAMBA_CLOUD_API_PARAMS,
@@ -28,6 +29,7 @@ from .samba_config import (
 )
 from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
 from .vllm_config import VLLM_API_PARAMS, VLLMConfig
+from .yi_config import YI_API_PARAMS, YiConfig
 from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
 
 __all__ = [
@@ -58,4 +60,8 @@ __all__ = [
     'SAMBA_CLOUD_API_PARAMS',
     'TogetherAIConfig',
     'TOGETHERAI_API_PARAMS',
+    'YiConfig',
+    'YI_API_PARAMS',
+    'QwenConfig',
+    'QWEN_API_PARAMS',
 ]
camel/configs/base_config.py CHANGED
@@ -20,6 +20,12 @@ from pydantic import BaseModel, ConfigDict, field_validator
 
 
 class BaseConfig(ABC, BaseModel):
+    r"""Base configuration class for all models.
+
+    This class provides a common interface for all models, ensuring that all
+    models have a consistent set of attributes and methods.
+    """
+
     model_config = ConfigDict(
         arbitrary_types_allowed=True,
         extra="forbid",
@@ -38,6 +44,12 @@
     @field_validator("tools", mode="before")
     @classmethod
     def fields_type_checking(cls, tools):
+        r"""Validate the type of tools in the configuration.
+
+        This method ensures that the tools provided in the configuration are
+        instances of `FunctionTool`. If any tool is not an instance of
+        `FunctionTool`, it raises a ValueError.
+        """
         if tools is not None:
             from camel.toolkits import FunctionTool
 
@@ -50,6 +62,15 @@
         return tools
 
     def as_dict(self) -> dict[str, Any]:
+        r"""Convert the current configuration to a dictionary.
+
+        This method converts the current configuration object to a dictionary
+        representation, which can be used for serialization or other purposes.
+
+        Returns:
+            dict[str, Any]: A dictionary representation of the current
+                configuration.
+        """
         config_dict = self.model_dump()
 
         tools_schema = None
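
The new docstrings describe behavior that every config subclass inherits. A small sketch of that inheritance, where `MyConfig` and its `temperature` field are hypothetical (BaseConfig itself only declares `tools`):

from camel.configs.base_config import BaseConfig

class MyConfig(BaseConfig):
    # Hypothetical field for illustration only.
    temperature: float = 0.5

config = MyConfig(temperature=0.2)
# as_dict() serializes the model; FunctionTool objects in `tools`
# are replaced with their tool schemas, per the docstring above.
print(config.as_dict())  # e.g. {'tools': None, 'temperature': 0.2}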
camel/configs/gemini_config.py CHANGED
@@ -87,6 +87,12 @@ class GeminiConfig(BaseConfig):
     @model_validator(mode="before")
     @classmethod
     def model_type_checking(cls, data: Any):
+        r"""Validate the type of tools in the configuration.
+
+        This method ensures that the tools provided in the configuration are
+        instances of `FunctionTool`. If any tool is not an instance of
+        `FunctionTool`, it raises a ValueError.
+        """
         if isinstance(data, dict):
             response_schema = data.get("response_schema")
             safety_settings = data.get("safety_settings")
@@ -111,16 +117,17 @@
 
             if response_schema and not isinstance(response_schema, Schema):
                 raise ValueError(
-                    "The response_schema should be "
-                    "an instance of `google.generativeai.protos.Schema`."
+                    "The response_schema should be an instance of "
+                    "google.generativeai.protos.Schema."
                 )
 
             if safety_settings and not isinstance(
                 safety_settings, SafetySettingOptions
             ):
                 raise ValueError(
-                    "The response_schema should be an instance of "
-                    "`google.generativeai.types.safety_types.SafetySettingOptions`."
+                    "The safety_settings should be an instance of "
+                    "google.generativeai.types.safety_types."
+                    "SafetySettingOptions."
                 )
 
             if tools is not None:
@@ -128,19 +135,20 @@
                 if not isinstance(tool, FunctionLibraryType):
                     raise ValueError(
                         "The tool should be an instance of "
-                        "`google.generativeai.types.content_types.FunctionLibraryType`."
+                        "google.generativeai.types.content_types."
+                        "FunctionLibraryType."
                     )
             if tool_config and not isinstance(tool_config, ToolConfigType):
                 raise ValueError(
-                    "The response_schema should be an instance of "
-                    "`google.generativeai.types.content_types.ToolConfigType`."
+                    "The tool_config should be an instance of "
+                    "google.generativeai.types.content_types.ToolConfigType."
                 )
             if request_options and not isinstance(
                 request_options, RequestOptionsType
             ):
                 raise ValueError(
-                    "The response_schema should be an instance of "
-                    "`google.generativeai.types.helper_types.RequestOptionsType`."
+                    "The request_options should be an instance of "
+                    "google.generativeai.types.helper_types.RequestOptionsType."
                 )
         return data
 
camel/configs/qwen_config.py ADDED
@@ -0,0 +1,91 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from typing import ClassVar, Optional, Union
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven
+
+
+class QwenConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    Qwen API. You can refer to the following link for more details:
+    https://help.aliyun.com/zh/model-studio/developer-reference/use-qwen-by-calling-api
+
+    Args:
+        stream (bool, optional): Whether to stream the response.
+            (default: :obj:`False`)
+        temperature (float, optional): Controls the diversity and focus of
+            the generated results. Lower values make the output more focused,
+            while higher values make it more diverse. (default: :obj:`0.3`)
+        top_p (float, optional): Controls the diversity and focus of the
+            generated results. Higher values make the output more diverse,
+            while lower values make it more focused. (default: :obj:`0.9`)
+        presence_penalty (float, optional): Controls the repetition of
+            content in the generated results. Positive values reduce the
+            repetition of content, while negative values increase it.
+            (default: :obj:`0.0`)
+        response_format (object, optional): Specifies the format of the
+            returned content. The available values are `{"type": "text"}` or
+            `{"type": "json_object"}`. Setting it to `{"type": "json_object"}`
+            will output a standard JSON string.
+            (default: :obj:`{"type": "text"}`)
+        max_tokens (Union[int, NotGiven], optional): Allows the model to
+            generate the maximum number of tokens.
+            (default: :obj:`NOT_GIVEN`)
+        seed (int, optional): Sets the seed parameter to make the text
+            generation process more deterministic, typically used to ensure
+            that the results are consistent across model runs. By passing the
+            same seed value (specified by you) in each model call while
+            keeping other parameters unchanged, the model is likely to return
+            the same result.
+            (default: :obj:`None`)
+        stop (str or list, optional): Using the stop parameter, the model will
+            automatically stop generating text when it is about to include the
+            specified string or token_id. You can use the stop parameter to
+            control the output of the model by passing sensitive words.
+            (default: :obj:`None`)
+        tools (list, optional): Specifies an array of tools that the model can
+            call. It can contain one or more tool objects. During a function
+            call process, the model will select one tool from the array.
+            (default: :obj:`None`)
+        extra_body (dict, optional): Additional parameters to be sent to the
+            Qwen API. If you want to enable internet search, you can set this
+            parameter to `{"enable_search": True}`.
+            (default: :obj:`{"enable_search": False}`)
+        include_usage (bool, optional): When streaming, specifies whether to
+            include usage information in `stream_options`. (default:
+            :obj:`True`)
+    """
+
+    stream: bool = False
+    temperature: float = 0.3
+    top_p: float = 0.9
+    presence_penalty: float = 0.0
+    response_format: ClassVar[dict] = {"type": "text"}
+    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    seed: Optional[int] = None
+    stop: Optional[Union[str, list]] = None
+    extra_body: ClassVar[dict] = {"enable_search": False}
+
+    def __init__(self, include_usage: bool = True, **kwargs):
+        super().__init__(**kwargs)
+        # Only set stream_options when stream is True
+        # Otherwise, it will raise error when calling the API
+        if self.stream:
+            self.stream_options = {"include_usage": include_usage}
+
+
+QWEN_API_PARAMS = {param for param in QwenConfig.model_fields.keys()}
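
A hedged usage sketch for the new QwenConfig. The construction and `as_dict()` call follow the class as shown above; the `ModelFactory` wiring in the comment mirrors the pattern CAMEL uses for other backends, and the Qwen enum member names are assumptions not shown in this diff:

from camel.configs import QwenConfig

# stream defaults to False, so stream_options is not set here.
config = QwenConfig(temperature=0.3, seed=42)
print(config.as_dict())

# Wiring it to the new Qwen backend would look roughly like (names assumed):
# from camel.models import ModelFactory
# from camel.types import ModelPlatformType, ModelType
# model = ModelFactory.create(
#     model_platform=ModelPlatformType.QWEN,
#     model_type=ModelType.QWEN_TURBO,
#     model_config_dict=config.as_dict(),
# )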
camel/configs/yi_config.py ADDED
@@ -0,0 +1,58 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from typing import Optional, Union
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven
+
+
+class YiConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    Yi API. You can refer to the following link for more details:
+    https://platform.lingyiwanwu.com/docs/api-reference
+
+    Args:
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` or
+            specifying a particular tool via
+            {"type": "function", "function": {"name": "some_function"}}
+            can be used to guide the model to use tools more strongly.
+            (default: :obj:`None`)
+        max_tokens (int, optional): Specifies the maximum number of tokens
+            the model can generate. This sets an upper limit, but does not
+            guarantee that this number will always be reached.
+            (default: :obj:`5000`)
+        top_p (float, optional): Controls the randomness of the generated
+            results. Lower values lead to less randomness, while higher
+            values increase randomness. (default: :obj:`0.9`)
+        temperature (float, optional): Controls the diversity and focus of
+            the generated results. Lower values make the output more focused,
+            while higher values make it more diverse. (default: :obj:`0.3`)
+        stream (bool, optional): If True, enables streaming output.
+            (default: :obj:`False`)
+    """
+
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    top_p: float = 0.9
+    temperature: float = 0.3
+    stream: bool = False
+
+
+YI_API_PARAMS = {param for param in YiConfig.model_fields.keys()}
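
The same pattern applies to the new YiConfig; a brief sketch with illustrative values, based only on the fields shown above:

from camel.configs import YiConfig

config = YiConfig(
    temperature=0.3,
    top_p=0.9,
    tool_choice="auto",  # or a dict targeting one function, as documented
)
print(config.as_dict())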
camel/generators.py CHANGED
@@ -154,6 +154,21 @@ class SystemMessageGenerator:
 
 
 class RoleNameGenerator:
+    r"""Role name generator for role-playing workers.
+
+    Args:
+        assistant_role_names_path (str, optional): The path to the file
+            containing the assistant role names.
+            (default: :obj:`"data/ai_society/assistant_roles.txt"`)
+        user_role_names_path (str, optional): The path to the file
+            containing the user role names.
+            (default: :obj:`"data/ai_society/user_roles.txt"`)
+        assistant_role_names (Optional[List[str]], optional): The list of
+            assistant role names. (default: :obj:`None`)
+        user_role_names (Optional[List[str]], optional): The list of user role
+            names. (default: :obj:`None`)
+    """
+
     def __init__(
         self,
         assistant_role_names_path: str = "data/ai_society/assistant_roles.txt",
@@ -181,12 +196,25 @@
         self.user_role_names = user_role_names
 
     def from_role_files(self) -> Generator[Tuple, None, None]:
+        r"""Generate role names from the file.
+
+        Returns:
+            Generator[Tuple, None, None]: A generator that yields tuples of
+                assistant role names and user role names.
+        """
         for assistant_role_name in self.assistant_role_names:
             for user_role_name in self.user_role_names:
                 yield (assistant_role_name, user_role_name)
 
 
 class AISocietyTaskPromptGenerator:
+    r"""Task prompt generator for AI society tasks.
+
+    Args:
+        num_tasks (int, optional): The number of tasks to generate.
+            (default: :obj:`10`)
+    """
+
     def __init__(
         self,
         num_tasks: int = 10,
@@ -205,6 +233,20 @@
         assistant_role_names_path: str = "data/ai_society/assistant_roles.txt",
         user_role_names_path: str = "data/ai_society/user_roles.txt",
     ) -> Generator[Tuple[str, Tuple[str, str]], None, None]:
+        r"""Generate tasks from role files.
+
+        Args:
+            assistant_role_names_path (str, optional): The path to the file
+                containing the assistant role names.
+                (default: :obj:`"data/ai_society/assistant_roles.txt"`)
+            user_role_names_path (str, optional): The path to the file
+                containing the user role names.
+                (default: :obj:`"data/ai_society/user_roles.txt"`)
+
+        Returns:
+            Generator[Tuple[str, Tuple[str, str]], None, None]: A generator
+                that yields tuples of task prompts and role names.
+        """
         roles_generator = RoleNameGenerator(
             assistant_role_names_path, user_role_names_path
         ).from_role_files()
@@ -220,6 +262,16 @@
     def from_role_generator(
         self, role_generator: Generator[Tuple, None, None]
    ) -> Generator[Tuple[str, Tuple[str, str]], None, None]:
+        r"""Generate tasks from a role generator.
+
+        Args:
+            role_generator (Generator[Tuple, None, None]): A generator that
+                yields tuples of role names.
+
+        Returns:
+            Generator[Tuple[str, Tuple[str, str]], None, None]: A generator
+                that yields tuples of task prompts and role names.
+        """
         for role_1, role_2 in role_generator:
             generate_tasks_prompt = self.generate_tasks_prompt.format(
                 assistant_role=role_1,
@@ -231,6 +283,12 @@
 
 
 class SingleTxtGenerator:
+    r"""Single text generator for role-playing workers.
+
+    Args:
+        text_file_path (str): The path to the file containing the text data.
+    """
+
     def __init__(
         self,
         text_file_path: str,
@@ -242,11 +300,23 @@
         ]
 
     def from_role_files(self) -> Generator[str, None, None]:
+        r"""Generate text from the file.
+
+        Returns:
+            Generator[str, None, None]: A generator that yields the text data.
+        """
         for data in self.data_list:
             yield data
 
 
 class CodeTaskPromptGenerator:
+    r"""Code task prompt generator for code tasks.
+
+    Args:
+        num_tasks (int, optional): The number of tasks to generate.
+            (default: :obj:`50`)
+    """
+
     def __init__(
         self,
         num_tasks: int = 50,
@@ -262,6 +332,19 @@
         languages_path: str = "data/code/languages.txt",
         domains_path: str = "data/code/domains.txt",
     ) -> Generator[Tuple[TextPrompt, str, str], None, None]:
+        r"""Generate tasks from role files.
+
+        Args:
+            languages_path (str, optional): The path to the file containing
+                the language names. (default: :obj:`"data/code/languages.txt"`)
+            domains_path (str, optional): The path to the file containing
+                the domain names. (default: :obj:`"data/code/domains.txt"`)
+
+        Returns:
+            Generator[Tuple[TextPrompt, str, str], None, None]: A generator
+                that yields tuples of task prompts, language names, and domain
+                names.
+        """
         language_generator = SingleTxtGenerator(
             languages_path
         ).from_role_files()
@@ -279,4 +362,14 @@
     def from_role_generator(
         self, role_generator: Generator[Tuple, None, None]
     ) -> Generator[str, None, None]:
+        r"""Generate tasks from a role generator.
+
+        Args:
+            role_generator (Generator[Tuple, None, None]): A generator that
+                yields tuples of role names.
+
+        Returns:
+            Generator[str, None, None]: A generator that yields the task
+                prompts.
+        """
        raise NotImplementedError
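
The docstrings added above document the generator protocol these classes share. A short sketch of chaining them, assuming the default role files exist under data/ai_society/ (the exact yielded values depend on those files):

from camel.generators import AISocietyTaskPromptGenerator, RoleNameGenerator

# Yields (assistant_role, user_role) pairs from the two role files.
roles = RoleNameGenerator().from_role_files()

# Yields (task_generation_prompt, (assistant_role, user_role)) tuples,
# per the return type documented above.
tasks = AISocietyTaskPromptGenerator(num_tasks=10).from_role_generator(roles)
task_prompt, (assistant_role, user_role) = next(tasks)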