camel-ai 0.2.35__py3-none-any.whl → 0.2.37__py3-none-any.whl

This diff reflects the published contents of two package versions as they appear in their public registry, and is provided for informational purposes only.

This version of camel-ai has been flagged as potentially problematic.

Files changed (59)
  1. camel/__init__.py +1 -1
  2. camel/agents/__init__.py +2 -0
  3. camel/agents/repo_agent.py +579 -0
  4. camel/configs/__init__.py +3 -0
  5. camel/configs/aiml_config.py +20 -19
  6. camel/configs/anthropic_config.py +25 -27
  7. camel/configs/cohere_config.py +11 -10
  8. camel/configs/deepseek_config.py +16 -16
  9. camel/configs/gemini_config.py +8 -8
  10. camel/configs/groq_config.py +18 -19
  11. camel/configs/internlm_config.py +8 -8
  12. camel/configs/litellm_config.py +26 -24
  13. camel/configs/mistral_config.py +8 -8
  14. camel/configs/moonshot_config.py +11 -11
  15. camel/configs/nvidia_config.py +13 -13
  16. camel/configs/ollama_config.py +14 -15
  17. camel/configs/openai_config.py +3 -3
  18. camel/configs/openrouter_config.py +106 -0
  19. camel/configs/qwen_config.py +8 -8
  20. camel/configs/reka_config.py +12 -11
  21. camel/configs/samba_config.py +14 -14
  22. camel/configs/sglang_config.py +15 -16
  23. camel/configs/siliconflow_config.py +18 -17
  24. camel/configs/togetherai_config.py +18 -19
  25. camel/configs/vllm_config.py +18 -19
  26. camel/configs/yi_config.py +7 -8
  27. camel/configs/zhipuai_config.py +8 -9
  28. camel/datasets/few_shot_generator.py +2 -5
  29. camel/datasets/static_dataset.py +25 -23
  30. camel/environments/models.py +3 -0
  31. camel/environments/single_step.py +212 -132
  32. camel/extractors/__init__.py +16 -1
  33. camel/memories/agent_memories.py +2 -1
  34. camel/memories/blocks/chat_history_block.py +2 -1
  35. camel/models/__init__.py +2 -0
  36. camel/models/gemini_model.py +36 -0
  37. camel/models/groq_model.py +6 -3
  38. camel/models/model_factory.py +3 -0
  39. camel/models/openrouter_model.py +204 -0
  40. camel/storages/__init__.py +2 -0
  41. camel/storages/key_value_storages/__init__.py +2 -0
  42. camel/storages/key_value_storages/mem0_cloud.py +224 -0
  43. camel/storages/vectordb_storages/qdrant.py +3 -3
  44. camel/toolkits/__init__.py +2 -0
  45. camel/toolkits/browser_toolkit.py +43 -0
  46. camel/toolkits/code_execution.py +2 -1
  47. camel/toolkits/mcp_toolkit.py +30 -1
  48. camel/toolkits/thinking_toolkit.py +74 -0
  49. camel/types/enums.py +27 -0
  50. camel/types/unified_model_type.py +5 -0
  51. camel/utils/chunker/code_chunker.py +9 -15
  52. camel/verifiers/__init__.py +1 -2
  53. camel/verifiers/base.py +159 -99
  54. camel/verifiers/models.py +0 -12
  55. camel/verifiers/python_verifier.py +316 -60
  56. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/METADATA +54 -5
  57. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/RECORD +59 -54
  58. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/WHEEL +0 -0
  59. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/licenses/LICENSE +0 -0
camel/configs/nvidia_config.py
@@ -18,7 +18,7 @@ from typing import List, Optional, Union
 from pydantic import Field
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
+from camel.types import NotGiven
 
 
 class NvidiaConfig(BaseConfig):
@@ -30,21 +30,21 @@ class NvidiaConfig(BaseConfig):
 
     Args:
         stream (bool, optional): Whether to stream the response.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         temperature (float, optional): Controls randomness in the response.
             Higher values make output more random, lower values make it more
-            deterministic. Range: [0.0, 2.0]. (default: :obj:`0.7`)
+            deterministic. Range: [0.0, 2.0]. (default: :obj:`None`)
         top_p (float, optional): Controls diversity via nucleus sampling.
-            Range: [0.0, 1.0]. (default: :obj:`0.95`)
+            Range: [0.0, 1.0]. (default: :obj:`None`)
         presence_penalty (float, optional): Penalizes new tokens based on
             whether they appear in the text so far. Range: [-2.0, 2.0].
-            (default: :obj:`0.0`)
+            (default: :obj:`None`)
         frequency_penalty (float, optional): Penalizes new tokens based on
             their frequency in the text so far. Range: [-2.0, 2.0].
-            (default: :obj:`0.0`)
+            (default: :obj:`None`)
         max_tokens (Union[int, NotGiven], optional): Maximum number of tokens
             to generate. If not provided, model will use its default maximum.
-            (default: :obj:`NOT_GIVEN`)
+            (default: :obj:`None`)
         seed (Optional[int], optional): Random seed for deterministic sampling.
             (default: :obj:`None`)
         tools (Optional[List[Dict]], optional): List of tools available to the
@@ -56,12 +56,12 @@ class NvidiaConfig(BaseConfig):
             (default: :obj:`None`)
     """
 
-    stream: bool = Field(default=False)
-    temperature: float = Field(default=0.7)
-    top_p: float = Field(default=0.95)
-    presence_penalty: float = Field(default=0.0)
-    frequency_penalty: float = Field(default=0.0)
-    max_tokens: Union[int, NotGiven] = Field(default=NOT_GIVEN)
+    stream: Optional[bool] = Field(default=None)
+    temperature: Optional[float] = Field(default=None)
+    top_p: Optional[float] = Field(default=None)
+    presence_penalty: Optional[float] = Field(default=None)
+    frequency_penalty: Optional[float] = Field(default=None)
+    max_tokens: Optional[Union[int, NotGiven]] = Field(default=None)
     seed: Optional[int] = Field(default=None)
     tool_choice: Optional[str] = Field(default=None)
     stop: Optional[List[str]] = Field(default=None)
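
The pattern established here repeats across most of the config modules in this release: concrete client-side defaults become Optional[...] = None, so any field the caller leaves unset is no longer pinned by the client and the provider's server-side default applies instead. A minimal before/after sketch, assuming only the public camel.configs exports shown in this diff:

    from camel.configs import NvidiaConfig

    # 0.2.35: NvidiaConfig() silently sent temperature=0.7, top_p=0.95,
    # presence_penalty=0.0, and so on with every request.
    # 0.2.37: every sampling field defaults to None and stays unset.
    cfg = NvidiaConfig()
    assert cfg.temperature is None and cfg.top_p is None

    # Explicitly chosen values behave the same in both versions.
    tuned = NvidiaConfig(temperature=0.7, top_p=0.95, max_tokens=512)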
camel/configs/ollama_config.py
@@ -13,12 +13,11 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Sequence, Type, Union
+from typing import Optional, Sequence, Type, Union
 
 from pydantic import BaseModel
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class OllamaConfig(BaseConfig):
@@ -31,12 +30,12 @@ class OllamaConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -52,7 +51,7 @@ class OllamaConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
@@ -63,22 +62,22 @@ class OllamaConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
     """
 
-    temperature: float = 0.2
-    top_p: float = 1.0
-    stream: bool = False
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
-    presence_penalty: float = 0.0
-    response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN
-    frequency_penalty: float = 0.0
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[Union[Type[BaseModel], dict]] = None
+    frequency_penalty: Optional[float] = None
 
 
 OLLAMA_API_PARAMS = {param for param in OllamaConfig.model_fields.keys()}
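
Each config module ends with a *_API_PARAMS set like the one above: it is simply the set of declared field names (including those inherited from BaseConfig), which a model backend can use to reject keys the target API does not accept. A sketch of that validation idea; the validate_config helper below is illustrative, not part of the package:

    from camel.configs.ollama_config import OLLAMA_API_PARAMS, OllamaConfig

    def validate_config(config_dict: dict) -> None:
        # Illustrative helper: flag keys that OllamaConfig does not declare.
        unknown = set(config_dict) - OLLAMA_API_PARAMS
        if unknown:
            raise ValueError(f"Unexpected Ollama parameters: {sorted(unknown)}")

    validate_config(OllamaConfig(temperature=0.2).as_dict())  # passes
    # validate_config({"temperatura": 0.2})  # would raise: typo caught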
camel/configs/openai_config.py
@@ -79,7 +79,7 @@ class ChatGPTConfig(BaseConfig):
             exclusive selection of the relevant token. (default: :obj:`None`)
         user (str, optional): A unique identifier representing your end-user,
             which can help OpenAI to monitor and detect abuse.
-            (default: :obj:`""`)
+            (default: :obj:`None`)
         tools (list[FunctionTool], optional): A list of tools the model may
             call. Currently, only functions are supported as a tool. Use this
             to provide a list of functions the model may generate JSON inputs
@@ -102,8 +102,8 @@ class ChatGPTConfig(BaseConfig):
             or if the model type does not support it, this parameter is
             ignored. (default: :obj:`None`)
         parallel_tool_calls (bool, optional): A parameter specifying whether
-            the model should call tools in parallel or not. (default:
-            :obj:`None`)
+            the model should call tools in parallel or not.
+            (default: :obj:`None`)
     """
 
     temperature: Optional[float] = None
camel/configs/openrouter_config.py (new file)
@@ -0,0 +1,106 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Optional, Sequence, Union
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NotGiven
+
+
+class OpenRouterConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility.
+
+    Reference: https://openrouter.ai/docs/api-reference/parameters
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`None`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`None`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`None`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`None`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported. (default: :obj:`None`)
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present. (default: :obj:`None`)
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str], NotGiven]] = None
+    max_tokens: Optional[Union[int, NotGiven]] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[Union[dict, NotGiven]] = None
+    frequency_penalty: Optional[float] = None
+    user: Optional[str] = None
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+
+
+OPENROUTER_API_PARAMS = {
+    param for param in OpenRouterConfig.model_fields.keys()
+}
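
The file list also adds camel/models/openrouter_model.py plus model-factory and enum entries, so this config presumably plugs into the usual factory flow. A hypothetical usage sketch: the ModelPlatformType.OPENROUTER member and the model identifier below are assumptions, not confirmed by this diff:

    from camel.configs import OpenRouterConfig
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    # Hypothetical wiring: enum member and model slug are assumed. With the
    # None-defaulted config, only explicitly set fields reach the API call.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENROUTER,
        model_type="anthropic/claude-3.5-sonnet",
        model_config_dict=OpenRouterConfig(temperature=0.2).as_dict(),
    )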
camel/configs/qwen_config.py
@@ -25,18 +25,18 @@ class QwenConfig(BaseConfig):
 
     Args:
         stream (bool, optional): Whether to stream the response.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         temperature (float, optional): Controls the diversity and
             focus of the generated results. Lower values make the output more
             focused, while higher values make it more diverse.
-            (default: :obj:`0.3`)
+            (default: :obj:`None`)
         top_p (float, optional): Controls the diversity and focus of
             the generated results. Higher values make the output more diverse,
             while lower values make it more focused. (default: :obj:`0.9`)
         presence_penalty (float, optional): Controls the repetition
             content in the generated results. Positive values reduce the
             repetition of content, while negative values increase it.
-            (default: :obj:`0.0`)
+            (default: :obj:`None`)
         response_format (Optional[Dict[str, str]], optional): Specifies the
             format of the returned content. The available values are
             `{"type": "text"}` or `{"type": "json_object"}`. Setting it to
@@ -67,13 +67,13 @@ class QwenConfig(BaseConfig):
             (default: :obj:`None`)
         include_usage (bool, optional): When streaming, specifies whether to
             include usage information in `stream_options`.
-            (default: :obj:`True`)
+            (default: :obj:`None`)
     """
 
-    stream: bool = False
-    temperature: float = 0.3
-    top_p: float = 0.9
-    presence_penalty: float = 0.0
+    stream: Optional[bool] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    presence_penalty: Optional[float] = None
     response_format: Optional[Dict[str, str]] = None
     max_tokens: Optional[int] = None
     seed: Optional[int] = None
camel/configs/reka_config.py
@@ -26,32 +26,33 @@ class RekaConfig(BaseConfig):
 
     Args:
         temperature (Optional[float], optional): temperature the temperature
-            to use for sampling, e.g. 0.5.
+            to use for sampling, e.g. 0.5. (default: :obj:`None`)
         top_p (Optional[float], optional): the cumulative probability of
-            tokens to generate, e.g. 0.9. Defaults to None.
+            tokens to generate, e.g. 0.9. (default: :obj:`None`)
         top_k (Optional[int], optional): Parameter which forces the model to
             only consider the tokens with the `top_k` highest probabilities at
-            the next step. Defaults to 1024.
+            the next step. (default: :obj:`None`)
         max_tokens (Optional[int], optional): the maximum number of tokens to
-            generate, e.g. 100. Defaults to None.
+            generate, e.g. 100. (default: :obj:`None`)
         stop (Optional[Union[str,list[str]]]): Stop generation if this token
             is detected. Or if one of these tokens is detected when providing
-            a string list.
+            a string list. (default: :obj:`None`)
         seed (Optional[int], optional): the random seed to use for sampling, e.
-            g. 42. Defaults to None.
+            g. 42. (default: :obj:`None`)
         presence_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         use_search_engine (Optional[bool]): Whether to consider using search
             engine to complete the request. Note that even if this is set to
             `True`, the model might decide to not use search.
+            (default: :obj:`None`)
     """
 
     temperature: Optional[float] = None
@@ -60,9 +61,9 @@ class RekaConfig(BaseConfig):
     max_tokens: Optional[int] = None
     stop: Optional[Union[str, list[str]]] = None
     seed: Optional[int] = None
-    frequency_penalty: float = 0.0
-    presence_penalty: float = 0.0
-    use_search_engine: Optional[bool] = False
+    frequency_penalty: Optional[float] = None
+    presence_penalty: Optional[float] = None
+    use_search_engine: Optional[bool] = None
 
     def as_dict(self) -> dict[str, Any]:
         config_dict = super().as_dict()
camel/configs/samba_config.py
@@ -29,39 +29,39 @@ class SambaVerseAPIConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.7`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`0.95`)
+            (default: :obj:`None`)
         top_k (int, optional): Only sample from the top K options for each
             subsequent token. Used to remove "long tail" low probability
             responses.
-            (default: :obj:`50`)
+            (default: :obj:`None`)
         max_tokens (Optional[int], optional): The maximum number of tokens to
             generate, e.g. 100.
-            (default: :obj:`2048`)
+            (default: :obj:`None`)
         repetition_penalty (Optional[float], optional): The parameter for
             repetition penalty. 1.0 means no penalty.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         stop (Optional[Union[str,list[str]]]): Stop generation if this token
             is detected. Or if one of these tokens is detected when providing
             a string list.
-            (default: :obj:`""`)
+            (default: :obj:`None`)
         stream (Optional[bool]): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
             Currently SambaVerse API doesn't support stream mode.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
     """
 
-    temperature: Optional[float] = 0.7
-    top_p: Optional[float] = 0.95
-    top_k: Optional[int] = 50
-    max_tokens: Optional[int] = 2048
-    repetition_penalty: Optional[float] = 1.0
-    stop: Optional[Union[str, list[str]]] = ""
-    stream: Optional[bool] = False
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    max_tokens: Optional[int] = None
+    repetition_penalty: Optional[float] = None
+    stop: Optional[Union[str, list[str]]] = None
+    stream: Optional[bool] = None
 
     def as_dict(self) -> dict[str, Any]:
         config_dict = super().as_dict()
camel/configs/sglang_config.py
@@ -16,7 +16,6 @@ from __future__ import annotations
 from typing import Any, Dict, List, Optional, Sequence, Union
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class SGLangConfig(BaseConfig):
@@ -31,27 +30,27 @@ class SGLangConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`1`)
+            each input message. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         presence_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         stream (bool, optional): Whether to stream the generated output in
             chunks. If set to `True`, the response will be streamed as it is
-            generated. (default: :obj:`False`)
+            generated. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
             in the chat completion. The total length of input tokens and
             generated tokens is limited by the model's context length.
@@ -60,17 +59,17 @@ class SGLangConfig(BaseConfig):
             that the model can dynamically invoke. Each tool should be
             defined as a dictionary following OpenAI's function calling
             specification format. For more details, refer to the OpenAI
-            documentation.
+            documentation. (default: :obj:`None`)
     """
 
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    temperature: float = 1.0
-    top_p: float = 1.0
-    n: int = 1
-    frequency_penalty: float = 0.0
-    presence_penalty: float = 0.0
-    stream: bool = False
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    stop: Optional[Union[str, Sequence[str]]] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    frequency_penalty: Optional[float] = None
+    presence_penalty: Optional[float] = None
+    stream: Optional[bool] = None
+    max_tokens: Optional[int] = None
     tools: Optional[Union[List[Dict[str, Any]]]] = None
camel/configs/siliconflow_config.py
@@ -13,12 +13,12 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Any, Sequence, Type, Union
+from typing import Any, Optional, Sequence, Type, Union
 
 from pydantic import BaseModel
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
+from camel.types import NOT_GIVEN
 
 
 class SiliconFlowConfig(BaseConfig):
@@ -27,15 +27,16 @@ class SiliconFlowConfig(BaseConfig):
 
     Args:
         temperature (float, optional): Determines the degree of randomness
-            in the response. (default: :obj:`0.7`)
+            in the response. (default: :obj:`None`)
         top_p (float, optional): The top_p (nucleus) parameter is used to
             dynamically adjust the number of choices for each predicted token
-            based on the cumulative probabilities. (default: :obj:`0.7`)
-        n (int, optional): Number of generations to return. (default::obj:`1`)
+            based on the cumulative probabilities. (default: :obj:`None`)
+        n (int, optional): Number of generations to return.
+            (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
-            that the model must output.
+            that the model must output. (default: :obj:`None`)
         stream (bool, optional): If set, tokens are returned as Server-Sent
-            Events as they are made available. (default: :obj:`False`)
+            Events as they are made available. (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate.
@@ -44,21 +45,21 @@ class SiliconFlowConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         tools (list[FunctionTool], optional): A list of tools the model may
             call. Currently, only functions are supported as a tool. Use this
             to provide a list of functions the model may generate JSON inputs
-            for. A max of 128 functions are supported.
+            for. A max of 128 functions are supported. (default: :obj:`None`)
     """
 
-    temperature: float = 0.7
-    top_p: float = 0.7
-    n: int = 1
-    stream: bool = False
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
-    response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN
-    frequency_penalty: float = 0.0
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    response_format: Optional[Union[Type[BaseModel], dict]] = None
+    frequency_penalty: Optional[float] = None
 
     def as_dict(self) -> dict[str, Any]:
         r"""Convert the current configuration to a dictionary.
camel/configs/togetherai_config.py
@@ -13,12 +13,11 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Any, Sequence, Union
+from typing import Any, Optional, Sequence, Union
 
 from pydantic import Field
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class TogetherAIConfig(BaseConfig):
@@ -29,14 +28,14 @@ class TogetherAIConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`1`)
+            each input message. (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -52,7 +51,7 @@ class TogetherAIConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
@@ -63,12 +62,12 @@ class TogetherAIConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         logit_bias (dict, optional): Modify the likelihood of specified tokens
             appearing in the completion. Accepts a json object that maps tokens
             (specified by their token ID in the tokenizer) to an associated
@@ -80,20 +79,20 @@ class TogetherAIConfig(BaseConfig):
             exclusive selection of the relevant token. (default: :obj:`{}`)
         user (str, optional): A unique identifier representing your end-user,
             which can help OpenAI to monitor and detect abuse.
-            (default: :obj:`""`)
+            (default: :obj:`None`)
     """
 
-    temperature: float = 0.2  # openai default: 1.0
-    top_p: float = 1.0
-    n: int = 1
-    stream: bool = False
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
-    presence_penalty: float = 0.0
-    response_format: Union[dict, NotGiven] = NOT_GIVEN
-    frequency_penalty: float = 0.0
+    temperature: Optional[float] = None  # openai default: 1.0
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
     logit_bias: dict = Field(default_factory=dict)
-    user: str = ""
+    user: Optional[str] = None
 
     def as_dict(self) -> dict[str, Any]:
         config_dict = super().as_dict()