camel-ai 0.2.36__py3-none-any.whl → 0.2.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (40)
  1. camel/__init__.py +1 -1
  2. camel/agents/__init__.py +2 -0
  3. camel/agents/repo_agent.py +579 -0
  4. camel/configs/aiml_config.py +20 -19
  5. camel/configs/anthropic_config.py +25 -27
  6. camel/configs/cohere_config.py +11 -10
  7. camel/configs/deepseek_config.py +16 -16
  8. camel/configs/gemini_config.py +8 -8
  9. camel/configs/groq_config.py +18 -19
  10. camel/configs/internlm_config.py +8 -8
  11. camel/configs/litellm_config.py +26 -24
  12. camel/configs/mistral_config.py +8 -8
  13. camel/configs/moonshot_config.py +11 -11
  14. camel/configs/nvidia_config.py +13 -13
  15. camel/configs/ollama_config.py +14 -15
  16. camel/configs/openai_config.py +3 -3
  17. camel/configs/openrouter_config.py +9 -9
  18. camel/configs/qwen_config.py +8 -8
  19. camel/configs/reka_config.py +12 -11
  20. camel/configs/samba_config.py +14 -14
  21. camel/configs/sglang_config.py +15 -16
  22. camel/configs/siliconflow_config.py +18 -17
  23. camel/configs/togetherai_config.py +18 -19
  24. camel/configs/vllm_config.py +18 -19
  25. camel/configs/yi_config.py +7 -8
  26. camel/configs/zhipuai_config.py +8 -9
  27. camel/datasets/static_dataset.py +25 -23
  28. camel/environments/models.py +3 -0
  29. camel/environments/single_step.py +222 -136
  30. camel/extractors/__init__.py +16 -1
  31. camel/toolkits/__init__.py +2 -0
  32. camel/toolkits/thinking_toolkit.py +74 -0
  33. camel/types/enums.py +3 -0
  34. camel/utils/chunker/code_chunker.py +9 -15
  35. camel/verifiers/base.py +28 -5
  36. camel/verifiers/python_verifier.py +313 -68
  37. {camel_ai-0.2.36.dist-info → camel_ai-0.2.37.dist-info}/METADATA +52 -5
  38. {camel_ai-0.2.36.dist-info → camel_ai-0.2.37.dist-info}/RECORD +40 -38
  39. {camel_ai-0.2.36.dist-info → camel_ai-0.2.37.dist-info}/WHEEL +0 -0
  40. {camel_ai-0.2.36.dist-info → camel_ai-0.2.37.dist-info}/licenses/LICENSE +0 -0

camel/configs/ollama_config.py
@@ -13,12 +13,11 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Sequence, Type, Union
+from typing import Optional, Sequence, Type, Union
 
 from pydantic import BaseModel
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class OllamaConfig(BaseConfig):
@@ -31,12 +30,12 @@ class OllamaConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -52,7 +51,7 @@ class OllamaConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
@@ -63,22 +62,22 @@ class OllamaConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
     """
 
-    temperature: float = 0.2
-    top_p: float = 1.0
-    stream: bool = False
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
-    presence_penalty: float = 0.0
-    response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN
-    frequency_penalty: float = 0.0
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[Union[Type[BaseModel], dict]] = None
+    frequency_penalty: Optional[float] = None
 
 
 OLLAMA_API_PARAMS = {param for param in OllamaConfig.model_fields.keys()}
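
The change repeated across every config file in this release: hard-coded library defaults (temperature=0.2, top_p=1.0, stream=False) and the NOT_GIVEN sentinel give way to plain Optional fields defaulting to None, so a parameter the caller never set can be omitted from the request and the backend's own default applies. A minimal sketch of the resulting behavior, assuming pydantic-v2-style serialization with exclude_none; the class name and as_dict body here are illustrative, not CAMEL's actual implementation:

from typing import Optional, Sequence, Union

from pydantic import BaseModel


class ConfigSketch(BaseModel):
    # Every sampling parameter is now unset by default.
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    stream: Optional[bool] = None
    stop: Optional[Union[str, Sequence[str]]] = None
    max_tokens: Optional[int] = None

    def as_dict(self) -> dict:
        # Drop every field the caller never set, instead of sending
        # library-chosen defaults such as temperature=0.2 to the server.
        return self.model_dump(exclude_none=True)


config = ConfigSketch(temperature=0.4)
print(config.as_dict())  # {'temperature': 0.4}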

camel/configs/openai_config.py
@@ -79,7 +79,7 @@ class ChatGPTConfig(BaseConfig):
             exclusive selection of the relevant token. (default: :obj:`None`)
         user (str, optional): A unique identifier representing your end-user,
             which can help OpenAI to monitor and detect abuse.
-            (default: :obj:`""`)
+            (default: :obj:`None`)
         tools (list[FunctionTool], optional): A list of tools the model may
             call. Currently, only functions are supported as a tool. Use this
             to provide a list of functions the model may generate JSON inputs
@@ -102,8 +102,8 @@ class ChatGPTConfig(BaseConfig):
             or if the model type does not support it, this parameter is
             ignored. (default: :obj:`None`)
         parallel_tool_calls (bool, optional): A parameter specifying whether
-            the model should call tools in parallel or not. (default:
-            :obj:`None`)
+            the model should call tools in parallel or not.
+            (default: :obj:`None`)
     """
 
     temperature: Optional[float] = None

camel/configs/openrouter_config.py
@@ -29,14 +29,14 @@ class OpenRouterConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj: `None`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj: `None`)
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj: `None`)
+            each input message. (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -52,7 +52,7 @@ class OpenRouterConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj: `None`)
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
@@ -63,19 +63,19 @@ class OpenRouterConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj: `None`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj: `None`)
+            about frequency and presence penalties. (default: :obj:`None`)
         user (str, optional): A unique identifier representing your end-user,
             which can help OpenAI to monitor and detect abuse.
-            (default: :obj: `None`)
+            (default: :obj:`None`)
         tools (list[FunctionTool], optional): A list of tools the model may
             call. Currently, only functions are supported as a tool. Use this
             to provide a list of functions the model may generate JSON inputs
-            for. A max of 128 functions are supported. (default: :obj: `None`)
+            for. A max of 128 functions are supported. (default: :obj:`None`)
         tool_choice (Union[dict[str, str], str], optional): Controls which (if
             any) tool is called by the model. :obj:`"none"` means the model
             will not call any tool and instead generates a message.
@@ -85,7 +85,7 @@ class OpenRouterConfig(BaseConfig):
             via {"type": "function", "function": {"name": "my_function"}}
             forces the model to call that tool. :obj:`"none"` is the default
             when no tools are present. :obj:`"auto"` is the default if tools
-            are present. (default: :obj: `None`)
+            are present. (default: :obj:`None`)
     """
 
     temperature: Optional[float] = None

camel/configs/qwen_config.py
@@ -25,18 +25,18 @@ class QwenConfig(BaseConfig):
 
     Args:
         stream (bool, optional): Whether to stream the response.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         temperature (float, optional): Controls the diversity and
             focus of the generated results. Lower values make the output more
             focused, while higher values make it more diverse.
-            (default: :obj:`0.3`)
+            (default: :obj:`None`)
         top_p (float, optional): Controls the diversity and focus of
             the generated results. Higher values make the output more diverse,
             while lower values make it more focused. (default: :obj:`0.9`)
         presence_penalty (float, optional): Controls the repetition
             content in the generated results. Positive values reduce the
             repetition of content, while negative values increase it.
-            (default: :obj:`0.0`)
+            (default: :obj:`None`)
         response_format (Optional[Dict[str, str]], optional): Specifies the
             format of the returned content. The available values are
             `{"type": "text"}` or `{"type": "json_object"}`. Setting it to
@@ -67,13 +67,13 @@ class QwenConfig(BaseConfig):
             (default: :obj:`None`)
         include_usage (bool, optional): When streaming, specifies whether to
             include usage information in `stream_options`.
-            (default: :obj:`True`)
+            (default: :obj:`None`)
     """
 
-    stream: bool = False
-    temperature: float = 0.3
-    top_p: float = 0.9
-    presence_penalty: float = 0.0
+    stream: Optional[bool] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    presence_penalty: Optional[float] = None
     response_format: Optional[Dict[str, str]] = None
     max_tokens: Optional[int] = None
     seed: Optional[int] = None
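
QwenConfig's include_usage flag ties into the OpenAI-compatible stream_options request field: when streaming, it asks the server to append a final chunk with token-usage accounting. A hedged sketch of how such a flag typically maps onto the payload; build_stream_options is a hypothetical helper, not part of CAMEL:

from typing import Optional


def build_stream_options(
    stream: Optional[bool], include_usage: Optional[bool]
) -> dict:
    # Only emit stream_options when streaming is on and the caller
    # explicitly asked for usage accounting.
    payload: dict = {}
    if stream:
        payload["stream"] = True
        if include_usage:
            payload["stream_options"] = {"include_usage": True}
    return payload


print(build_stream_options(stream=True, include_usage=True))
# {'stream': True, 'stream_options': {'include_usage': True}}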

camel/configs/reka_config.py
@@ -26,32 +26,33 @@ class RekaConfig(BaseConfig):
 
     Args:
         temperature (Optional[float], optional): temperature the temperature
-            to use for sampling, e.g. 0.5.
+            to use for sampling, e.g. 0.5. (default: :obj:`None`)
         top_p (Optional[float], optional): the cumulative probability of
-            tokens to generate, e.g. 0.9. Defaults to None.
+            tokens to generate, e.g. 0.9. (default: :obj:`None`)
         top_k (Optional[int], optional): Parameter which forces the model to
             only consider the tokens with the `top_k` highest probabilities at
-            the next step. Defaults to 1024.
+            the next step. (default: :obj:`None`)
         max_tokens (Optional[int], optional): the maximum number of tokens to
-            generate, e.g. 100. Defaults to None.
+            generate, e.g. 100. (default: :obj:`None`)
         stop (Optional[Union[str,list[str]]]): Stop generation if this token
             is detected. Or if one of these tokens is detected when providing
-            a string list.
+            a string list. (default: :obj:`None`)
         seed (Optional[int], optional): the random seed to use for sampling, e.
-            g. 42. Defaults to None.
+            g. 42. (default: :obj:`None`)
         presence_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         use_search_engine (Optional[bool]): Whether to consider using search
             engine to complete the request. Note that even if this is set to
             `True`, the model might decide to not use search.
+            (default: :obj:`None`)
     """
 
     temperature: Optional[float] = None
@@ -60,9 +61,9 @@ class RekaConfig(BaseConfig):
     max_tokens: Optional[int] = None
     stop: Optional[Union[str, list[str]]] = None
     seed: Optional[int] = None
-    frequency_penalty: float = 0.0
-    presence_penalty: float = 0.0
-    use_search_engine: Optional[bool] = False
+    frequency_penalty: Optional[float] = None
+    presence_penalty: Optional[float] = None
+    use_search_engine: Optional[bool] = None
 
     def as_dict(self) -> dict[str, Any]:
         config_dict = super().as_dict()

camel/configs/samba_config.py
@@ -29,39 +29,39 @@ class SambaVerseAPIConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.7`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`0.95`)
+            (default: :obj:`None`)
         top_k (int, optional): Only sample from the top K options for each
             subsequent token. Used to remove "long tail" low probability
             responses.
-            (default: :obj:`50`)
+            (default: :obj:`None`)
         max_tokens (Optional[int], optional): The maximum number of tokens to
             generate, e.g. 100.
-            (default: :obj:`2048`)
+            (default: :obj:`None`)
         repetition_penalty (Optional[float], optional): The parameter for
             repetition penalty. 1.0 means no penalty.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         stop (Optional[Union[str,list[str]]]): Stop generation if this token
             is detected. Or if one of these tokens is detected when providing
             a string list.
-            (default: :obj:`""`)
+            (default: :obj:`None`)
         stream (Optional[bool]): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
             Currently SambaVerse API doesn't support stream mode.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
     """
 
-    temperature: Optional[float] = 0.7
-    top_p: Optional[float] = 0.95
-    top_k: Optional[int] = 50
-    max_tokens: Optional[int] = 2048
-    repetition_penalty: Optional[float] = 1.0
-    stop: Optional[Union[str, list[str]]] = ""
-    stream: Optional[bool] = False
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    max_tokens: Optional[int] = None
+    repetition_penalty: Optional[float] = None
+    stop: Optional[Union[str, list[str]]] = None
+    stream: Optional[bool] = None
 
     def as_dict(self) -> dict[str, Any]:
         config_dict = super().as_dict()
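
SambaVerseAPIConfig keeps its as_dict override, and the docstring notes that the SambaVerse API doesn't support stream mode. An override along the lines of the sketch below could strip the unsupported key before the request is built; this is an assumption about intent, not the method's actual body:

from typing import Any, Optional

from pydantic import BaseModel


class SambaVerseSketch(BaseModel):
    temperature: Optional[float] = None
    stream: Optional[bool] = None

    def as_dict(self) -> dict[str, Any]:
        # Hypothetical: drop unset fields, then remove `stream`, which
        # the SambaVerse API does not support.
        config_dict = self.model_dump(exclude_none=True)
        config_dict.pop("stream", None)
        return config_dict


print(SambaVerseSketch(temperature=0.2, stream=True).as_dict())
# {'temperature': 0.2}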

camel/configs/sglang_config.py
@@ -16,7 +16,6 @@ from __future__ import annotations
 from typing import Any, Dict, List, Optional, Sequence, Union
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class SGLangConfig(BaseConfig):
@@ -31,27 +30,27 @@ class SGLangConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`1`)
+            each input message. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         presence_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         stream (bool, optional): Whether to stream the generated output in
             chunks. If set to `True`, the response will be streamed as it is
-            generated. (default: :obj:`False`)
+            generated. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
             in the chat completion. The total length of input tokens and
             generated tokens is limited by the model's context length.
@@ -60,17 +59,17 @@ class SGLangConfig(BaseConfig):
             that the model can dynamically invoke. Each tool should be
             defined as a dictionary following OpenAI's function calling
             specification format. For more details, refer to the OpenAI
-            documentation.
+            documentation. (default: :obj:`None`)
     """
 
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    temperature: float = 1.0
-    top_p: float = 1.0
-    n: int = 1
-    frequency_penalty: float = 0.0
-    presence_penalty: float = 0.0
-    stream: bool = False
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    stop: Optional[Union[str, Sequence[str]]] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    frequency_penalty: Optional[float] = None
+    presence_penalty: Optional[float] = None
+    stream: Optional[bool] = None
+    max_tokens: Optional[int] = None
     tools: Optional[Union[List[Dict[str, Any]]]] = None
 
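The NOT_GIVEN import removed here (and in several other config files) is the openai-style sentinel that distinguishes "parameter never set" from "parameter explicitly set to None". With every field now Optional and defaulting to None, that distinction collapses: None itself means "unset, let the server decide". A sketch contrasting the two patterns; the NotGiven class below is illustrative, not CAMEL's actual implementation:

from typing import Optional, Union


class NotGiven:
    # Singleton sentinel marking "the caller passed nothing at all".
    def __repr__(self) -> str:
        return "NOT_GIVEN"


NOT_GIVEN = NotGiven()


def old_style(max_tokens: Union[int, NotGiven] = NOT_GIVEN) -> dict:
    # Old pattern: forward the value only when it is not the sentinel.
    if isinstance(max_tokens, NotGiven):
        return {}
    return {"max_tokens": max_tokens}


def new_style(max_tokens: Optional[int] = None) -> dict:
    # New pattern: None plays the sentinel role directly.
    if max_tokens is None:
        return {}
    return {"max_tokens": max_tokens}


assert old_style() == new_style() == {}
assert old_style(64) == new_style(64) == {"max_tokens": 64}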

camel/configs/siliconflow_config.py
@@ -13,12 +13,12 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Any, Sequence, Type, Union
+from typing import Any, Optional, Sequence, Type, Union
 
 from pydantic import BaseModel
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
+from camel.types import NOT_GIVEN
 
 
 class SiliconFlowConfig(BaseConfig):
@@ -27,15 +27,16 @@ class SiliconFlowConfig(BaseConfig):
 
     Args:
         temperature (float, optional): Determines the degree of randomness
-            in the response. (default: :obj:`0.7`)
+            in the response. (default: :obj:`None`)
         top_p (float, optional): The top_p (nucleus) parameter is used to
             dynamically adjust the number of choices for each predicted token
-            based on the cumulative probabilities. (default: :obj:`0.7`)
-        n (int, optional): Number of generations to return. (default::obj:`1`)
+            based on the cumulative probabilities. (default: :obj:`None`)
+        n (int, optional): Number of generations to return.
+            (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
-            that the model must output.
+            that the model must output. (default: :obj:`None`)
         stream (bool, optional): If set, tokens are returned as Server-Sent
-            Events as they are made available. (default: :obj:`False`)
+            Events as they are made available. (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate.
@@ -44,21 +45,21 @@ class SiliconFlowConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         tools (list[FunctionTool], optional): A list of tools the model may
             call. Currently, only functions are supported as a tool. Use this
             to provide a list of functions the model may generate JSON inputs
-            for. A max of 128 functions are supported.
+            for. A max of 128 functions are supported. (default: :obj:`None`)
     """
 
-    temperature: float = 0.7
-    top_p: float = 0.7
-    n: int = 1
-    stream: bool = False
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
-    response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN
-    frequency_penalty: float = 0.0
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    response_format: Optional[Union[Type[BaseModel], dict]] = None
+    frequency_penalty: Optional[float] = None
 
     def as_dict(self) -> dict[str, Any]:
         r"""Convert the current configuration to a dictionary.

camel/configs/togetherai_config.py
@@ -13,12 +13,11 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Any, Sequence, Union
+from typing import Any, Optional, Sequence, Union
 
 from pydantic import Field
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class TogetherAIConfig(BaseConfig):
@@ -29,14 +28,14 @@ class TogetherAIConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`1`)
+            each input message. (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -52,7 +51,7 @@ class TogetherAIConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
@@ -63,12 +62,12 @@ class TogetherAIConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         logit_bias (dict, optional): Modify the likelihood of specified tokens
             appearing in the completion. Accepts a json object that maps tokens
             (specified by their token ID in the tokenizer) to an associated
@@ -80,20 +79,20 @@ class TogetherAIConfig(BaseConfig):
             exclusive selection of the relevant token. (default: :obj:`{}`)
         user (str, optional): A unique identifier representing your end-user,
             which can help OpenAI to monitor and detect abuse.
-            (default: :obj:`""`)
+            (default: :obj:`None`)
     """
 
-    temperature: float = 0.2  # openai default: 1.0
-    top_p: float = 1.0
-    n: int = 1
-    stream: bool = False
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
-    presence_penalty: float = 0.0
-    response_format: Union[dict, NotGiven] = NOT_GIVEN
-    frequency_penalty: float = 0.0
+    temperature: Optional[float] = None  # openai default: 1.0
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
     logit_bias: dict = Field(default_factory=dict)
-    user: str = ""
+    user: Optional[str] = None
 
     def as_dict(self) -> dict[str, Any]:
         config_dict = super().as_dict()
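
One field that does not move to Optional: logit_bias stays dict = Field(default_factory=dict). default_factory builds a fresh dict for each instance, so mutating one config's logit_bias never leaks into another. A short illustrative sketch:

from pydantic import BaseModel, Field


class BiasSketch(BaseModel):
    # A fresh dict per instance; no shared mutable default.
    logit_bias: dict = Field(default_factory=dict)


a, b = BiasSketch(), BiasSketch()
a.logit_bias["50256"] = -100
print(b.logit_bias)  # {} -- b is unaffected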

camel/configs/vllm_config.py
@@ -18,7 +18,6 @@ from typing import Optional, Sequence, Union
 from pydantic import Field
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 # flake8: noqa: E501
@@ -32,14 +31,14 @@ class VLLMConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`0.2`)
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`1.0`)
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`1`)
+            each input message. (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -55,7 +54,7 @@ class VLLMConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`False`)
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate
@@ -66,12 +65,12 @@ class VLLMConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`0.0`)
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`0.0`)
+            about frequency and presence penalties. (default: :obj:`None`)
         logit_bias (dict, optional): Modify the likelihood of specified tokens
             appearing in the completion. Accepts a json object that maps tokens
             (specified by their token ID in the tokenizer) to an associated
@@ -80,10 +79,10 @@ class VLLMConfig(BaseConfig):
             The exact effect will vary per model, but values between :obj:`-1`
             and :obj:`1` should decrease or increase likelihood of selection;
             values like :obj:`-100` or :obj:`100` should result in a ban or
-            exclusive selection of the relevant token. (default: :obj:`{}`)
+            exclusive selection of the relevant token. (default: :obj:`None`)
         user (str, optional): A unique identifier representing your end-user,
             which can help OpenAI to monitor and detect abuse.
-            (default: :obj:`""`)
+            (default: :obj:`None`)
         logprobs: Whether to return log probabilities of the output tokens or
             not. If true, returns the log probabilities of each output token
             returned in the `logits` of `message`. (default: :obj:`None`)
@@ -93,17 +92,17 @@ class VLLMConfig(BaseConfig):
             this parameter is used. (default: :obj:`None`)
     """
 
-    temperature: float = 0.2  # openai default: 1.0
-    top_p: float = 1.0
-    n: int = 1
-    stream: bool = False
-    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
-    max_tokens: Union[int, NotGiven] = NOT_GIVEN
-    presence_penalty: float = 0.0
-    response_format: Union[dict, NotGiven] = NOT_GIVEN
-    frequency_penalty: float = 0.0
+    temperature: Optional[float] = None  # openai default: 1.0
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
     logit_bias: dict = Field(default_factory=dict)
-    user: str = ""
+    user: Optional[str] = None
     logprobs: Optional[bool] = None
     top_logprobs: Optional[int] = None