camel-ai 0.2.36__py3-none-any.whl → 0.2.38__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- camel/__init__.py +1 -1
- camel/agents/__init__.py +2 -0
- camel/agents/repo_agent.py +579 -0
- camel/configs/aiml_config.py +20 -19
- camel/configs/anthropic_config.py +25 -27
- camel/configs/cohere_config.py +11 -10
- camel/configs/deepseek_config.py +16 -16
- camel/configs/gemini_config.py +8 -8
- camel/configs/groq_config.py +18 -19
- camel/configs/internlm_config.py +8 -8
- camel/configs/litellm_config.py +26 -24
- camel/configs/mistral_config.py +8 -8
- camel/configs/moonshot_config.py +11 -11
- camel/configs/nvidia_config.py +13 -13
- camel/configs/ollama_config.py +14 -15
- camel/configs/openai_config.py +3 -3
- camel/configs/openrouter_config.py +9 -9
- camel/configs/qwen_config.py +8 -8
- camel/configs/reka_config.py +12 -11
- camel/configs/samba_config.py +14 -14
- camel/configs/sglang_config.py +15 -16
- camel/configs/siliconflow_config.py +18 -17
- camel/configs/togetherai_config.py +18 -19
- camel/configs/vllm_config.py +18 -19
- camel/configs/yi_config.py +7 -8
- camel/configs/zhipuai_config.py +8 -9
- camel/datagen/evol_instruct/__init__.py +20 -0
- camel/datagen/evol_instruct/evol_instruct.py +424 -0
- camel/datagen/evol_instruct/scorer.py +166 -0
- camel/datagen/evol_instruct/templates.py +268 -0
- camel/datasets/static_dataset.py +25 -23
- camel/environments/models.py +10 -1
- camel/environments/single_step.py +296 -136
- camel/extractors/__init__.py +16 -1
- camel/interpreters/docker_interpreter.py +1 -1
- camel/interpreters/e2b_interpreter.py +1 -1
- camel/interpreters/subprocess_interpreter.py +1 -1
- camel/loaders/__init__.py +2 -2
- camel/loaders/{panda_reader.py → pandas_reader.py} +61 -30
- camel/memories/context_creators/score_based.py +198 -67
- camel/models/aiml_model.py +9 -3
- camel/models/anthropic_model.py +11 -3
- camel/models/azure_openai_model.py +9 -3
- camel/models/base_audio_model.py +6 -0
- camel/models/base_model.py +4 -0
- camel/models/deepseek_model.py +9 -3
- camel/models/gemini_model.py +9 -3
- camel/models/groq_model.py +9 -3
- camel/models/internlm_model.py +8 -2
- camel/models/model_factory.py +4 -0
- camel/models/moonshot_model.py +8 -2
- camel/models/nemotron_model.py +9 -3
- camel/models/nvidia_model.py +9 -3
- camel/models/ollama_model.py +9 -3
- camel/models/openai_audio_models.py +5 -3
- camel/models/openai_compatible_model.py +9 -3
- camel/models/openai_model.py +9 -3
- camel/models/openrouter_model.py +9 -3
- camel/models/qwen_model.py +9 -3
- camel/models/samba_model.py +9 -3
- camel/models/sglang_model.py +11 -4
- camel/models/siliconflow_model.py +8 -2
- camel/models/stub_model.py +2 -1
- camel/models/togetherai_model.py +9 -3
- camel/models/vllm_model.py +9 -3
- camel/models/yi_model.py +9 -3
- camel/models/zhipuai_model.py +9 -3
- camel/retrievers/auto_retriever.py +14 -0
- camel/storages/__init__.py +2 -0
- camel/storages/vectordb_storages/__init__.py +2 -0
- camel/storages/vectordb_storages/tidb.py +332 -0
- camel/toolkits/__init__.py +7 -0
- camel/toolkits/browser_toolkit.py +84 -61
- camel/toolkits/openai_agent_toolkit.py +131 -0
- camel/toolkits/searxng_toolkit.py +207 -0
- camel/toolkits/thinking_toolkit.py +230 -0
- camel/types/enums.py +4 -0
- camel/utils/chunker/code_chunker.py +9 -15
- camel/verifiers/base.py +28 -5
- camel/verifiers/python_verifier.py +321 -68
- {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/METADATA +103 -8
- {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/RECORD +84 -75
- {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/licenses/LICENSE +0 -0
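
The bulk of the `camel/configs/*` changes below follow one pattern: fields that previously carried concrete defaults or `NotGiven` sentinels become `Optional[...] = None`, and serialization drops `None`-valued keys so unset parameters are omitted from provider requests entirely. A minimal sketch of that pattern (the `ExampleConfig` class is hypothetical, not taken from the package):

```python
from typing import Any, Optional

from pydantic import BaseModel


class ExampleConfig(BaseModel):  # hypothetical, not a class from camel
    # Unset fields stay None instead of carrying provider-specific defaults.
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None

    def as_dict(self) -> dict[str, Any]:
        # Drop None-valued keys so only explicitly set parameters are
        # forwarded to the backend API (the pattern the diffs below apply).
        return {k: v for k, v in self.model_dump().items() if v is not None}


print(ExampleConfig(temperature=0.7).as_dict())  # {'temperature': 0.7}
print(ExampleConfig().as_dict())                 # {}
```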
camel/configs/anthropic_config.py
CHANGED

@@ -13,10 +13,9 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Any, …
+from typing import Any, List, Optional
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NotGiven
 
 
 class AnthropicConfig(BaseConfig):

@@ -29,59 +28,58 @@ class AnthropicConfig(BaseConfig):
             generate before stopping. Note that Anthropic models may stop
             before reaching this maximum. This parameter only specifies the
             absolute maximum number of tokens to generate.
-            (default: :obj:`…
+            (default: :obj:`None`)
         stop_sequences (List[str], optional): Custom text sequences that will
             cause the model to stop generating. The models will normally stop
             when they have naturally completed their turn. If the model
             encounters one of these custom sequences, the response will be
             terminated and the stop_reason will be "stop_sequence".
-            (default: :obj:`…
+            (default: :obj:`None`)
         temperature (float, optional): Amount of randomness injected into the
             response. Defaults to 1. Ranges from 0 to 1. Use temp closer to 0
             for analytical / multiple choice, and closer to 1 for creative
             and generative tasks. Note that even with temperature of 0.0, the
-            results will not be fully deterministic. (default: :obj:`…
+            results will not be fully deterministic. (default: :obj:`None`)
         top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
             compute the cumulative distribution over all the options for each
             subsequent token in decreasing probability order and cut it off
             once it reaches a particular probability specified by `top_p`.
             You should either alter `temperature` or `top_p`,
-            but not both. (default: :obj:`…
+            but not both. (default: :obj:`None`)
         top_k (int, optional): Only sample from the top K options for each
             subsequent token. Used to remove "long tail" low probability
-            responses. (default: :obj:`…
+            responses. (default: :obj:`None`)
         stream (bool, optional): Whether to incrementally stream the response
-            using server-sent events. (default: :obj:`…
-        metadata (…
+            using server-sent events. (default: :obj:`None`)
+        metadata (dict, optional): An object describing
             metadata about the request. Can include user_id as an external
             identifier for the user associated with the request.
-            (default: :obj:`…
-        thinking (…
+            (default: :obj:`None`)
+        thinking (dict, optional): Configuration for enabling
             Claude's extended thinking. When enabled, responses include
             thinking content blocks showing Claude's thinking process.
-            (default: :obj:`…
-        tool_choice (…
+            (default: :obj:`None`)
+        tool_choice (dict, optional): How the model should
             use the provided tools. The model can use a specific tool, any
             available tool, decide by itself, or not use tools at all.
-            (default: :obj:`…
+            (default: :obj:`None`)
     """
 
-    max_tokens: int = …
-    stop_sequences: …
-    temperature: float = …
-    top_p: …
-    …
-    …
-    …
-    …
+    max_tokens: Optional[int] = None
+    stop_sequences: Optional[List[str]] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    stream: Optional[bool] = None
+    metadata: Optional[dict] = None
+    thinking: Optional[dict] = None
+    tool_choice: Optional[dict] = None
 
     def as_dict(self) -> dict[str, Any]:
         config_dict = super().as_dict()
         # Create a list of keys to remove to avoid modifying dict
         keys_to_remove = [
-            key
-            for key, value in config_dict.items()
-            if isinstance(value, NotGiven)
+            key for key, value in config_dict.items() if value is None
         ]
 
         for key in keys_to_remove:

@@ -89,8 +87,8 @@ class AnthropicConfig(BaseConfig):
 
         # remove some keys if thinking is enabled
         thinking_enabled = (
-            …
-            and self.thinking…
+            self.thinking is not None
+            and self.thinking.get("type") == "enabled"
         )
         if thinking_enabled:
             # `top_p`, `top_k`, `temperature` must be unset when thinking is
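
Judging from the hunks above, `AnthropicConfig.as_dict()` now treats `None` as "unset" and, when extended thinking is enabled, also strips `temperature`, `top_p`, and `top_k`. A usage sketch; the field values (including `budget_tokens`) are illustrative, not taken from this diff:

```python
from camel.configs import AnthropicConfig

# Unset (None) fields are dropped from the serialized payload.
config = AnthropicConfig(max_tokens=1024, temperature=0.2)
print(config.as_dict())  # expected: {'max_tokens': 1024, 'temperature': 0.2}

# With extended thinking enabled, `temperature`, `top_p`, and `top_k` must be
# unset, so as_dict() strips them as well.
thinking = AnthropicConfig(
    max_tokens=2048,
    temperature=0.2,
    thinking={"type": "enabled", "budget_tokens": 1024},  # illustrative values
)
print(thinking.as_dict())  # `temperature` should no longer appear
```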
camel/configs/cohere_config.py
CHANGED

@@ -26,7 +26,7 @@ class CohereConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`…
+            (default: :obj:`None`)
         documents (list, optional): A list of relevant documents that the
             model can cite to generate a more accurate reply. Each document is
             either a string or document object with content and metadata.

@@ -47,30 +47,31 @@
             `1.0`. Used to reduce repetitiveness of generated tokens. The
             higher the value, the stronger a penalty is applied to previously
             present tokens, proportional to how many times they have already
-            appeared in the prompt or prior generation.
+            appeared in the prompt or prior generation.
+            (default: :obj:`None`)
         presence_penalty (float, optional): Min value of `0.0`, max value of
             `1.0`. Used to reduce repetitiveness of generated tokens. Similar
             to `frequency_penalty`, except that this penalty is applied
             equally to all tokens that have already appeared, regardless of
-            their exact frequencies. (default: :obj:`…
+            their exact frequencies. (default: :obj:`None`)
         k (int, optional): Ensures only the top k most likely tokens are
             considered for generation at each step. Min value of `0`, max
-            value of `500`. (default: :obj:`…
+            value of `500`. (default: :obj:`None`)
         p (float, optional): Ensures that only the most likely tokens, with
             total probability mass of `p`, are considered for generation at
             each step. If both k and p are enabled, `p` acts after `k`. Min
-            value of `0.01`, max value of `0.99`. (default: :obj:`…
+            value of `0.01`, max value of `0.99`. (default: :obj:`None`)
     """
 
-    temperature: Optional[float] = …
+    temperature: Optional[float] = None
     documents: Optional[list] = None
     max_tokens: Optional[int] = None
     stop_sequences: Optional[List[str]] = None
     seed: Optional[int] = None
-    frequency_penalty: Optional[float] = …
-    presence_penalty: Optional[float] = …
-    k: Optional[int] = …
-    p: Optional[float] = …
+    frequency_penalty: Optional[float] = None
+    presence_penalty: Optional[float] = None
+    k: Optional[int] = None
+    p: Optional[float] = None
 
 
 COHERE_API_PARAMS = {param for param in CohereConfig().model_fields.keys()}
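
`COHERE_API_PARAMS` enumerates the keyword names the config accepts, which makes it easy to screen user-supplied dictionaries before constructing a config. A sketch assuming the module path shown in the header above (note the set is built from an instance, `CohereConfig().model_fields`, whereas the Groq file below reads `model_fields` from the class):

```python
from camel.configs.cohere_config import COHERE_API_PARAMS, CohereConfig

user_kwargs = {"temperature": 0.3, "k": 40, "not_a_cohere_param": 1}

# Keep only keyword names that CohereConfig actually declares.
accepted = {k: v for k, v in user_kwargs.items() if k in COHERE_API_PARAMS}
print(accepted)  # {'temperature': 0.3, 'k': 40}
config = CohereConfig(**accepted)
```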
camel/configs/deepseek_config.py
CHANGED

@@ -29,19 +29,19 @@ class DeepSeekConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`…
+            (default: :obj:`None`)
         top_p (float, optional): Controls the diversity and focus of the
             generated results. Higher values make the output more diverse,
-            while lower values make it more focused. (default: :obj:`…
+            while lower values make it more focused. (default: :obj:`None`)
         response_format (object, optional): Specifies the format of the
             returned content. The available values are `{"type": "text"}` or
             `{"type": "json_object"}`. Setting it to `{"type": "json_object"}`
             will output a standard JSON string.
-            (default: :obj:`…
+            (default: :obj:`None`)
         stream (bool, optional): If set, partial message deltas will be sent.
             Tokens will be sent as data-only server-sent events (SSE) as
             they become available, with the stream terminated by a
-            data: [DONE] message. (default: :obj:`…
+            data: [DONE] message. (default: :obj:`None`)
         stop (Union[str, list[str]], optional): Up to 16 sequences where
             the API will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens that can

@@ -51,11 +51,11 @@ class DeepSeekConfig(BaseConfig):
         presence_penalty (float, optional): Number between -2.0 and 2.0.
             Positive values penalize new tokens based on whether they
             appear in the text so far, increasing the model's likelihood
-            to talk about new topics. (default: :obj:`…
+            to talk about new topics. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between -2.0 and 2.0.
             Positive values penalize new tokens based on their existing
             frequency in the text so far, decreasing the model's likelihood
-            to repeat the same line verbatim. (default: :obj:`…
+            to repeat the same line verbatim. (default: :obj:`None`)
         tools (list[FunctionTool], optional): A list of tools the model may
             call. Currently, only functions are supported as a tool. Use
             this to provide a list of functions the model may generate JSON

@@ -70,31 +70,31 @@ class DeepSeekConfig(BaseConfig):
             {"type": "function", "function": {"name": "my_function"}} forces
             the model to call that tool. "none" is the default when no tools
             are present. "auto" is the default if tools are present.
-            (default: :obj:`…
+            (default: :obj:`None`)
         logprobs (bool, optional): Whether to return log probabilities of
             the output tokens or not. If true, returns the log probabilities
             of each output token returned in the content of message.
-            (default: :obj:`…
+            (default: :obj:`None`)
         top_logprobs (int, optional): An integer between 0 and 20 specifying
             the number of most likely tokens to return at each token
             position, each with an associated log probability. logprobs
             must be set to true if this parameter is used.
             (default: :obj:`None`)
         include_usage (bool, optional): When streaming, specifies whether to
-            include usage information in `stream_options`. …
-            :obj:`…
+            include usage information in `stream_options`.
+            (default: :obj:`None`)
     """
 
-    temperature: float = …
-    top_p: float = …
-    stream: bool = …
+    temperature: Optional[float] = None  # deepseek default: 1.0
+    top_p: Optional[float] = None
+    stream: Optional[bool] = None
     stop: Optional[Union[str, Sequence[str]]] = None
     max_tokens: Optional[int] = None
-    presence_penalty: float = …
+    presence_penalty: Optional[float] = None
     response_format: Optional[Union[Type[BaseModel], dict]] = None
-    frequency_penalty: float = …
+    frequency_penalty: Optional[float] = None
     tool_choice: Optional[Union[dict[str, str], str]] = None
-    logprobs: bool = …
+    logprobs: Optional[bool] = None
     top_logprobs: Optional[int] = None
 
     def __init__(self, include_usage: bool = True, **kwargs):
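
Unlike the plain field declarations, `include_usage` is still consumed by `__init__` (defaulting to `True`) and, per the docstring, governs usage reporting via `stream_options` when streaming. A hedged usage sketch:

```python
from camel.configs import DeepSeekConfig

# include_usage is taken by __init__ (default: True); per the docstring it
# controls usage reporting in `stream_options` when streaming is on.
config = DeepSeekConfig(temperature=0.5, stream=True, include_usage=False)
print(config.as_dict())
```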
camel/configs/gemini_config.py
CHANGED

@@ -29,14 +29,14 @@ class GeminiConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`…
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`…
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`…
+            each input message. (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to

@@ -52,7 +52,7 @@ class GeminiConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`…
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate

@@ -75,10 +75,10 @@ class GeminiConfig(BaseConfig):
             are present.
     """
 
-    temperature: float = …
-    top_p: float = …
-    n: int = …
-    stream: bool = …
+    temperature: Optional[float] = None  # openai default: 1.0
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
     stop: Optional[Union[str, Sequence[str]]] = None
     max_tokens: Optional[int] = None
     response_format: Optional[Union[Type[BaseModel], dict]] = None
camel/configs/groq_config.py
CHANGED

@@ -16,7 +16,6 @@ from __future__ import annotations
 from typing import Optional, Sequence, Union
 
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class GroqConfig(BaseConfig):

@@ -29,14 +28,14 @@ class GroqConfig(BaseConfig):
         temperature (float, optional): Sampling temperature to use, between
             :obj:`0` and :obj:`2`. Higher values make the output more random,
             while lower values make it more focused and deterministic.
-            (default: :obj:`…
+            (default: :obj:`None`)
         top_p (float, optional): An alternative to sampling with temperature,
             called nucleus sampling, where the model considers the results of
             the tokens with top_p probability mass. So :obj:`0.1` means only
             the tokens comprising the top 10% probability mass are considered.
-            (default: :obj:`…
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message. (default: :obj:`…
+            each input message. (default: :obj:`None`)
         response_format (object, optional): An object specifying the format
             that the model must output. Compatible with GPT-4 Turbo and all
             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to

@@ -52,7 +51,7 @@ class GroqConfig(BaseConfig):
             max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
-            (default: :obj:`…
+            (default: :obj:`None`)
         stop (str or list, optional): Up to :obj:`4` sequences where the API
             will stop generating further tokens. (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate

@@ -63,15 +62,15 @@ class GroqConfig(BaseConfig):
             :obj:`2.0`. Positive values penalize new tokens based on whether
             they appear in the text so far, increasing the model's likelihood
             to talk about new topics. See more information about frequency and
-            presence penalties. (default: :obj:`…
+            presence penalties. (default: :obj:`None`)
         frequency_penalty (float, optional): Number between :obj:`-2.0` and
             :obj:`2.0`. Positive values penalize new tokens based on their
             existing frequency in the text so far, decreasing the model's
             likelihood to repeat the same line verbatim. See more information
-            about frequency and presence penalties. (default: :obj:`…
+            about frequency and presence penalties. (default: :obj:`None`)
         user (str, optional): A unique identifier representing your end-user,
             which can help OpenAI to monitor and detect abuse.
-            (default: :obj:`…
+            (default: :obj:`None`)
         tools (list[FunctionTool], optional): A list of tools the model may
             call. Currently, only functions are supported as a tool. Use this
             to provide a list of functions the model may generate JSON inputs

@@ -88,17 +87,17 @@ class GroqConfig(BaseConfig):
             are present.
     """
 
-    temperature: float = …
-    top_p: float = …
-    n: int = …
-    stream: bool = …
-    stop: Union[str, Sequence[str]…
-    max_tokens: …
-    presence_penalty: float = …
-    response_format: …
-    frequency_penalty: float = …
-    user: str = …
-    tool_choice: Optional[Union[dict[str, str], str]] = …
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
+    user: Optional[str] = None
+    tool_choice: Optional[Union[dict[str, str], str]] = None
 
 
 GROQ_API_PARAMS = {param for param in GroqConfig.model_fields.keys()}
camel/configs/internlm_config.py
CHANGED

@@ -24,16 +24,16 @@ class InternLMConfig(BaseConfig):
 
     Args:
         stream (bool, optional): Whether to stream the response.
-            (default: :obj:`…
+            (default: :obj:`None`)
         temperature (float, optional): Controls the diversity and focus of
             the generated results. Lower values make the output more focused,
-            while higher values make it more diverse. (default: :obj:`…
+            while higher values make it more diverse. (default: :obj:`None`)
         top_p (float, optional): Controls the diversity and focus of the
             generated results. Higher values make the output more diverse,
-            while lower values make it more focused. (default: :obj:`…
-        max_tokens (…
+            while lower values make it more focused. (default: :obj:`None`)
+        max_tokens (int, optional): Allows the model to
             generate the maximum number of tokens.
-            (default: :obj:`…
+            (default: :obj:`None`)
         tools (list, optional): Specifies an array of tools that the model can
             call. It can contain one or more tool objects. During a function
             call process, the model will select one tool from the array.

@@ -50,9 +50,9 @@ class InternLMConfig(BaseConfig):
             are present.
     """
 
-    stream: bool = …
-    temperature: float = …
-    top_p: float = …
+    stream: Optional[bool] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
     max_tokens: Optional[int] = None
     tool_choice: Optional[Union[dict[str, str], str]] = None
 
camel/configs/litellm_config.py
CHANGED

@@ -24,49 +24,51 @@ class LiteLLMConfig(BaseConfig):
 
     Args:
         timeout (Optional[Union[float, str]], optional): Request timeout.
-            (default: None)
+            (default: :obj:`None`)
         temperature (Optional[float], optional): Temperature parameter for
-            controlling randomness. (default: None)
+            controlling randomness. (default: :obj:`None`)
         top_p (Optional[float], optional): Top-p parameter for nucleus
-            sampling. (default: None)
+            sampling. (default: :obj:`None`)
         n (Optional[int], optional): Number of completions to generate.
-            (default: None)
+            (default: :obj:`None`)
         stream (Optional[bool], optional): Whether to return a streaming
-            response. (default: None)
+            response. (default: :obj:`None`)
         stream_options (Optional[dict], optional): Options for the streaming
-            response. (default: None)
+            response. (default: :obj:`None`)
         stop (Optional[Union[str, List[str]]], optional): Sequences where the
-            API will stop generating further tokens. (default: None)
+            API will stop generating further tokens. (default: :obj:`None`)
         max_tokens (Optional[int], optional): Maximum number of tokens to
-            generate. (default: None)
+            generate. (default: :obj:`None`)
         presence_penalty (Optional[float], optional): Penalize new tokens
-            based on their existence in the text so far. (default: None)
+            based on their existence in the text so far. (default: :obj:`None`)
         frequency_penalty (Optional[float], optional): Penalize new tokens
-            based on their frequency in the text so far. (default: None)
+            based on their frequency in the text so far. (default: :obj:`None`)
         logit_bias (Optional[dict], optional): Modify the probability of
-            specific tokens appearing in the completion. (default: None)
+            specific tokens appearing in the completion. (default: :obj:`None`)
         user (Optional[str], optional): A unique identifier representing the
-            end-user. (default: None)
+            end-user. (default: :obj:`None`)
         response_format (Optional[dict], optional): Response format
-            parameters. (default: None)
-        seed (Optional[int], optional): Random seed. (default: None)
-        tools (Optional[List], optional): List of tools. (default: None)
+            parameters. (default: :obj:`None`)
+        seed (Optional[int], optional): Random seed. (default: :obj:`None`)
+        tools (Optional[List], optional): List of tools. (default: :obj:`None`)
         tool_choice (Optional[Union[str, dict]], optional): Tool choice
-            parameters. (default: None)
+            parameters. (default: :obj:`None`)
         logprobs (Optional[bool], optional): Whether to return log
-            probabilities of the output tokens. (default: None)
+            probabilities of the output tokens. (default: :obj:`None`)
         top_logprobs (Optional[int], optional): Number of most likely tokens
-            to return at each token position. (default: None)
-        deployment_id (Optional[str], optional): Deployment ID.
+            to return at each token position. (default: :obj:`None`)
+        deployment_id (Optional[str], optional): Deployment ID.
+            (default: :obj:`None`)
         extra_headers (Optional[dict], optional): Additional headers for the
-            request. (default: None)
-        api_version (Optional[str], optional): API version.
+            request. (default: :obj:`None`)
+        api_version (Optional[str], optional): API version.
+            (default: :obj:`None`)
         mock_response (Optional[str], optional): Mock completion response for
-            testing or debugging. (default: None)
+            testing or debugging. (default: :obj:`None`)
         custom_llm_provider (Optional[str], optional): Non-OpenAI LLM
-            provider. (default: None)
+            provider. (default: :obj:`None`)
         max_retries (Optional[int], optional): Maximum number of retries.
-            (default: None)
+            (default: :obj:`None`)
     """
 
     timeout: Optional[Union[float, str]] = None
camel/configs/mistral_config.py
CHANGED

@@ -30,18 +30,18 @@ class MistralConfig(BaseConfig):
 
     Args:
         temperature (Optional[float], optional): temperature the temperature
-            to use for sampling, e.g. 0.5.
+            to use for sampling, e.g. 0.5. (default: :obj:`None`)
         top_p (Optional[float], optional): the cumulative probability of
-            tokens to generate, e.g. 0.9.
+            tokens to generate, e.g. 0.9. (default: :obj:`None`)
         max_tokens (Optional[int], optional): the maximum number of tokens to
-            generate, e.g. 100.
+            generate, e.g. 100. (default: :obj:`None`)
         stop (Optional[Union[str,list[str]]]): Stop generation if this token
             is detected. Or if one of these tokens is detected when providing
-            a string list.
+            a string list. (default: :obj:`None`)
         random_seed (Optional[int], optional): the random seed to use for
-            sampling, e.g. 42.
+            sampling, e.g. 42. (default: :obj:`None`)
         safe_prompt (bool, optional): whether to use safe prompt, e.g. true.
-            …
+            (default: :obj:`None`)
         response_format (Union[Dict[str, str], ResponseFormat): format of the
             response.
         tool_choice (str, optional): Controls which (if

@@ -58,9 +58,9 @@ class MistralConfig(BaseConfig):
     max_tokens: Optional[int] = None
     stop: Optional[Union[str, list[str]]] = None
     random_seed: Optional[int] = None
-    safe_prompt: bool = …
+    safe_prompt: Optional[bool] = None
     response_format: Optional[Union[Dict[str, str], Any]] = None
-    tool_choice: Optional[str] = …
+    tool_choice: Optional[str] = None
 
     @field_validator("response_format", mode="before")
     @classmethod
camel/configs/moonshot_config.py
CHANGED

@@ -25,7 +25,7 @@ class MoonshotConfig(BaseConfig):
     Args:
         temperature (float, optional): Controls randomness in the response.
             Lower values make the output more focused and deterministic.
-            (default: :obj:`…
+            (default: :obj:`None`)
         max_tokens (int, optional): The maximum number of tokens to generate.
             (default: :obj:`None`)
         stream (bool, optional): Whether to stream the response.

@@ -35,28 +35,28 @@ class MoonshotConfig(BaseConfig):
             type, function name, description, and parameters.
             (default: :obj:`None`)
         top_p (float, optional): Controls diversity via nucleus sampling.
-            (default: :obj:`…
+            (default: :obj:`None`)
         n (int, optional): How many chat completion choices to generate for
-            each input message.
+            each input message.(default: :obj:`None`)
         presence_penalty (float, optional): Penalty for new tokens based on
             whether they appear in the text so far.
-            (default: :obj:`…
+            (default: :obj:`None`)
         frequency_penalty (float, optional): Penalty for new tokens based on
             their frequency in the text so far.
-            (default: :obj:`…
+            (default: :obj:`None`)
         stop (Optional[Union[str, List[str]]], optional): Up to 4 sequences
             where the API will stop generating further tokens.
             (default: :obj:`None`)
     """
 
-    temperature: float = …
+    temperature: Optional[float] = None
     max_tokens: Optional[int] = None
-    stream: bool = …
+    stream: Optional[bool] = None
     tools: Optional[list] = None
-    top_p: float = …
-    n: int = …
-    presence_penalty: float = …
-    frequency_penalty: float = …
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    frequency_penalty: Optional[float] = None
     stop: Optional[Union[str, List[str]]] = None
 
camel/configs/nvidia_config.py
CHANGED

@@ -18,7 +18,7 @@ from typing import List, Optional, Union
 from pydantic import Field
 
 from camel.configs.base_config import BaseConfig
-from camel.types import …
+from camel.types import NotGiven
 
 
 class NvidiaConfig(BaseConfig):

@@ -30,21 +30,21 @@ class NvidiaConfig(BaseConfig):
 
     Args:
         stream (bool, optional): Whether to stream the response.
-            (default: :obj:`…
+            (default: :obj:`None`)
         temperature (float, optional): Controls randomness in the response.
             Higher values make output more random, lower values make it more
-            deterministic. Range: [0.0, 2.0]. (default: :obj:`…
+            deterministic. Range: [0.0, 2.0]. (default: :obj:`None`)
         top_p (float, optional): Controls diversity via nucleus sampling.
-            Range: [0.0, 1.0]. (default: :obj:`…
+            Range: [0.0, 1.0]. (default: :obj:`None`)
         presence_penalty (float, optional): Penalizes new tokens based on
             whether they appear in the text so far. Range: [-2.0, 2.0].
-            (default: :obj:`…
+            (default: :obj:`None`)
         frequency_penalty (float, optional): Penalizes new tokens based on
             their frequency in the text so far. Range: [-2.0, 2.0].
-            (default: :obj:`…
+            (default: :obj:`None`)
         max_tokens (Union[int, NotGiven], optional): Maximum number of tokens
             to generate. If not provided, model will use its default maximum.
-            (default: :obj:`…
+            (default: :obj:`None`)
         seed (Optional[int], optional): Random seed for deterministic sampling.
             (default: :obj:`None`)
         tools (Optional[List[Dict]], optional): List of tools available to the

@@ -56,12 +56,12 @@ class NvidiaConfig(BaseConfig):
             (default: :obj:`None`)
     """
 
-    stream: bool = Field(default=…
-    temperature: float = Field(default=…
-    top_p: float = Field(default=…
-    presence_penalty: float = Field(default=…
-    frequency_penalty: float = Field(default=…
-    max_tokens: Union[int, NotGiven] = Field(default=…
+    stream: Optional[bool] = Field(default=None)
+    temperature: Optional[float] = Field(default=None)
+    top_p: Optional[float] = Field(default=None)
+    presence_penalty: Optional[float] = Field(default=None)
+    frequency_penalty: Optional[float] = Field(default=None)
+    max_tokens: Optional[Union[int, NotGiven]] = Field(default=None)
     seed: Optional[int] = Field(default=None)
     tool_choice: Optional[str] = Field(default=None)
     stop: Optional[List[str]] = Field(default=None)