camel-ai 0.1.6.6__py3-none-any.whl → 0.1.6.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +44 -9
- camel/agents/critic_agent.py +0 -1
- camel/configs/__init__.py +9 -0
- camel/configs/reka_config.py +74 -0
- camel/configs/samba_config.py +50 -0
- camel/configs/togetherai_config.py +107 -0
- camel/models/__init__.py +6 -0
- camel/models/groq_model.py +5 -5
- camel/models/litellm_model.py +1 -1
- camel/models/model_factory.py +12 -0
- camel/models/ollama_model.py +6 -4
- camel/models/openai_compatibility_model.py +3 -3
- camel/models/reka_model.py +232 -0
- camel/models/samba_model.py +291 -0
- camel/models/togetherai_model.py +148 -0
- camel/models/vllm_model.py +7 -5
- camel/models/zhipuai_model.py +2 -2
- camel/retrievers/auto_retriever.py +2 -27
- camel/societies/babyagi_playing.py +0 -3
- camel/societies/role_playing.py +18 -2
- camel/storages/object_storages/amazon_s3.py +12 -10
- camel/toolkits/__init__.py +3 -0
- camel/toolkits/linkedin_toolkit.py +230 -0
- camel/types/enums.py +64 -6
- camel/utils/__init__.py +2 -0
- camel/utils/commons.py +22 -0
- {camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.8.dist-info}/METADATA +19 -10
- {camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.8.dist-info}/RECORD +30 -23
- {camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.8.dist-info}/WHEEL +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -14,6 +14,7 @@
 from __future__ import annotations
 
 import json
+import logging
 from collections import defaultdict
 from typing import (
     TYPE_CHECKING,
@@ -61,6 +62,9 @@ if TYPE_CHECKING:
     from camel.terminators import ResponseTerminator
     from camel.toolkits import OpenAIFunction
 
+
+logger = logging.getLogger(__name__)
+
 # AgentOps decorator setting
 try:
     import os
@@ -112,9 +116,6 @@ class ChatAgent(BaseAgent):
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
            `GPT_4O_MINI`)
-        api_key (str, optional): The API key for authenticating with the
-            LLM service. Only OpenAI and Anthropic model supported (default:
-            :obj:`None`)
         memory (AgentMemory, optional): The agent memory for managing chat
             messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
             (default: :obj:`None`)
@@ -138,7 +139,6 @@ class ChatAgent(BaseAgent):
         self,
         system_message: BaseMessage,
         model: Optional[BaseModelBackend] = None,
-        api_key: Optional[str] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: Optional[int] = None,
         token_limit: Optional[int] = None,
@@ -150,7 +150,6 @@ class ChatAgent(BaseAgent):
         self.system_message = system_message
         self.role_name: str = system_message.role_name
         self.role_type: RoleType = system_message.role_type
-        self._api_key = api_key
         self.model_backend: BaseModelBackend = (
             model
             if model is not None
@@ -158,7 +157,6 @@ class ChatAgent(BaseAgent):
                 model_platform=ModelPlatformType.OPENAI,
                 model_type=ModelType.GPT_4O_MINI,
                 model_config_dict=ChatGPTConfig().as_dict(),
-                api_key=self._api_key,
             )
         )
         self.output_language: Optional[str] = output_language
@@ -443,10 +441,22 @@
             for base_message_item in output_messages:
                 base_message_item.content = str(info['tool_calls'][-1].result)
 
-        return ChatAgentResponse(
+        chat_agent_response = ChatAgentResponse(
             msgs=output_messages, terminated=self.terminated, info=info
         )
 
+        # If the output result is single message, it will be
+        # automatically added to the memory.
+        if len(chat_agent_response.msgs) == 1:
+            self.record_message(chat_agent_response.msg)
+        else:
+            logger.warning(
+                "Multiple messages are available in `ChatAgentResponse`. "
+                "Please manually run the `record_message` function to "
+                "record the selected message."
+            )
+        return chat_agent_response
+
     async def step_async(
         self,
         input_message: BaseMessage,
@@ -569,10 +579,23 @@
             for base_message_item in output_messages:
                 base_message_item.content = str(info['tool_calls'][0].result)
 
-        return ChatAgentResponse(
+        chat_agent_response = ChatAgentResponse(
             msgs=output_messages, terminated=self.terminated, info=info
         )
 
+        # If the output result is single message, it will be
+        # automatically added to the memory.
+        if len(chat_agent_response.msgs) == 1:
+            self.record_message(chat_agent_response.msg)
+        else:
+            logger.warning(
+                "Multiple messages are presented in `chat_agent_response`. "
+                "Please manually call the `record_message` function to "
+                "record the chosen message."
+            )
+
+        return chat_agent_response
+
     def _add_tools_for_func_call(
         self,
         response: ChatCompletion,
@@ -742,7 +765,9 @@ class ChatAgent(BaseAgent):
             str(choice.finish_reason) for choice in response.choices
         ]
         usage = (
-
+            self._safe_model_dump(response.usage)
+            if response.usage is not None
+            else {}
         )
         return (
             output_messages,
@@ -751,6 +776,16 @@
             response.id,
         )
 
+    def _safe_model_dump(self, obj):
+        # Check if the `model_dump` method exists (Pydantic v2)
+        if hasattr(obj, 'model_dump'):
+            return obj.model_dump()
+        # Fallback to `dict()` method (Pydantic v1)
+        elif hasattr(obj, 'dict'):
+            return obj.dict()
+        else:
+            raise TypeError("The object is not a Pydantic model")
+
     def handle_stream_response(
         self,
         response: Stream[ChatCompletionChunk],
camel/agents/critic_agent.py
CHANGED
camel/configs/__init__.py
CHANGED
@@ -19,6 +19,9 @@ from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
 from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
+from .reka_config import REKA_API_PARAMS, RekaConfig
+from .samba_config import SAMBA_API_PARAMS, SambaConfig
+from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
 from .vllm_config import VLLM_API_PARAMS, VLLMConfig
 from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
 
@@ -43,4 +46,10 @@ __all__ = [
     'VLLM_API_PARAMS',
     'MistralConfig',
     'MISTRAL_API_PARAMS',
+    'RekaConfig',
+    'REKA_API_PARAMS',
+    'SambaConfig',
+    'SAMBA_API_PARAMS',
+    'TogetherAIConfig',
+    'TOGETHERAI_API_PARAMS',
 ]
camel/configs/reka_config.py
ADDED
@@ -0,0 +1,74 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from typing import Any, Optional, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class RekaConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    Reka API.
+
+    Reference: https://docs.reka.ai/api-reference/chat/create
+
+    Args:
+        temperature (Optional[float], optional): temperature the temperature
+            to use for sampling, e.g. 0.5.
+        top_p (Optional[float], optional): the cumulative probability of
+            tokens to generate, e.g. 0.9. Defaults to None.
+        top_k (Optional[int], optional): Parameter which forces the model to
+            only consider the tokens with the `top_k` highest probabilities at
+            the next step. Defaults to 1024.
+        max_tokens (Optional[int], optional): the maximum number of tokens to
+            generate, e.g. 100. Defaults to None.
+        stop (Optional[Union[str,list[str]]]): Stop generation if this token
+            is detected. Or if one of these tokens is detected when providing
+            a string list.
+        seed (Optional[int], optional): the random seed to use for sampling, e.
+            g. 42. Defaults to None.
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+        use_search_engine (Optional[bool]): Whether to consider using search
+            engine to complete the request. Note that even if this is set to
+            `True`, the model might decide to not use search.
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    max_tokens: Optional[int] = None
+    stop: Optional[Union[str, list[str]]] = None
+    seed: Optional[int] = None
+    frequency_penalty: float = 0.0
+    presence_penalty: float = 0.0
+    use_search_engine: Optional[bool] = False
+
+    def as_dict(self) -> dict[str, Any]:
+        config_dict = super().as_dict()
+        if "tools" in config_dict:
+            del config_dict["tools"]  # Reka does not support tool calling
+        return config_dict
+
+
+REKA_API_PARAMS = {param for param in RekaConfig().model_fields.keys()}
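Like the other config classes, `RekaConfig` is a plain Pydantic model whose fields map one-to-one onto request parameters, and `as_dict()` strips any `tools` entry because Reka does not support tool calling. A minimal consumption sketch (illustrative only; the surrounding model wiring is not part of this diff):

    from camel.configs import REKA_API_PARAMS, RekaConfig

    config = RekaConfig(temperature=0.5, max_tokens=256)
    config_dict = config.as_dict()

    # `tools` is removed by as_dict(), so it never reaches the Reka client.
    assert "tools" not in config_dict

    # REKA_API_PARAMS enumerates the accepted field names; backends in this
    # codebase typically validate user-supplied config keys against such sets.
    print(sorted(REKA_API_PARAMS))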
camel/configs/samba_config.py
ADDED
@@ -0,0 +1,50 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from typing import Any, Dict, Optional, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class SambaConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    SambaNova API.
+
+    Args:
+        max_tokens (Optional[int], optional): the maximum number of tokens to
+            generate, e.g. 100. Defaults to `None`.
+        stop (Optional[Union[str,list[str]]]): Stop generation if this token
+            is detected. Or if one of these tokens is detected when providing
+            a string list. Defaults to `None`.
+        stream (Optional[bool]): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            Currently SambaNova only support stream mode. Defaults to `True`.
+        stream_options (Optional[Dict]): Additional options for streaming.
+            Defaults to `{"include_usage": True}`.
+    """
+
+    max_tokens: Optional[int] = None
+    stop: Optional[Union[str, list[str]]] = None
+    stream: Optional[bool] = True
+    stream_options: Optional[Dict] = {"include_usage": True}  # noqa: RUF012
+
+    def as_dict(self) -> dict[str, Any]:
+        config_dict = super().as_dict()
+        if "tools" in config_dict:
+            del config_dict["tools"]  # SambaNova does not support tool calling
+        return config_dict
+
+
+SAMBA_API_PARAMS = {param for param in SambaConfig().model_fields.keys()}
camel/configs/togetherai_config.py
ADDED
@@ -0,0 +1,107 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from typing import Any, Sequence, Union
+
+from openai._types import NOT_GIVEN, NotGiven
+from pydantic import Field
+
+from camel.configs.base_config import BaseConfig
+
+
+class TogetherAIConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    OpenAI API.
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`1.0`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`1`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+        logit_bias (dict, optional): Modify the likelihood of specified tokens
+            appearing in the completion. Accepts a json object that maps tokens
+            (specified by their token ID in the tokenizer) to an associated
+            bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
+            is added to the logits generated by the model prior to sampling.
+            The exact effect will vary per model, but values between:obj:` -1`
+            and :obj:`1` should decrease or increase likelihood of selection;
+            values like :obj:`-100` or :obj:`100` should result in a ban or
+            exclusive selection of the relevant token. (default: :obj:`{}`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`""`)
+    """
+
+    temperature: float = 0.2  # openai default: 1.0
+    top_p: float = 1.0
+    n: int = 1
+    stream: bool = False
+    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
+    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    presence_penalty: float = 0.0
+    response_format: Union[dict, NotGiven] = NOT_GIVEN
+    frequency_penalty: float = 0.0
+    logit_bias: dict = Field(default_factory=dict)
+    user: str = ""
+
+    def as_dict(self) -> dict[str, Any]:
+        config_dict = super().as_dict()
+        if "tools" in config_dict:
+            del config_dict["tools"]  # Currently does not support tool calling
+        return config_dict
+
+
+TOGETHERAI_API_PARAMS = {
+    param for param in TogetherAIConfig.model_fields.keys()
+}
camel/models/__init__.py
CHANGED
@@ -25,7 +25,10 @@ from .open_source_model import OpenSourceModel
 from .openai_audio_models import OpenAIAudioModels
 from .openai_compatibility_model import OpenAICompatibilityModel
 from .openai_model import OpenAIModel
+from .reka_model import RekaModel
+from .samba_model import SambaModel
 from .stub_model import StubModel
+from .togetherai_model import TogetherAIModel
 from .vllm_model import VLLMModel
 from .zhipuai_model import ZhipuAIModel
 
@@ -47,4 +50,7 @@ __all__ = [
     'VLLMModel',
     'GeminiModel',
     'OpenAICompatibilityModel',
+    'RekaModel',
+    'SambaModel',
+    'TogetherAIModel',
 ]
camel/models/groq_model.py
CHANGED
@@ -51,21 +51,21 @@ class GroqModel(BaseModelBackend):
            api_key (Optional[str]): The API key for authenticating with the
                Groq service. (default: :obj:`None`).
            url (Optional[str]): The url to the Groq service. (default:
-               :obj:`
+               :obj:`"https://api.groq.com/openai/v1"`)
            token_counter (Optional[BaseTokenCounter]): Token counter to use
                for the model. If not provided, `OpenAITokenCounter(ModelType.
-
+               GPT_4O_MINI)` will be used.
        """
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter
        )
-       self._url = url or
+       self._url = url or os.environ.get("GROQ_API_BASE_URL")
        self._api_key = api_key or os.environ.get("GROQ_API_KEY")
        self._client = OpenAI(
            timeout=60,
            max_retries=3,
            api_key=self._api_key,
-           base_url=self._url,
+           base_url=self._url or "https://api.groq.com/openai/v1",
        )
        self._token_counter = token_counter
 
@@ -80,7 +80,7 @@ class GroqModel(BaseModelBackend):
        # Make sure you have the access to these open-source model in
        # HuggingFace
        if not self._token_counter:
-           self._token_counter = OpenAITokenCounter(ModelType.
+           self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
        return self._token_counter
 
    @api_keys_required("GROQ_API_KEY")
camel/models/litellm_model.py
CHANGED
@@ -147,7 +147,7 @@ class LiteLLMModel:
 
    @property
    def token_limit(self) -> int:
-       """Returns the maximum token limit for the given model.
+       r"""Returns the maximum token limit for the given model.
 
        Returns:
            int: The maximum token limit for the given model.
camel/models/model_factory.py
CHANGED
@@ -24,7 +24,10 @@ from camel.models.ollama_model import OllamaModel
 from camel.models.open_source_model import OpenSourceModel
 from camel.models.openai_compatibility_model import OpenAICompatibilityModel
 from camel.models.openai_model import OpenAIModel
+from camel.models.reka_model import RekaModel
+from camel.models.samba_model import SambaModel
 from camel.models.stub_model import StubModel
+from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
 from camel.models.zhipuai_model import ZhipuAIModel
 from camel.types import ModelPlatformType, ModelType
@@ -91,6 +94,10 @@ class ModelFactory:
            model_class = GeminiModel
        elif model_platform.is_mistral and model_type.is_mistral:
            model_class = MistralModel
+       elif model_platform.is_reka and model_type.is_reka:
+           model_class = RekaModel
+       elif model_platform.is_samba and model_type.is_samba:
+           model_class = SambaModel
        elif model_type == ModelType.STUB:
            model_class = StubModel
        else:
@@ -110,6 +117,11 @@ class ModelFactory:
            model_class = LiteLLMModel
        elif model_platform.is_openai_compatibility_model:
            model_class = OpenAICompatibilityModel
+       elif model_platform.is_together:
+           model_class = TogetherAIModel
+           return model_class(
+               model_type, model_config_dict, api_key, token_counter
+           )
        else:
            raise ValueError(
                f"Unknown pair of model platform `{model_platform}` "
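The new `is_together` branch returns early and builds the backend directly, presumably because TogetherAI models are addressed by provider-style identifier strings rather than `ModelType` members. A hedged sketch of such a call; the `TOGETHER` enum member name and the model identifier are assumptions inferred from the `is_together` property, not confirmed by this diff:

    from camel.configs import TogetherAIConfig
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    model = ModelFactory.create(
        model_platform=ModelPlatformType.TOGETHER,  # assumed member name
        model_type="meta-llama/Llama-3-8b-chat-hf",  # illustrative model id
        model_config_dict=TogetherAIConfig(temperature=0.2).as_dict(),
    )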
camel/models/ollama_model.py
CHANGED
@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import os
 from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
@@ -40,18 +41,19 @@ class OllamaModel:
            model_config_dict (Dict[str, Any]): A dictionary that will
                be fed into openai.ChatCompletion.create().
            url (Optional[str]): The url to the model service. (default:
-               :obj:`
+               :obj:`"http://localhost:11434/v1"`)
            token_counter (Optional[BaseTokenCounter]): Token counter to use
                for the model. If not provided, `OpenAITokenCounter(ModelType.
-
+               GPT_4O_MINI)` will be used.
        """
        self.model_type = model_type
        self.model_config_dict = model_config_dict
+       self._url = url or os.environ.get("OLLAMA_BASE_URL")
        # Use OpenAI cilent as interface call Ollama
        self._client = OpenAI(
            timeout=60,
            max_retries=3,
-           base_url=
+           base_url=self._url or "http://localhost:11434/v1",
            api_key="ollama",  # required but ignored
        )
        self._token_counter = token_counter
@@ -66,7 +68,7 @@ class OllamaModel:
            tokenization style.
        """
        if not self._token_counter:
-           self._token_counter = OpenAITokenCounter(ModelType.
+           self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
        return self._token_counter
 
    def check_model_config(self):
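Both `GroqModel` and `OllamaModel` now resolve their base URL from an environment variable (`GROQ_API_BASE_URL` and `OLLAMA_BASE_URL` respectively) before falling back to the hard-coded default. A small sketch of pointing Ollama at a non-default host; the model name and config values are illustrative:

    import os

    from camel.configs import OllamaConfig
    from camel.models.ollama_model import OllamaModel

    # Read by OllamaModel when no explicit `url` argument is passed.
    os.environ["OLLAMA_BASE_URL"] = "http://192.168.1.10:11434/v1"

    ollama = OllamaModel(
        model_type="llama3",  # illustrative local model name
        model_config_dict=OllamaConfig().as_dict(),
    )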
camel/models/openai_compatibility_model.py
CHANGED
@@ -38,7 +38,7 @@ class OpenAICompatibilityModel:
        r"""Constructor for model backend.
 
        Args:
-           model_type (
+           model_type (str): Model for which a backend is created.
            model_config_dict (Dict[str, Any]): A dictionary that will
                be fed into openai.ChatCompletion.create().
            api_key (str): The API key for authenticating with the
@@ -47,7 +47,7 @@
                :obj:`None`)
            token_counter (Optional[BaseTokenCounter]): Token counter to use
                for the model. If not provided, `OpenAITokenCounter(ModelType.
-
+               GPT_4O_MINI)` will be used.
        """
        self.model_type = model_type
        self.model_config_dict = model_config_dict
@@ -91,7 +91,7 @@
        """
 
        if not self._token_counter:
-           self._token_counter = OpenAITokenCounter(ModelType.
+           self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
        return self._token_counter
 
    @property