camel-ai 0.2.45__py3-none-any.whl → 0.2.46__py3-none-any.whl

This diff compares two publicly available versions of the package as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.

Potentially problematic release: the registry flags this version of camel-ai as possibly problematic.

Files changed (42)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/models/__init__.py +4 -0
  7. camel/models/aiml_model.py +11 -104
  8. camel/models/anthropic_model.py +11 -76
  9. camel/models/aws_bedrock_model.py +112 -0
  10. camel/models/deepseek_model.py +11 -44
  11. camel/models/gemini_model.py +10 -72
  12. camel/models/groq_model.py +11 -131
  13. camel/models/internlm_model.py +11 -61
  14. camel/models/lmstudio_model.py +82 -0
  15. camel/models/model_factory.py +7 -1
  16. camel/models/modelscope_model.py +11 -122
  17. camel/models/moonshot_model.py +10 -76
  18. camel/models/nemotron_model.py +4 -60
  19. camel/models/nvidia_model.py +11 -111
  20. camel/models/ollama_model.py +12 -205
  21. camel/models/openai_compatible_model.py +51 -12
  22. camel/models/openrouter_model.py +12 -131
  23. camel/models/ppio_model.py +10 -99
  24. camel/models/qwen_model.py +11 -122
  25. camel/models/reka_model.py +1 -1
  26. camel/models/sglang_model.py +5 -3
  27. camel/models/siliconflow_model.py +10 -58
  28. camel/models/togetherai_model.py +10 -177
  29. camel/models/vllm_model.py +11 -218
  30. camel/models/volcano_model.py +1 -15
  31. camel/models/yi_model.py +11 -98
  32. camel/models/zhipuai_model.py +11 -102
  33. camel/toolkits/__init__.py +2 -0
  34. camel/toolkits/pyautogui_toolkit.py +428 -0
  35. camel/toolkits/video_analysis_toolkit.py +215 -80
  36. camel/toolkits/video_download_toolkit.py +10 -3
  37. camel/types/enums.py +64 -0
  38. camel/types/unified_model_type.py +10 -0
  39. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/METADATA +2 -1
  40. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/RECORD +42 -37
  41. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/WHEEL +0 -0
  42. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.45'
+__version__ = '0.2.46'
 
 __all__ = [
     '__version__',
camel/configs/__init__.py CHANGED
@@ -14,12 +14,14 @@
 from .aiml_config import AIML_API_PARAMS, AIMLConfig
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
+from .bedrock_config import BEDROCK_API_PARAMS, BedrockConfig
 from .cohere_config import COHERE_API_PARAMS, CohereConfig
 from .deepseek_config import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from .gemini_config import Gemini_API_PARAMS, GeminiConfig
 from .groq_config import GROQ_API_PARAMS, GroqConfig
 from .internlm_config import INTERNLM_API_PARAMS, InternLMConfig
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+from .lmstudio_config import LMSTUDIO_API_PARAMS, LMStudioConfig
 from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .modelscope_config import MODELSCOPE_API_PARAMS, ModelScopeConfig
 from .moonshot_config import MOONSHOT_API_PARAMS, MoonshotConfig
@@ -81,6 +83,8 @@ __all__ = [
     'YI_API_PARAMS',
     'QwenConfig',
     'QWEN_API_PARAMS',
+    'BedrockConfig',
+    'BEDROCK_API_PARAMS',
     'DeepSeekConfig',
     'DEEPSEEK_API_PARAMS',
     'PPIOConfig',
@@ -97,4 +101,6 @@ __all__ = [
     'AIML_API_PARAMS',
     'OpenRouterConfig',
     'OPENROUTER_API_PARAMS',
+    'LMSTUDIO_API_PARAMS',
+    'LMStudioConfig',
 ]
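Once these exports land, both new config classes are importable from the package root. A one-line check (assuming camel-ai 0.2.46 is installed):

from camel.configs import BedrockConfig, LMStudioConfig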
camel/configs/bedrock_config.py ADDED
@@ -0,0 +1,73 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from typing import Dict, Optional, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class BedrockConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility.
+
+    Args:
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        top_k (int, optional): The number of top tokens to consider.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+        reasoning_effort (str, optional): A parameter specifying the level of
+            reasoning used by certain model types. Valid values are
+            :obj:`"low"`, :obj:`"medium"`, or :obj:`"high"`. If set, it is
+            only applied to the model types that support it (e.g., :obj:`o1`,
+            :obj:`o1mini`, :obj:`o1preview`, :obj:`o3mini`). If not provided
+            or if the model type does not support it, this parameter is
+            ignored. (default: :obj:`None`)
+    """
+
+    max_tokens: Optional[int] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    stream: Optional[bool] = None
+    tool_choice: Optional[Union[Dict[str, str], str]] = None
+    reasoning_effort: Optional[str] = None
+
+
+BEDROCK_API_PARAMS = {param for param in BedrockConfig.model_fields.keys()}
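A brief usage sketch for the new config (a sketch, assuming camel-ai 0.2.46 is installed; as_dict() is inherited from BaseConfig):

from camel.configs import BEDROCK_API_PARAMS, BedrockConfig

# Unset fields default to None; as_dict() yields the kwargs that are
# eventually passed to the OpenAI-compatible chat.completions.create().
config = BedrockConfig(temperature=0.2, max_tokens=1024)
request_kwargs = config.as_dict()

# BEDROCK_API_PARAMS is the set of accepted field names; AWSBedrockModel
# validates incoming config dicts against it (see check_model_config below).
assert "temperature" in BEDROCK_API_PARAMS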
camel/configs/lmstudio_config.py ADDED
@@ -0,0 +1,94 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Optional, Sequence, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class LMStudioConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility.
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`None`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`None`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+
+
+LMSTUDIO_API_PARAMS = {param for param in LMStudioConfig.model_fields.keys()}
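A short sketch of the JSON-mode caveat called out in the docstring above (hypothetical values, assuming camel-ai 0.2.46): response_format only constrains the output shape, so the prompt itself must still ask for JSON.

from camel.configs import LMStudioConfig

config = LMStudioConfig(
    temperature=0.0,
    max_tokens=512,
    response_format={"type": "json_object"},  # pair with a JSON instruction
)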
camel/configs/qwen_config.py CHANGED
@@ -13,7 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 from camel.configs.base_config import BaseConfig
 
@@ -61,7 +61,7 @@ class QwenConfig(BaseConfig):
             call. It can contain one or more tool objects. During a function
             call process, the model will select one tool from the array.
             (default: :obj:`None`)
-        extra_body (Optional[Dict[str, str]], optional): Additional parameters
+        extra_body (Optional[Dict[str, Any]], optional): Additional parameters
             to be sent to the Qwen API. If you want to enable internet search,
             you can set this parameter to `{"enable_search": True}`.
             (default: :obj:`None`)
@@ -78,7 +78,7 @@ class QwenConfig(BaseConfig):
     max_tokens: Optional[int] = None
     seed: Optional[int] = None
     stop: Optional[Union[str, List]] = None
-    extra_body: Optional[Dict[str, str]] = None
+    extra_body: Optional[Dict[str, Any]] = None
 
     def __init__(self, include_usage: bool = True, **kwargs):
         super().__init__(**kwargs)
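The widened annotation matters for the documented internet-search switch, whose value is a boolean rather than a string; under the old Dict[str, str] type, pydantic validation would not accept it cleanly. A minimal sketch (assuming camel-ai 0.2.46):

from camel.configs import QwenConfig

config = QwenConfig(extra_body={"enable_search": True})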
camel/models/__init__.py CHANGED
@@ -13,6 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .aiml_model import AIMLModel
 from .anthropic_model import AnthropicModel
+from .aws_bedrock_model import AWSBedrockModel
 from .azure_openai_model import AzureOpenAIModel
 from .base_audio_model import BaseAudioModel
 from .base_model import BaseModelBackend
@@ -23,6 +24,7 @@ from .gemini_model import GeminiModel
 from .groq_model import GroqModel
 from .internlm_model import InternLMModel
 from .litellm_model import LiteLLMModel
+from .lmstudio_model import LMStudioModel
 from .mistral_model import MistralModel
 from .model_factory import ModelFactory
 from .model_manager import ModelManager, ModelProcessingError
@@ -76,6 +78,7 @@ __all__ = [
    'PPIOModel',
    'YiModel',
    'QwenModel',
+   'AWSBedrockModel',
    'ModelProcessingError',
    'DeepSeekModel',
    'FishAudioModel',
@@ -86,4 +89,5 @@ __all__ = [
    'BaseAudioModel',
    'SiliconFlowModel',
    'VolcanoModel',
+   'LMStudioModel',
 ]
camel/models/aiml_model.py CHANGED
@@ -12,29 +12,19 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import AIML_API_PARAMS, AIMLConfig
-from camel.messages import OpenAIMessage
-from camel.models._utils import try_modify_message_with_format
-from camel.models.base_model import BaseModelBackend
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class AIMLModel(BaseModelBackend):
-    r"""AIML API in a unified BaseModelBackend interface.
+class AIMLModel(OpenAICompatibleModel):
+    r"""AIML API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -77,87 +67,14 @@ class AIMLModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    def _prepare_request(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Dict[str, Any]:
-        request_config = self.model_config_dict.copy()
-        if tools:
-            request_config["tools"] = tools
-        if response_format:
-            # AIML API does not natively support response format
-            try_modify_message_with_format(messages[-1], response_format)
-        return request_config
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of AIML chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
-        response = self._client.chat.completions.create(
-            messages=messages, model=self.model_type, **request_config
-        )
-        return response
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        request_config = self._prepare_request(
-            messages, response_format, tools
-        )
-        response = await self._async_client.chat.completions.create(
-            messages=messages, model=self.model_type, **request_config
-        )
-        return response
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to AIML API.
@@ -172,13 +89,3 @@ class AIMLModel(BaseModelBackend):
                 f"Unexpected argument `{param}` is "
                 "input into AIML model backend."
             )
-
-    @property
-    def stream(self) -> bool:
-        """Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get("stream", False)
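The deleted methods now live in OpenAICompatibleModel, so a provider backend shrinks to construction plus config validation. A sketch of the same pattern for a hypothetical provider (the class name, endpoint, and environment variable below are illustrative, not part of this release):

import os
from typing import Any, Dict, Optional, Union

from camel.models.openai_compatible_model import OpenAICompatibleModel
from camel.types import ModelType
from camel.utils import BaseTokenCounter


class ExampleProviderModel(OpenAICompatibleModel):
    r"""Hypothetical backend: _run, _arun, token_counter, and stream are
    all inherited from OpenAICompatibleModel."""

    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
    ) -> None:
        # Resolve credentials and endpoint, then delegate everything else.
        api_key = api_key or os.environ.get("EXAMPLEPROVIDER_API_KEY")
        url = url or "https://api.example.com/v1"  # illustrative endpoint
        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        super().__init__(
            model_type=model_type,
            model_config_dict=model_config_dict or {},
            api_key=api_key,
            url=url,
            token_counter=token_counter,
            timeout=timeout,
        )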
camel/models/anthropic_model.py CHANGED
@@ -12,14 +12,11 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
-from camel.messages import OpenAIMessage
-from camel.models.base_model import BaseModelBackend
-from camel.types import ChatCompletion, ModelType
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     AnthropicTokenCounter,
     BaseTokenCounter,
@@ -28,8 +25,8 @@ from camel.utils import (
 )
 
 
-class AnthropicModel(BaseModelBackend):
-    r"""Anthropic API in a unified BaseModelBackend interface.
+class AnthropicModel(OpenAICompatibleModel):
+    r"""Anthropic API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -66,8 +63,6 @@ class AnthropicModel(BaseModelBackend):
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
     ) -> None:
-        from openai import AsyncOpenAI, OpenAI
-
         if model_config_dict is None:
             model_config_dict = AnthropicConfig().as_dict()
         api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
@@ -78,14 +73,12 @@ class AnthropicModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self.client = OpenAI(
-            base_url=self._url, api_key=self._api_key, timeout=self._timeout
-        )
-
-        self.async_client = AsyncOpenAI(
-            api_key=self._api_key, base_url=self._url, timeout=self._timeout
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
     @property
@@ -100,54 +93,6 @@ class AnthropicModel(BaseModelBackend):
         self._token_counter = AnthropicTokenCounter(self.model_type)
         return self._token_counter
 
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ):
-        r"""Run inference of Anthropic chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            ChatCompletion: Response in the OpenAI API format.
-        """
-        response = self.client.chat.completions.create(
-            model=self.model_type,
-            messages=messages,
-            **self.model_config_dict,
-            tools=tools,  # type: ignore[arg-type]
-        )
-
-        return response
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> ChatCompletion:
-        r"""Run inference of Anthropic chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            ChatCompletion: Response in the OpenAI API format.
-        """
-        response = await self.async_client.chat.completions.create(
-            model=self.model_type,
-            messages=messages,
-            **self.model_config_dict,
-            tools=tools,  # type: ignore[arg-type]
-        )
-
-        return response
-
     def check_model_config(self):
         r"""Check whether the model configuration is valid for anthropic
         model backends.
@@ -162,13 +107,3 @@ class AnthropicModel(BaseModelBackend):
                 f"Unexpected argument `{param}` is "
                 "input into Anthropic model backend."
             )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get("stream", False)
camel/models/aws_bedrock_model.py ADDED
@@ -0,0 +1,112 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, List, Optional, Type, Union
+
+from openai import AsyncStream
+from pydantic import BaseModel
+
+from camel.configs import BEDROCK_API_PARAMS, BedrockConfig
+from camel.messages import OpenAIMessage
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import BaseTokenCounter, api_keys_required
+
+
+class AWSBedrockModel(OpenAICompatibleModel):
+    r"""AWS Bedrock API in a unified OpenAICompatibleModel interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Dict[str, Any], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`BedrockConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (str, optional): The API key for authenticating with
+            the AWS Bedrock service. (default: :obj:`None`)
+        url (str, optional): The url to the AWS Bedrock service.
+        token_counter (BaseTokenCounter, optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+
+    References:
+        https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html
+    """
+
+    @api_keys_required(
+        [
+            ("url", "BEDROCK_API_BASE_URL"),
+            ("api_key", "BEDROCK_API_KEY"),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = BedrockConfig().as_dict()
+        api_key = api_key or os.environ.get("BEDROCK_API_KEY")
+        url = url or os.environ.get(
+            "BEDROCK_API_BASE_URL",
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+        )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        raise NotImplementedError(
+            "AWS Bedrock does not support async inference."
+        )
+
+    def check_model_config(self):
+        r"""Check whether the input model configuration contains unexpected
+        arguments.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected argument for this model class.
+        """
+        for param in self.model_config_dict:
+            if param not in BEDROCK_API_PARAMS:
+                raise ValueError(
+                    f"Invalid parameter '{param}' in model_config_dict. "
+                    f"Valid parameters are: {BEDROCK_API_PARAMS}"
+                )
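A usage sketch for the new backend (the model id and endpoint value are illustrative; the @api_keys_required decorator demands both the key and the base URL, via arguments or the environment variables shown):

import os

from camel.models import AWSBedrockModel

os.environ.setdefault("BEDROCK_API_KEY", "<your-key>")
os.environ.setdefault("BEDROCK_API_BASE_URL", "<openai-compatible-endpoint>")

# Accepts a ModelType member or a raw Bedrock model id string.
model = AWSBedrockModel(model_type="meta.llama3-8b-instruct-v1:0")

# Synchronous calls go through the inherited OpenAI-compatible _run;
# the async path deliberately raises NotImplementedError in this release.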