camel-ai 0.2.6__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (47) hide show
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +107 -22
  3. camel/configs/__init__.py +6 -0
  4. camel/configs/base_config.py +21 -0
  5. camel/configs/gemini_config.py +17 -9
  6. camel/configs/qwen_config.py +91 -0
  7. camel/configs/yi_config.py +58 -0
  8. camel/generators.py +93 -0
  9. camel/interpreters/docker_interpreter.py +5 -0
  10. camel/interpreters/ipython_interpreter.py +2 -1
  11. camel/loaders/__init__.py +2 -0
  12. camel/loaders/apify_reader.py +223 -0
  13. camel/memories/agent_memories.py +24 -1
  14. camel/messages/base.py +38 -0
  15. camel/models/__init__.py +4 -0
  16. camel/models/model_factory.py +6 -0
  17. camel/models/qwen_model.py +139 -0
  18. camel/models/yi_model.py +138 -0
  19. camel/prompts/image_craft.py +8 -0
  20. camel/prompts/video_description_prompt.py +8 -0
  21. camel/retrievers/vector_retriever.py +5 -1
  22. camel/societies/role_playing.py +29 -18
  23. camel/societies/workforce/base.py +7 -1
  24. camel/societies/workforce/task_channel.py +10 -0
  25. camel/societies/workforce/utils.py +6 -0
  26. camel/societies/workforce/worker.py +2 -0
  27. camel/storages/vectordb_storages/qdrant.py +147 -24
  28. camel/tasks/task.py +15 -0
  29. camel/terminators/base.py +4 -0
  30. camel/terminators/response_terminator.py +1 -0
  31. camel/terminators/token_limit_terminator.py +1 -0
  32. camel/toolkits/__init__.py +4 -1
  33. camel/toolkits/base.py +9 -0
  34. camel/toolkits/data_commons_toolkit.py +360 -0
  35. camel/toolkits/function_tool.py +174 -7
  36. camel/toolkits/github_toolkit.py +175 -176
  37. camel/toolkits/google_scholar_toolkit.py +36 -7
  38. camel/toolkits/notion_toolkit.py +279 -0
  39. camel/toolkits/search_toolkit.py +164 -36
  40. camel/types/enums.py +88 -0
  41. camel/types/unified_model_type.py +10 -0
  42. camel/utils/commons.py +2 -1
  43. camel/utils/constants.py +2 -0
  44. {camel_ai-0.2.6.dist-info → camel_ai-0.2.7.dist-info}/METADATA +129 -79
  45. {camel_ai-0.2.6.dist-info → camel_ai-0.2.7.dist-info}/RECORD +47 -40
  46. {camel_ai-0.2.6.dist-info → camel_ai-0.2.7.dist-info}/LICENSE +0 -0
  47. {camel_ai-0.2.6.dist-info → camel_ai-0.2.7.dist-info}/WHEEL +0 -0
@@ -80,6 +80,11 @@ class DockerInterpreter(BaseInterpreter):
80
80
  self._container: Optional[Container] = None
81
81
 
82
82
  def __del__(self) -> None:
83
+ r"""Destructor for the DockerInterpreter class.
84
+
85
+ This method ensures that the Docker container is removed when the
86
+ interpreter is deleted.
87
+ """
83
88
  if self._container is not None:
84
89
  self._container.remove(force=True)
85
90
 
@@ -105,7 +105,8 @@ class JupyterKernelInterpreter(BaseInterpreter):
105
105
  outputs.append(msg_content["data"]["text/plain"])
106
106
  if "image/png" in msg_content["data"]:
107
107
  outputs.append(
108
- f"\n![image](data:image/png;base64,{msg_content['data']['image/png']})\n"
108
+ f"\n![image](data:image/png;base64,"
109
+ f"{msg_content['data']['image/png']})\n"
109
110
  )
110
111
  except queue.Empty:
111
112
  outputs.append("Time out")
camel/loaders/__init__.py CHANGED
@@ -12,6 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
+ from .apify_reader import Apify
15
16
  from .base_io import File
16
17
  from .chunkr_reader import ChunkrReader
17
18
  from .firecrawl_reader import Firecrawl
@@ -23,5 +24,6 @@ __all__ = [
23
24
  'UnstructuredIO',
24
25
  'JinaURLReader',
25
26
  'Firecrawl',
27
+ 'Apify',
26
28
  'ChunkrReader',
27
29
  ]
@@ -0,0 +1,223 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
15
+ from typing import TYPE_CHECKING, List, Optional
16
+
17
+ if TYPE_CHECKING:
18
+ from apify_client.clients import DatasetClient
19
+
20
+ from camel.utils import api_keys_required
21
+
22
+
23
+ class Apify:
24
+ r"""Apify is a platform that allows you to automate any web workflow.
25
+
26
+ Args:
27
+ api_key (Optional[str]): API key for authenticating with the Apify API.
28
+ """
29
+
30
+ @api_keys_required("APIFY_API_KEY")
31
+ def __init__(
32
+ self,
33
+ api_key: Optional[str] = None,
34
+ ) -> None:
35
+ from apify_client import ApifyClient
36
+
37
+ self._api_key = api_key or os.environ.get("APIFY_API_KEY")
38
+ self.client = ApifyClient(token=self._api_key)
39
+
40
+ def run_actor(
41
+ self,
42
+ actor_id: str,
43
+ run_input: Optional[dict] = None,
44
+ content_type: Optional[str] = None,
45
+ build: Optional[str] = None,
46
+ max_items: Optional[int] = None,
47
+ memory_mbytes: Optional[int] = None,
48
+ timeout_secs: Optional[int] = None,
49
+ webhooks: Optional[list] = None,
50
+ wait_secs: Optional[int] = None,
51
+ ) -> Optional[dict]:
52
+ r"""Run an actor on the Apify platform.
53
+
54
+ Args:
55
+ actor_id (str): The ID of the actor to run.
56
+ run_input (Optional[dict]): The input data for the actor. Defaults
57
+ to `None`.
58
+ content_type (str, optional): The content type of the input.
59
+ build (str, optional): Specifies the Actor build to run. It can be
60
+ either a build tag or build number. By default, the run uses
61
+ the build specified in the default run configuration for the
62
+ Actor (typically latest).
63
+ max_items (int, optional): Maximum number of results that will be
64
+ returned by this run. If the Actor is charged per result, you
65
+ will not be charged for more results than the given limit.
66
+ memory_mbytes (int, optional): Memory limit for the run, in
67
+ megabytes. By default, the run uses a memory limit specified in
68
+ the default run configuration for the Actor.
69
+ timeout_secs (int, optional): Optional timeout for the run, in
70
+ seconds. By default, the run uses timeout specified in the
71
+ default run configuration for the Actor.
72
+ webhooks (list, optional): Optional webhooks
73
+ (https://docs.apify.com/webhooks) associated with the Actor
74
+ run, which can be used to receive a notification, e.g. when the
75
+ Actor finished or failed. If you already have a webhook set up
76
+ for the Actor, you do not have to add it again here.
77
+ wait_secs (int, optional): The maximum number of seconds the server
78
+ waits for finish. If not provided, waits indefinitely.
79
+
80
+ Returns:
81
+ Optional[dict]: The output data from the actor if successful.
82
+ (use the 'defaultDatasetId' field of the returned dict to retrieve the dataset)
83
+
84
+ Raises:
85
+ RuntimeError: If the actor fails to run.
86
+ """
87
+ try:
88
+ return self.client.actor(actor_id).call(
89
+ run_input=run_input,
90
+ content_type=content_type,
91
+ build=build,
92
+ max_items=max_items,
93
+ memory_mbytes=memory_mbytes,
94
+ timeout_secs=timeout_secs,
95
+ webhooks=webhooks,
96
+ wait_secs=wait_secs,
97
+ )
98
+ except Exception as e:
99
+ raise RuntimeError(f"Failed to run actor {actor_id}: {e}") from e
100
+
101
+ def get_dataset_client(
102
+ self,
103
+ dataset_id: str,
104
+ ) -> "DatasetClient":
105
+ r"""Get a dataset client from the Apify platform.
106
+
107
+ Args:
108
+ dataset_id (str): The ID of the dataset to get the client for.
109
+
110
+ Returns:
111
+ DatasetClient: The dataset client.
112
+
113
+ Raises:
114
+ RuntimeError: If the dataset client fails to be retrieved.
115
+ """
116
+ try:
117
+ return self.client.dataset(dataset_id)
118
+ except Exception as e:
119
+ raise RuntimeError(
120
+ f"Failed to get dataset {dataset_id}: {e}"
121
+ ) from e
122
+
123
+ def get_dataset(
124
+ self,
125
+ dataset_id: str,
126
+ ) -> Optional[dict]:
127
+ r"""Get a dataset from the Apify platform.
128
+
129
+ Args:
130
+ dataset_id (str): The ID of the dataset to get.
131
+
132
+ Returns:
133
+ dict: The dataset.
134
+
135
+ Raises:
136
+ RuntimeError: If the dataset fails to be retrieved.
137
+ """
138
+ try:
139
+ return self.get_dataset_client(dataset_id).get()
140
+ except Exception as e:
141
+ raise RuntimeError(
142
+ f"Failed to get dataset {dataset_id}: {e}"
143
+ ) from e
144
+
145
+ def update_dataset(
146
+ self,
147
+ dataset_id: str,
148
+ name: str,
149
+ ) -> dict:
150
+ r"""Update a dataset on the Apify platform.
151
+
152
+ Args:
153
+ dataset_id (str): The ID of the dataset to update.
154
+ name (str): The new name for the dataset.
155
+
156
+ Returns:
157
+ dict: The updated dataset.
158
+
159
+ Raises:
160
+ RuntimeError: If the dataset fails to be updated.
161
+ """
162
+ try:
163
+ return self.get_dataset_client(dataset_id).update(name=name)
164
+ except Exception as e:
165
+ raise RuntimeError(
166
+ f"Failed to update dataset {dataset_id}: {e}"
167
+ ) from e
168
+
169
+ def get_dataset_items(
170
+ self,
171
+ dataset_id: str,
172
+ ) -> List:
173
+ r"""Get items from a dataset on the Apify platform.
174
+
175
+ Args:
176
+ dataset_id (str): The ID of the dataset to get items from.
177
+
178
+ Returns:
179
+ list: The items in the dataset.
180
+
181
+ Raises:
182
+ RuntimeError: If the items fail to be retrieved.
183
+ """
184
+ try:
185
+ items = self.get_dataset_client(dataset_id).list_items().items
186
+ return items
187
+ except Exception as e:
188
+ raise RuntimeError(
189
+ f"Failed to get dataset items {dataset_id}: {e}"
190
+ ) from e
191
+
192
+ def get_datasets(
193
+ self,
194
+ unnamed: Optional[bool] = None,
195
+ limit: Optional[int] = None,
196
+ offset: Optional[int] = None,
197
+ desc: Optional[bool] = None,
198
+ ) -> List[dict]:
199
+ r"""Get all named datasets from the Apify platform.
200
+
201
+ Args:
202
+ unnamed (bool, optional): Whether to include unnamed key-value
203
+ stores in the list
204
+ limit (int, optional): How many key-value stores to retrieve
205
+ offset (int, optional): What key-value store to include as first
206
+ when retrieving the list
207
+ desc (bool, optional): Whether to sort the key-value stores in
208
+ descending order based on their modification date
209
+
210
+ Returns:
211
+ List[dict]: The datasets.
212
+
213
+ Raises:
214
+ RuntimeError: If the datasets fail to be retrieved.
215
+ """
216
+ try:
217
+ return (
218
+ self.client.datasets()
219
+ .list(unnamed=unnamed, limit=limit, offset=offset, desc=desc)
220
+ .items
221
+ )
222
+ except Exception as e:
223
+ raise RuntimeError(f"Failed to get datasets: {e}") from e
@@ -106,7 +106,18 @@ class VectorDBMemory(AgentMemory):
106
106
 
107
107
  class LongtermAgentMemory(AgentMemory):
108
108
  r"""An implementation of the :obj:`AgentMemory` abstract base class for
109
- augumenting ChatHistoryMemory with VectorDBMemory.
109
+ augmenting ChatHistoryMemory with VectorDBMemory.
110
+
111
+ Args:
112
+ context_creator (BaseContextCreator): A model context creator.
113
+ chat_history_block (Optional[ChatHistoryBlock], optional): A chat
114
+ history block. If `None`, a :obj:`ChatHistoryBlock` will be used.
115
+ (default: :obj:`None`)
116
+ vector_db_block (Optional[VectorDBBlock], optional): A vector database
117
+ block. If `None`, a :obj:`VectorDBBlock` will be used.
118
+ (default: :obj:`None`)
119
+ retrieve_limit (int, optional): The maximum number of messages
120
+ to be added into the context. (default: :obj:`3`)
110
121
  """
111
122
 
112
123
  def __init__(
@@ -123,9 +134,21 @@ class LongtermAgentMemory(AgentMemory):
123
134
  self._current_topic: str = ""
124
135
 
125
136
  def get_context_creator(self) -> BaseContextCreator:
137
+ r"""Returns the context creator used by the memory.
138
+
139
+ Returns:
140
+ BaseContextCreator: The context creator used by the memory.
141
+ """
126
142
  return self._context_creator
127
143
 
128
144
  def retrieve(self) -> List[ContextRecord]:
145
+ r"""Retrieves context records from both the chat history and the vector
146
+ database.
147
+
148
+ Returns:
149
+ List[ContextRecord]: A list of context records retrieved from both
150
+ the chat history and the vector database.
151
+ """
129
152
  chat_history = self.chat_history_block.retrieve()
130
153
  vector_db_retrieve = self.vector_db_block.retrieve(
131
154
  self._current_topic, self.retrieve_limit
camel/messages/base.py CHANGED
@@ -81,6 +81,25 @@ class BaseMessage:
81
81
  OpenAIVisionDetailType, str
82
82
  ] = OpenAIVisionDetailType.LOW,
83
83
  ) -> "BaseMessage":
84
+ r"""Create a new user message.
85
+
86
+ Args:
87
+ role_name (str): The name of the user role.
88
+ content (str): The content of the message.
89
+ meta_dict (Optional[Dict[str, str]]): Additional metadata
90
+ dictionary for the message.
91
+ video_bytes (Optional[bytes]): Optional bytes of a video
92
+ associated with the message.
93
+ image_list (Optional[List[Image.Image]]): Optional list of PIL
94
+ Image objects associated with the message.
95
+ image_detail (Union[OpenAIVisionDetailType, str]): Detail level of
96
+ the images associated with the message.
97
+ video_detail (Union[OpenAIVisionDetailType, str]): Detail level of
98
+ the videos associated with the message.
99
+
100
+ Returns:
101
+ BaseMessage: The new user message.
102
+ """
84
103
  return cls(
85
104
  role_name,
86
105
  RoleType.USER,
@@ -107,6 +126,25 @@ class BaseMessage:
107
126
  OpenAIVisionDetailType, str
108
127
  ] = OpenAIVisionDetailType.LOW,
109
128
  ) -> "BaseMessage":
129
+ r"""Create a new assistant message.
130
+
131
+ Args:
132
+ role_name (str): The name of the assistant role.
133
+ content (str): The content of the message.
134
+ meta_dict (Optional[Dict[str, str]]): Additional metadata
135
+ dictionary for the message.
136
+ video_bytes (Optional[bytes]): Optional bytes of a video
137
+ associated with the message.
138
+ image_list (Optional[List[Image.Image]]): Optional list of PIL
139
+ Image objects associated with the message.
140
+ image_detail (Union[OpenAIVisionDetailType, str]): Detail level of
141
+ the images associated with the message.
142
+ video_detail (Union[OpenAIVisionDetailType, str]): Detail level of
143
+ the videos associated with the message.
144
+
145
+ Returns:
146
+ BaseMessage: The new assistant message.
147
+ """
110
148
  return cls(
111
149
  role_name,
112
150
  RoleType.ASSISTANT,
camel/models/__init__.py CHANGED
@@ -24,11 +24,13 @@ from .ollama_model import OllamaModel
24
24
  from .openai_audio_models import OpenAIAudioModels
25
25
  from .openai_compatible_model import OpenAICompatibleModel
26
26
  from .openai_model import OpenAIModel
27
+ from .qwen_model import QwenModel
27
28
  from .reka_model import RekaModel
28
29
  from .samba_model import SambaModel
29
30
  from .stub_model import StubModel
30
31
  from .togetherai_model import TogetherAIModel
31
32
  from .vllm_model import VLLMModel
33
+ from .yi_model import YiModel
32
34
  from .zhipuai_model import ZhipuAIModel
33
35
 
34
36
  __all__ = [
@@ -51,4 +53,6 @@ __all__ = [
51
53
  'RekaModel',
52
54
  'SambaModel',
53
55
  'TogetherAIModel',
56
+ 'YiModel',
57
+ 'QwenModel',
54
58
  ]
@@ -23,11 +23,13 @@ from camel.models.mistral_model import MistralModel
23
23
  from camel.models.ollama_model import OllamaModel
24
24
  from camel.models.openai_compatible_model import OpenAICompatibleModel
25
25
  from camel.models.openai_model import OpenAIModel
26
+ from camel.models.qwen_model import QwenModel
26
27
  from camel.models.reka_model import RekaModel
27
28
  from camel.models.samba_model import SambaModel
28
29
  from camel.models.stub_model import StubModel
29
30
  from camel.models.togetherai_model import TogetherAIModel
30
31
  from camel.models.vllm_model import VLLMModel
32
+ from camel.models.yi_model import YiModel
31
33
  from camel.models.zhipuai_model import ZhipuAIModel
32
34
  from camel.types import ModelPlatformType, ModelType, UnifiedModelType
33
35
  from camel.utils import BaseTokenCounter
@@ -106,6 +108,10 @@ class ModelFactory:
106
108
  model_class = MistralModel
107
109
  elif model_platform.is_reka and model_type.is_reka:
108
110
  model_class = RekaModel
111
+ elif model_platform.is_yi and model_type.is_yi:
112
+ model_class = YiModel
113
+ elif model_platform.is_qwen and model_type.is_qwen:
114
+ model_class = QwenModel
109
115
  elif model_type == ModelType.STUB:
110
116
  model_class = StubModel
111
117
 
@@ -0,0 +1,139 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+
15
+ import os
16
+ from typing import Any, Dict, List, Optional, Union
17
+
18
+ from openai import OpenAI, Stream
19
+
20
+ from camel.configs import QWEN_API_PARAMS, QwenConfig
21
+ from camel.messages import OpenAIMessage
22
+ from camel.models import BaseModelBackend
23
+ from camel.types import (
24
+ ChatCompletion,
25
+ ChatCompletionChunk,
26
+ ModelType,
27
+ )
28
+ from camel.utils import (
29
+ BaseTokenCounter,
30
+ OpenAITokenCounter,
31
+ api_keys_required,
32
+ )
33
+
34
+
35
+ class QwenModel(BaseModelBackend):
36
+ r"""Qwen API in a unified BaseModelBackend interface.
37
+
38
+ Args:
39
+ model_type (Union[ModelType, str]): Model for which a backend is
40
+ created, one of Qwen series.
41
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
42
+ that will be fed into :obj:`openai.ChatCompletion.create()`. If
43
+ :obj:`None`, :obj:`QwenConfig().as_dict()` will be used.
44
+ (default: :obj:`None`)
45
+ api_key (Optional[str], optional): The API key for authenticating with
46
+ the Qwen service. (default: :obj:`None`)
47
+ url (Optional[str], optional): The url to the Qwen service.
48
+ (default: :obj:`https://dashscope.aliyuncs.com/compatible-mode/v1`)
49
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
50
+ use for the model. If not provided, :obj:`OpenAITokenCounter(
51
+ ModelType.GPT_4O_MINI)` will be used.
52
+ (default: :obj:`None`)
53
+ """
54
+
55
+ def __init__(
56
+ self,
57
+ model_type: Union[ModelType, str],
58
+ model_config_dict: Optional[Dict[str, Any]] = None,
59
+ api_key: Optional[str] = None,
60
+ url: Optional[str] = None,
61
+ token_counter: Optional[BaseTokenCounter] = None,
62
+ ) -> None:
63
+ if model_config_dict is None:
64
+ model_config_dict = QwenConfig().as_dict()
65
+ api_key = api_key or os.environ.get("QWEN_API_KEY")
66
+ url = url or os.environ.get(
67
+ "QWEN_API_BASE_URL",
68
+ "https://dashscope.aliyuncs.com/compatible-mode/v1",
69
+ )
70
+ super().__init__(
71
+ model_type, model_config_dict, api_key, url, token_counter
72
+ )
73
+ self._client = OpenAI(
74
+ timeout=60,
75
+ max_retries=3,
76
+ api_key=self._api_key,
77
+ base_url=self._url,
78
+ )
79
+
80
+ @api_keys_required("QWEN_API_KEY")
81
+ def run(
82
+ self,
83
+ messages: List[OpenAIMessage],
84
+ ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
85
+ r"""Runs inference of Qwen chat completion.
86
+
87
+ Args:
88
+ messages (List[OpenAIMessage]): Message list with the chat history
89
+ in OpenAI API format.
90
+
91
+ Returns:
92
+ Union[ChatCompletion, Stream[ChatCompletionChunk]]:
93
+ `ChatCompletion` in the non-stream mode, or
94
+ `Stream[ChatCompletionChunk]` in the stream mode.
95
+ """
96
+ response = self._client.chat.completions.create(
97
+ messages=messages,
98
+ model=self.model_type,
99
+ **self.model_config_dict,
100
+ )
101
+ return response
102
+
103
+ @property
104
+ def token_counter(self) -> BaseTokenCounter:
105
+ r"""Initialize the token counter for the model backend.
106
+
107
+ Returns:
108
+ OpenAITokenCounter: The token counter following the model's
109
+ tokenization style.
110
+ """
111
+
112
+ if not self._token_counter:
113
+ self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
114
+ return self._token_counter
115
+
116
+ def check_model_config(self):
117
+ r"""Check whether the model configuration contains any
118
+ unexpected arguments to Qwen API.
119
+
120
+ Raises:
121
+ ValueError: If the model configuration dictionary contains any
122
+ unexpected arguments to Qwen API.
123
+ """
124
+ for param in self.model_config_dict:
125
+ if param not in QWEN_API_PARAMS:
126
+ raise ValueError(
127
+ f"Unexpected argument `{param}` is "
128
+ "input into Qwen model backend."
129
+ )
130
+
131
+ @property
132
+ def stream(self) -> bool:
133
+ r"""Returns whether the model is in stream mode, which sends partial
134
+ results each time.
135
+
136
+ Returns:
137
+ bool: Whether the model is in stream mode.
138
+ """
139
+ return self.model_config_dict.get('stream', False)
@@ -0,0 +1,138 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+
15
+ import os
16
+ from typing import Any, Dict, List, Optional, Union
17
+
18
+ from openai import OpenAI, Stream
19
+
20
+ from camel.configs import YI_API_PARAMS, YiConfig
21
+ from camel.messages import OpenAIMessage
22
+ from camel.models import BaseModelBackend
23
+ from camel.types import (
24
+ ChatCompletion,
25
+ ChatCompletionChunk,
26
+ ModelType,
27
+ )
28
+ from camel.utils import (
29
+ BaseTokenCounter,
30
+ OpenAITokenCounter,
31
+ api_keys_required,
32
+ )
33
+
34
+
35
+ class YiModel(BaseModelBackend):
36
+ r"""Yi API in a unified BaseModelBackend interface.
37
+
38
+ Args:
39
+ model_type (Union[ModelType, str]): Model for which a backend is
40
+ created, one of Yi series.
41
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
42
+ that will be fed into :obj:`openai.ChatCompletion.create()`. If
43
+ :obj:`None`, :obj:`YiConfig().as_dict()` will be used.
44
+ (default: :obj:`None`)
45
+ api_key (Optional[str], optional): The API key for authenticating with
46
+ the Yi service. (default: :obj:`None`)
47
+ url (Optional[str], optional): The url to the Yi service.
48
+ (default: :obj:`https://api.lingyiwanwu.com/v1`)
49
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
50
+ use for the model. If not provided, :obj:`OpenAITokenCounter(
51
+ ModelType.GPT_4O_MINI)` will be used.
52
+ (default: :obj:`None`)
53
+ """
54
+
55
+ def __init__(
56
+ self,
57
+ model_type: Union[ModelType, str],
58
+ model_config_dict: Optional[Dict[str, Any]] = None,
59
+ api_key: Optional[str] = None,
60
+ url: Optional[str] = None,
61
+ token_counter: Optional[BaseTokenCounter] = None,
62
+ ) -> None:
63
+ if model_config_dict is None:
64
+ model_config_dict = YiConfig().as_dict()
65
+ api_key = api_key or os.environ.get("YI_API_KEY")
66
+ url = url or os.environ.get(
67
+ "YI_API_BASE_URL", "https://api.lingyiwanwu.com/v1"
68
+ )
69
+ super().__init__(
70
+ model_type, model_config_dict, api_key, url, token_counter
71
+ )
72
+ self._client = OpenAI(
73
+ timeout=60,
74
+ max_retries=3,
75
+ api_key=self._api_key,
76
+ base_url=self._url,
77
+ )
78
+
79
+ @api_keys_required("YI_API_KEY")
80
+ def run(
81
+ self,
82
+ messages: List[OpenAIMessage],
83
+ ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
84
+ r"""Runs inference of Yi chat completion.
85
+
86
+ Args:
87
+ messages (List[OpenAIMessage]): Message list with the chat history
88
+ in OpenAI API format.
89
+
90
+ Returns:
91
+ Union[ChatCompletion, Stream[ChatCompletionChunk]]:
92
+ `ChatCompletion` in the non-stream mode, or
93
+ `Stream[ChatCompletionChunk]` in the stream mode.
94
+ """
95
+ response = self._client.chat.completions.create(
96
+ messages=messages,
97
+ model=self.model_type,
98
+ **self.model_config_dict,
99
+ )
100
+ return response
101
+
102
+ @property
103
+ def token_counter(self) -> BaseTokenCounter:
104
+ r"""Initialize the token counter for the model backend.
105
+
106
+ Returns:
107
+ OpenAITokenCounter: The token counter following the model's
108
+ tokenization style.
109
+ """
110
+
111
+ if not self._token_counter:
112
+ self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
113
+ return self._token_counter
114
+
115
+ def check_model_config(self):
116
+ r"""Check whether the model configuration contains any
117
+ unexpected arguments to Yi API.
118
+
119
+ Raises:
120
+ ValueError: If the model configuration dictionary contains any
121
+ unexpected arguments to Yi API.
122
+ """
123
+ for param in self.model_config_dict:
124
+ if param not in YI_API_PARAMS:
125
+ raise ValueError(
126
+ f"Unexpected argument `{param}` is "
127
+ "input into Yi model backend."
128
+ )
129
+
130
+ @property
131
+ def stream(self) -> bool:
132
+ r"""Returns whether the model is in stream mode, which sends partial
133
+ results each time.
134
+
135
+ Returns:
136
+ bool: Whether the model is in stream mode.
137
+ """
138
+ return self.model_config_dict.get('stream', False)