camel-ai 0.2.45__py3-none-any.whl → 0.2.47__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (54)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/datasets/few_shot_generator.py +19 -3
  7. camel/datasets/models.py +1 -1
  8. camel/loaders/__init__.py +2 -0
  9. camel/loaders/scrapegraph_reader.py +96 -0
  10. camel/models/__init__.py +4 -0
  11. camel/models/aiml_model.py +11 -104
  12. camel/models/anthropic_model.py +11 -76
  13. camel/models/aws_bedrock_model.py +112 -0
  14. camel/models/deepseek_model.py +11 -44
  15. camel/models/gemini_model.py +10 -72
  16. camel/models/groq_model.py +11 -131
  17. camel/models/internlm_model.py +11 -61
  18. camel/models/lmstudio_model.py +82 -0
  19. camel/models/model_factory.py +7 -1
  20. camel/models/modelscope_model.py +11 -122
  21. camel/models/moonshot_model.py +10 -76
  22. camel/models/nemotron_model.py +4 -60
  23. camel/models/nvidia_model.py +11 -111
  24. camel/models/ollama_model.py +12 -205
  25. camel/models/openai_compatible_model.py +51 -12
  26. camel/models/openai_model.py +3 -1
  27. camel/models/openrouter_model.py +12 -131
  28. camel/models/ppio_model.py +10 -99
  29. camel/models/qwen_model.py +11 -122
  30. camel/models/reka_model.py +1 -1
  31. camel/models/sglang_model.py +5 -3
  32. camel/models/siliconflow_model.py +10 -58
  33. camel/models/togetherai_model.py +10 -177
  34. camel/models/vllm_model.py +11 -218
  35. camel/models/volcano_model.py +1 -15
  36. camel/models/yi_model.py +11 -98
  37. camel/models/zhipuai_model.py +11 -102
  38. camel/storages/__init__.py +2 -0
  39. camel/storages/vectordb_storages/__init__.py +2 -0
  40. camel/storages/vectordb_storages/oceanbase.py +458 -0
  41. camel/toolkits/__init__.py +4 -0
  42. camel/toolkits/browser_toolkit.py +4 -7
  43. camel/toolkits/jina_reranker_toolkit.py +231 -0
  44. camel/toolkits/pyautogui_toolkit.py +428 -0
  45. camel/toolkits/search_toolkit.py +167 -0
  46. camel/toolkits/video_analysis_toolkit.py +215 -80
  47. camel/toolkits/video_download_toolkit.py +10 -3
  48. camel/types/enums.py +70 -0
  49. camel/types/unified_model_type.py +10 -0
  50. camel/utils/token_counting.py +7 -3
  51. {camel_ai-0.2.45.dist-info → camel_ai-0.2.47.dist-info}/METADATA +13 -1
  52. {camel_ai-0.2.45.dist-info → camel_ai-0.2.47.dist-info}/RECORD +54 -46
  53. {camel_ai-0.2.45.dist-info → camel_ai-0.2.47.dist-info}/WHEEL +0 -0
  54. {camel_ai-0.2.45.dist-info → camel_ai-0.2.47.dist-info}/licenses/LICENSE +0 -0
camel/models/vllm_model.py CHANGED
@@ -13,24 +13,16 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 import subprocess
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import VLLM_API_PARAMS, VLLMConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
-from camel.utils import BaseTokenCounter, OpenAITokenCounter
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import BaseTokenCounter
 
 
 # flake8: noqa: E501
-class VLLMModel(BaseModelBackend):
+class VLLMModel(OpenAICompatibleModel):
     r"""vLLM service interface.
 
     Args:
@@ -73,23 +65,15 @@ class VLLMModel(BaseModelBackend):
         url = url or os.environ.get("VLLM_BASE_URL")
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
         if not self._url:
             self._start_server()
-        # Use OpenAI client as interface call vLLM
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key="EMPTY",  # required but ignored
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key="EMPTY",  # required but ignored
-            base_url=self._url,
-        )
 
     def _start_server(self) -> None:
         r"""Starts the vllm server in a subprocess."""
@@ -107,187 +91,6 @@ class VLLMModel(BaseModelBackend):
         except Exception as e:
             print(f"Failed to start vllm server: {e}.")
 
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        response_format = response_format or self.model_config_dict.get(
-            "response_format", None
-        )
-        if response_format:
-            return self._request_parse(messages, response_format, tools)
-        else:
-            return self._request_chat_completion(messages, tools)
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion in async mode.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        response_format = response_format or self.model_config_dict.get(
-            "response_format", None
-        )
-        if response_format:
-            return await self._arequest_parse(messages, response_format, tools)
-        else:
-            return await self._arequest_chat_completion(messages, tools)
-
-    def _request_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        request_config = self.model_config_dict.copy()
-
-        if tools:
-            request_config["tools"] = tools
-
-        # Remove additionalProperties from each tool's function parameters
-        if tools and "tools" in request_config:
-            for tool in request_config["tools"]:
-                if "function" in tool and "parameters" in tool["function"]:
-                    tool["function"]["parameters"].pop(
-                        "additionalProperties", None
-                    )
-
-        return self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    async def _arequest_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        request_config = self.model_config_dict.copy()
-
-        if tools:
-            request_config["tools"] = tools
-            # Remove additionalProperties from each tool's function parameters
-            if "tools" in request_config:
-                for tool in request_config["tools"]:
-                    if "function" in tool and "parameters" in tool["function"]:
-                        tool["function"]["parameters"].pop(
-                            "additionalProperties", None
-                        )
-
-        return await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    def _request_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> ChatCompletion:
-        request_config = self.model_config_dict.copy()
-
-        request_config["response_format"] = response_format
-        request_config.pop("stream", None)
-        if tools is not None:
-            # Create a deep copy of tools to avoid modifying the original
-            import copy
-
-            request_config["tools"] = copy.deepcopy(tools)
-            # Remove additionalProperties and strict from each tool's function
-            # parameters since vLLM does not support them
-            if "tools" in request_config:
-                for tool in request_config["tools"]:
-                    if "function" in tool and "parameters" in tool["function"]:
-                        tool["function"]["parameters"].pop(
-                            "additionalProperties", None
-                        )
-                    if "strict" in tool.get("function", {}):
-                        tool["function"].pop("strict")
-
-        return self._client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    async def _arequest_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> ChatCompletion:
-        request_config = self.model_config_dict.copy()
-
-        request_config["response_format"] = response_format
-        request_config.pop("stream", None)
-        if tools is not None:
-            # Create a deep copy of tools to avoid modifying the original
-            import copy
-
-            request_config["tools"] = copy.deepcopy(tools)
-            # Remove additionalProperties and strict from each tool's function
-            # parameters since vLLM does not support them
-            if "tools" in request_config:
-                for tool in request_config["tools"]:
-                    if "function" in tool and "parameters" in tool["function"]:
-                        tool["function"]["parameters"].pop(
-                            "additionalProperties", None
-                        )
-                    if "strict" in tool.get("function", {}):
-                        tool["function"].pop("strict")
-
-        return await self._async_client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to vLLM API.
@@ -302,13 +105,3 @@ class VLLMModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into vLLM model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
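
The hunks above show the pattern repeated across most model backends in this release: delete the hand-rolled OpenAI/AsyncOpenAI clients and the duplicated `_run`/`_arun`, `token_counter`, and `stream` members, and inherit them from OpenAICompatibleModel instead. As an illustration of what a backend reduces to under this pattern, here is a hypothetical minimal subclass; `ExampleModel`, `EXAMPLE_API_PARAMS`, and the environment-variable names are illustrative only, not part of the package:

import os
from typing import Any, Dict, Optional, Union

from camel.models.openai_compatible_model import OpenAICompatibleModel
from camel.types import ModelType
from camel.utils import BaseTokenCounter

# Assumed allow-list of config keys; real backends use e.g. VLLM_API_PARAMS.
EXAMPLE_API_PARAMS = {"temperature", "top_p", "stream"}


class ExampleModel(OpenAICompatibleModel):
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
    ) -> None:
        api_key = api_key or os.environ.get("EXAMPLE_API_KEY")
        url = url or os.environ.get("EXAMPLE_BASE_URL")
        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        # Client construction, _run/_arun, token counting, and the `stream`
        # property are all inherited from OpenAICompatibleModel.
        super().__init__(
            model_type=model_type,
            model_config_dict=model_config_dict,
            api_key=api_key,
            url=url,
            token_counter=token_counter,
            timeout=timeout,
        )

    def check_model_config(self):
        # Per-backend validation is the main thing each subclass still owns.
        for param in self.model_config_dict:
            if param not in EXAMPLE_API_PARAMS:
                raise ValueError(
                    f"Unexpected argument `{param}` is "
                    "input into Example model backend."
                )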
camel/models/volcano_model.py CHANGED
@@ -20,13 +20,12 @@ from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
 class VolcanoModel(OpenAICompatibleModel):
-    r"""Volcano Engine API in a unified BaseModelBackend interface.
+    r"""Volcano Engine API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -75,19 +74,6 @@ class VolcanoModel(OpenAICompatibleModel):
             model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            # Use OpenAI token counter as an approximation
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
    def check_model_config(self):
         r"""Check whether the model configuration is valid for Volcano
         model backends.
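
The only body change here is the deleted `token_counter` override, so the default presumably now lives in OpenAICompatibleModel, whose own diff in this release (+51 -12) is consistent with absorbing it. A sketch of that presumed default, reconstructed from the overrides being deleted; the class below is illustrative, not the actual base-class source:

from typing import Optional

from camel.types import ModelType
from camel.utils import BaseTokenCounter, OpenAITokenCounter


class _BaseDefaultSketch:
    """Illustrative only: the fallback the deleted overrides all shared."""

    _token_counter: Optional[BaseTokenCounter] = None

    @property
    def token_counter(self) -> BaseTokenCounter:
        # Approximate token counts with the GPT-4o-mini tokenizer, exactly
        # as each removed per-backend override did.
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
        return self._token_counter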
camel/models/yi_model.py CHANGED
@@ -13,28 +13,19 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import YI_API_PARAMS, YiConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class YiModel(BaseModelBackend):
-    r"""Yi API in a unified BaseModelBackend interface.
+class YiModel(OpenAICompatibleModel):
+    r"""Yi API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -79,81 +70,13 @@ class YiModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of Yi chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
-        return response
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of Yi chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
-        return response
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
-
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any
@@ -169,13 +92,3 @@ class YiModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into Yi model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
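
For callers, construction of the refactored backends should be unchanged; only the internals moved into the base class. A usage sketch for YiModel, assuming the `YI_API_KEY` environment variable is set and that the `ModelType.YI_LIGHTNING` enum value from earlier releases still exists:

from camel.configs import YiConfig
from camel.models.yi_model import YiModel
from camel.types import ModelType

# Same constructor as before the refactor; _run/_arun now execute in
# OpenAICompatibleModel rather than in YiModel itself.
model = YiModel(
    model_type=ModelType.YI_LIGHTNING,  # assumed enum value
    model_config_dict=YiConfig(temperature=0.2).as_dict(),
)
response = model.run([{"role": "user", "content": "Hello"}])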
camel/models/zhipuai_model.py CHANGED
@@ -13,28 +13,19 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import ZHIPUAI_API_PARAMS, ZhipuAIConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class ZhipuAIModel(BaseModelBackend):
-    r"""ZhipuAI API in a unified BaseModelBackend interface.
+class ZhipuAIModel(OpenAICompatibleModel):
+    r"""ZhipuAI API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -79,85 +70,13 @@ class ZhipuAIModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        # Use OpenAI client as interface call ZhipuAI
-        # Reference: https://open.bigmodel.cn/dev/api#openai_sdk
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
-        return response
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        # Use OpenAI client as interface call ZhipuAI
-        # Reference: https://open.bigmodel.cn/dev/api#openai_sdk
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
-        return response
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
-
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any
@@ -173,13 +92,3 @@ class ZhipuAIModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into ZhipuAI model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
camel/storages/__init__.py CHANGED
@@ -27,6 +27,7 @@ from .vectordb_storages.base import (
     VectorRecord,
 )
 from .vectordb_storages.milvus import MilvusStorage
+from .vectordb_storages.oceanbase import OceanBaseStorage
 from .vectordb_storages.qdrant import QdrantStorage
 from .vectordb_storages.tidb import TiDBStorage
 
@@ -46,4 +47,5 @@ __all__ = [
     'Neo4jGraph',
     'NebulaGraph',
     'Mem0Storage',
+    'OceanBaseStorage',
 ]
camel/storages/vectordb_storages/__init__.py CHANGED
@@ -20,6 +20,7 @@ from .base import (
     VectorRecord,
 )
 from .milvus import MilvusStorage
+from .oceanbase import OceanBaseStorage
 from .qdrant import QdrantStorage
 from .tidb import TiDBStorage
 
@@ -30,6 +31,7 @@ __all__ = [
     'QdrantStorage',
     'MilvusStorage',
     "TiDBStorage",
+    'OceanBaseStorage',
     'VectorRecord',
     'VectorDBStatus',
 ]
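
The two `__init__.py` hunks above only wire up the new export; the implementation lives in the new camel/storages/vectordb_storages/oceanbase.py (+458 lines), which is not shown in this section. A hedged usage sketch of the exported interface: the constructor arguments below are assumptions about that file, while `add`/`query` come from the common BaseVectorStorage interface:

from camel.storages import OceanBaseStorage, VectorDBQuery, VectorRecord

# Constructor arguments are assumed for illustration; check oceanbase.py
# for the real signature.
storage = OceanBaseStorage(
    vector_dim=4,
    table_name="camel_demo_vectors",
    uri="127.0.0.1:2881",
    user="root@test",
    password="",
)

# add() and query() are the standard BaseVectorStorage operations.
storage.add([
    VectorRecord(vector=[0.1, 0.2, 0.3, 0.4], payload={"doc": "hello"}),
])
hits = storage.query(VectorDBQuery(query_vector=[0.1, 0.2, 0.3, 0.4], top_k=1))
for hit in hits:
    print(hit.record.payload, hit.similarity)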