camel-ai 0.2.1a0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- __version__ = '0.2.1a'
15
+ __version__ = '0.2.2'
16
16
 
17
17
  __all__ = [
18
18
  '__version__',
@@ -188,7 +188,7 @@ class ChatAgent(BaseAgent):
188
188
  # the tools set from `ChatAgent` will be used.
189
189
  # This design simplifies the interface while retaining tool-running
190
190
  # capabilities for `BaseModelBackend`.
191
- if all_tools and not self.model_backend.model_config_dict['tools']:
191
+ if all_tools and not self.model_backend.model_config_dict.get("tools"):
192
192
  tool_schema_list = [
193
193
  tool.get_openai_tool_schema() for tool in all_tools
194
194
  ]
@@ -426,7 +426,7 @@ class ChatAgent(BaseAgent):
426
426
  or isinstance(self.model_type, str)
427
427
  and "lama" in self.model_type
428
428
  ):
429
- if self.model_backend.model_config_dict['tools']:
429
+ if self.model_backend.model_config_dict.get("tools", None):
430
430
  tool_prompt = self._generate_tool_prompt(self.tool_schema_list)
431
431
 
432
432
  tool_sys_msg = BaseMessage.make_assistant_message(
camel/bots/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from .discord_bot import DiscordBot
15
+ from .telegram_bot import TelegramBot
16
+
17
+ __all__ = [
18
+ 'DiscordBot',
19
+ 'TelegramBot',
20
+ ]
@@ -0,0 +1,206 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
15
+ from typing import TYPE_CHECKING, List, Optional, Union
16
+
17
+ from camel.agents import ChatAgent
18
+ from camel.messages import BaseMessage
19
+ from camel.retrievers import AutoRetriever
20
+ from camel.utils import dependencies_required
21
+
22
+ try:
23
+ from unstructured.documents.elements import Element
24
+ except ImportError:
25
+ Element = None
26
+
27
+ if TYPE_CHECKING:
28
+ from discord import Message
29
+
30
+
31
+ class DiscordBot:
32
+ r"""Represents a Discord bot that is powered by a CAMEL `ChatAgent`.
33
+
34
+ Attributes:
35
+ chat_agent (ChatAgent): Chat agent that will power the bot.
36
+ channel_ids (List[int], optional): The channel IDs that the bot will
37
+ listen to.
38
+ discord_token (str, optional): The bot token.
39
+ auto_retriever (AutoRetriever): AutoRetriever instance for RAG.
40
+ vector_storage_local_path (Union[str, List[str]]): The paths to the
41
+ contents for RAG.
42
+ top_k (int): Top choice for the RAG response.
43
+ return_detailed_info (bool): If show detailed info of the RAG response.
44
+ contents (Union[str, List[str], Element, List[Element]], optional):
45
+ Local file paths, remote URLs, string contents or Element objects.
46
+ """
47
+
48
+ @dependencies_required('discord')
49
+ def __init__(
50
+ self,
51
+ chat_agent: ChatAgent,
52
+ contents: Union[str, List[str], Element, List[Element]] = None,
53
+ channel_ids: Optional[List[int]] = None,
54
+ discord_token: Optional[str] = None,
55
+ auto_retriever: Optional[AutoRetriever] = None,
56
+ vector_storage_local_path: Union[str, List[str]] = "",
57
+ top_k: int = 1,
58
+ return_detailed_info: bool = True,
59
+ ) -> None:
60
+ self.chat_agent = chat_agent
61
+ self.token = discord_token or os.getenv('DISCORD_TOKEN')
62
+ self.channel_ids = channel_ids
63
+ self.auto_retriever = auto_retriever
64
+ self.vector_storage_local_path = vector_storage_local_path
65
+ self.top_k = top_k
66
+ self.return_detailed_info = return_detailed_info
67
+ self.contents = contents
68
+
69
+ if not self.token:
70
+ raise ValueError(
71
+ "`DISCORD_TOKEN` not found in environment variables. Get it"
72
+ " here: `https://discord.com/developers/applications`."
73
+ )
74
+
75
+ import discord
76
+
77
+ intents = discord.Intents.default()
78
+ intents.message_content = True
79
+ self.client = discord.Client(intents=intents)
80
+
81
+ # Register event handlers
82
+ self.client.event(self.on_ready)
83
+ self.client.event(self.on_message)
84
+
85
+ def run(self) -> None:
86
+ r"""Start the Discord bot using its token.
87
+
88
+ This method starts the Discord bot by running the client with the
89
+ provided token.
90
+ """
91
+ self.client.run(self.token) # type: ignore[arg-type]
92
+
93
+ async def on_ready(self) -> None:
94
+ r"""This method is called when the bot has successfully connected to
95
+ the Discord server.
96
+
97
+ It prints a message indicating that the bot has logged in and displays
98
+ the username of the bot.
99
+ """
100
+ print(f'We have logged in as {self.client.user}')
101
+
102
+ async def on_message(self, message: 'Message') -> None:
103
+ r"""Event handler for when a message is received.
104
+
105
+ Args:
106
+ message (discord.Message): The message object received.
107
+ """
108
+
109
+ # If the message author is the bot itself,
110
+ # do not respond to this message
111
+ if message.author == self.client.user:
112
+ return
113
+
114
+ # If allowed channel IDs are provided,
115
+ # only respond to messages in those channels
116
+ if self.channel_ids and message.channel.id not in self.channel_ids:
117
+ return
118
+
119
+ # Only respond to messages that mention the bot
120
+ if not self.client.user or not self.client.user.mentioned_in(message):
121
+ return
122
+
123
+ user_raw_msg = message.content
124
+
125
+ if self.auto_retriever:
126
+ retrieved_content = self.auto_retriever.run_vector_retriever(
127
+ query=user_raw_msg,
128
+ contents=self.contents,
129
+ top_k=self.top_k,
130
+ return_detailed_info=self.return_detailed_info,
131
+ )
132
+ user_raw_msg = (
133
+ f"Here is the query to you: {user_raw_msg}\n"
134
+ f"Based on the retrieved content: {retrieved_content}, \n"
135
+ f"answer the query from {message.author.name}"
136
+ )
137
+
138
+ user_msg = BaseMessage.make_user_message(
139
+ role_name="User", content=user_raw_msg
140
+ )
141
+ assistant_response = self.chat_agent.step(user_msg)
142
+ await message.channel.send(assistant_response.msg.content)
143
+
144
+
145
+ if __name__ == "__main__":
146
+ assistant_sys_msg = BaseMessage.make_assistant_message(
147
+ role_name="Assistant",
148
+ content='''
149
+ Objective:
150
+ You are a customer service bot designed to assist users
151
+ with inquiries related to our open-source project.
152
+ Your responses should be informative, concise, and helpful.
153
+
154
+ Instructions:
155
+ Understand User Queries: Carefully read and understand the
156
+ user's question. Focus on keywords and context to
157
+ determine the user's intent.
158
+ Search for Relevant Information: Use the provided dataset
159
+ and refer to the RAG (file to find answers that
160
+ closely match the user's query. The RAG file contains
161
+ detailed interactions and should be your primary
162
+ resource for crafting responses.
163
+ Provide Clear and Concise Responses: Your answers should
164
+ be clear and to the point. Avoid overly technical
165
+ language unless the user's query indicates
166
+ familiarity with technical terms.
167
+ Encourage Engagement: Where applicable, encourage users
168
+ to contribute to the project or seek further
169
+ assistance.
170
+
171
+ Response Structure:
172
+ Greeting: Begin with a polite greeting or acknowledgment.
173
+ Main Response: Provide the main answer to the user's query.
174
+ Additional Information: Offer any extra tips or direct the
175
+ user to additional resources if necessary.
176
+ Closing: Close the response politely, encouraging
177
+ further engagement if appropriate.
178
+ bd
179
+ Tone:
180
+ Professional: Maintain a professional tone that
181
+ instills confidence in the user.
182
+ Friendly: Be approachable and friendly to make users
183
+ feel comfortable.
184
+ Helpful: Always aim to be as helpful as possible,
185
+ guiding users to solutions.
186
+ ''',
187
+ )
188
+
189
+ agent = ChatAgent(
190
+ assistant_sys_msg,
191
+ message_window_size=10,
192
+ )
193
+ # Uncomment the following code and offer storage information
194
+ # for RAG functionality
195
+
196
+ # auto_retriever = AutoRetriever(
197
+ # vector_storage_local_path="examples/bots",
198
+ # storage_type=StorageType.QDRANT,
199
+ # )
200
+
201
+ bot = DiscordBot(
202
+ agent,
203
+ # auto_retriever=auto_retriever,
204
+ # vector_storage_local_path=["local_data/"],
205
+ )
206
+ bot.run()
@@ -0,0 +1,82 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
15
+ from typing import TYPE_CHECKING, Optional
16
+
17
+ from camel.agents import ChatAgent
18
+ from camel.messages import BaseMessage
19
+ from camel.utils import dependencies_required
20
+
21
+ # Conditionally import telebot types only for type checking
22
+ if TYPE_CHECKING:
23
+ from telebot.types import ( # type: ignore[import-untyped]
24
+ Message,
25
+ )
26
+
27
+
28
+ class TelegramBot:
29
+ r"""Represents a Telegram bot that is powered by an agent.
30
+
31
+ Attributes:
32
+ chat_agent (ChatAgent): Chat agent that will power the bot.
33
+ telegram_token (str, optional): The bot token.
34
+ """
35
+
36
+ @dependencies_required('telebot')
37
+ def __init__(
38
+ self,
39
+ chat_agent: ChatAgent,
40
+ telegram_token: Optional[str] = None,
41
+ ) -> None:
42
+ self.chat_agent = chat_agent
43
+
44
+ if not telegram_token:
45
+ self.token = os.getenv('TELEGRAM_TOKEN')
46
+ if not self.token:
47
+ raise ValueError(
48
+ "`TELEGRAM_TOKEN` not found in environment variables. "
49
+ "Get it from t.me/BotFather."
50
+ )
51
+ else:
52
+ self.token = telegram_token
53
+
54
+ import telebot # type: ignore[import-untyped]
55
+
56
+ self.bot = telebot.TeleBot(token=self.token)
57
+
58
+ # Register the message handler within the constructor
59
+ self.bot.message_handler(func=lambda message: True)(self.on_message)
60
+
61
+ def run(self) -> None:
62
+ r"""Start the Telegram bot."""
63
+ print("Telegram bot is running...")
64
+ self.bot.infinity_polling()
65
+
66
+ def on_message(self, message: 'Message') -> None:
67
+ r"""Handles incoming messages from the user.
68
+
69
+ Args:
70
+ message (types.Message): The incoming message object.
71
+ """
72
+ self.chat_agent.reset()
73
+
74
+ if not message.text:
75
+ return
76
+
77
+ user_msg = BaseMessage.make_user_message(
78
+ role_name="User", content=message.text
79
+ )
80
+ assistant_response = self.chat_agent.step(user_msg)
81
+
82
+ self.bot.reply_to(message, assistant_response.msg.content)
@@ -86,7 +86,7 @@ class GeminiConfig(BaseConfig):
86
86
 
87
87
  @model_validator(mode="before")
88
88
  @classmethod
89
- def fields_type_checking(cls, data: Any):
89
+ def model_type_checking(cls, data: Any):
90
90
  if isinstance(data, dict):
91
91
  response_schema = data.get("response_schema")
92
92
  safety_settings = data.get("safety_settings")
@@ -13,6 +13,7 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  import uuid
15
15
  import warnings
16
+ from io import IOBase
16
17
  from typing import (
17
18
  Any,
18
19
  Dict,
@@ -108,7 +109,7 @@ class UnstructuredIO:
108
109
  specified.
109
110
 
110
111
  Notes:
111
- Available document types:
112
+ Supported file types:
112
113
  "csv", "doc", "docx", "epub", "image", "md", "msg", "odt",
113
114
  "org", "pdf", "ppt", "pptx", "rtf", "rst", "tsv", "xlsx".
114
115
 
@@ -152,6 +153,39 @@ class UnstructuredIO:
152
153
  warnings.warn(f"Failed to partition the file: {input_path}")
153
154
  return None
154
155
 
156
+ @staticmethod
157
+ def parse_bytes(file: IOBase, **kwargs: Any) -> Union[List[Element], None]:
158
+ r"""Parses a bytes stream and converts its contents into elements.
159
+
160
+ Args:
161
+ file (IOBase): The file in bytes format to be parsed.
162
+ **kwargs: Extra kwargs passed to the partition function.
163
+
164
+ Returns:
165
+ Union[List[Element], None]: List of elements after parsing the file
166
+ if successful, otherwise `None`.
167
+
168
+ Notes:
169
+ Supported file types:
170
+ "csv", "doc", "docx", "epub", "image", "md", "msg", "odt",
171
+ "org", "pdf", "ppt", "pptx", "rtf", "rst", "tsv", "xlsx".
172
+
173
+ References:
174
+ https://docs.unstructured.io/open-source/core-functionality/partitioning
175
+ """
176
+
177
+ from unstructured.partition.auto import partition
178
+
179
+ try:
180
+ # Use partition to process the bytes stream
181
+ elements = partition(file=file, **kwargs)
182
+ return elements
183
+ except Exception as e:
184
+ import warnings
185
+
186
+ warnings.warn(f"Failed to partition the file stream: {e}")
187
+ return None
188
+
155
189
  @staticmethod
156
190
  def clean_text_data(
157
191
  text: str,
@@ -93,10 +93,10 @@ class FunctionCallingMessage(BaseMessage):
93
93
  OpenAIMessage: The converted :obj:`OpenAIMessage` object
94
94
  with its role being "function".
95
95
  """
96
- if (not self.func_name) or (not self.result):
96
+ if not self.func_name:
97
97
  raise ValueError(
98
98
  "Invalid request for converting into function message"
99
- " due to missing function name or results."
99
+ " due to missing function name."
100
100
  )
101
101
 
102
102
  result_content = {"result": {str(self.result)}}
@@ -12,6 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
+ import os
15
16
  from typing import Any, Dict, List, Optional, Union
16
17
 
17
18
  from openai import OpenAI, Stream
@@ -25,14 +26,14 @@ from camel.utils import (
25
26
 
26
27
 
27
28
  class OpenAICompatibilityModel:
28
- r"""Constructor for model backend supporting OpenAI compatibility."""
29
+ r"""LLM API served by OpenAI-compatible providers."""
29
30
 
30
31
  def __init__(
31
32
  self,
32
33
  model_type: str,
33
34
  model_config_dict: Dict[str, Any],
34
- api_key: str,
35
- url: str,
35
+ api_key: Optional[str] = None,
36
+ url: Optional[str] = None,
36
37
  token_counter: Optional[BaseTokenCounter] = None,
37
38
  ) -> None:
38
39
  r"""Constructor for model backend.
@@ -51,13 +52,25 @@ class OpenAICompatibilityModel:
51
52
  """
52
53
  self.model_type = model_type
53
54
  self.model_config_dict = model_config_dict
54
- self._token_counter = token_counter
55
+ self._url = url or os.environ.get("OPENAI_COMPATIBILIY_API_BASE_URL")
56
+ self._api_key = api_key or os.environ.get(
57
+ "OPENAI_COMPATIBILIY_API_KEY"
58
+ )
59
+ if self._url is None:
60
+ raise ValueError(
61
+ "For OpenAI-compatible models, you must provide the `url`."
62
+ )
63
+ if self._api_key is None:
64
+ raise ValueError(
65
+ "For OpenAI-compatible models, you must provide the `api_key`."
66
+ )
55
67
  self._client = OpenAI(
56
68
  timeout=60,
57
69
  max_retries=3,
58
- api_key=api_key,
59
- base_url=url,
70
+ base_url=self._url,
71
+ api_key=self._api_key,
60
72
  )
73
+ self._token_counter = token_counter
61
74
 
62
75
  def run(
63
76
  self,
@@ -13,6 +13,7 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  import os
15
15
  import warnings
16
+ from io import IOBase
16
17
  from typing import Any, Dict, List, Optional, Union
17
18
  from urllib.parse import urlparse
18
19
 
@@ -72,26 +73,34 @@ class VectorRetriever(BaseRetriever):
72
73
 
73
74
  def process(
74
75
  self,
75
- content: Union[str, Element],
76
+ content: Union[str, Element, IOBase],
76
77
  chunk_type: str = "chunk_by_title",
77
78
  max_characters: int = 500,
79
+ embed_batch: int = 50,
80
+ should_chunk: bool = True,
78
81
  **kwargs: Any,
79
82
  ) -> None:
80
- r"""Processes content from a file or URL, divides it into chunks by
81
- using `Unstructured IO`, and stores their embeddings in the specified
82
- vector storage.
83
+ r"""Processes content from local file path, remote URL, string
84
+ content, Element object, or a binary file object, divides it into
85
+ chunks by using `Unstructured IO`, and stores their embeddings in the
86
+ specified vector storage.
83
87
 
84
88
  Args:
85
- content (Union[str, Element]): Local file path, remote URL,
86
- string content or Element object.
89
+ content (Union[str, Element, IOBase]): Local file path, remote
90
+ URL, string content, Element object, or a binary file object.
87
91
  chunk_type (str): Type of chunking going to apply. Defaults to
88
92
  "chunk_by_title".
89
93
  max_characters (int): Max number of characters in each chunk.
90
94
  Defaults to `500`.
95
+ embed_batch (int): Size of batch for embeddings. Defaults to `50`.
96
+ should_chunk (bool): If True, divide the content into chunks,
97
+ otherwise skip chunking. Defaults to True.
91
98
  **kwargs (Any): Additional keyword arguments for content parsing.
92
99
  """
93
100
  if isinstance(content, Element):
94
101
  elements = [content]
102
+ elif isinstance(content, IOBase):
103
+ elements = self.uio.parse_bytes(file=content, **kwargs) or []
95
104
  else:
96
105
  # Check if the content is URL
97
106
  parsed_url = urlparse(content)
@@ -100,20 +109,26 @@ class VectorRetriever(BaseRetriever):
100
109
  elements = self.uio.parse_file_or_url(content, **kwargs) or []
101
110
  else:
102
111
  elements = [self.uio.create_element_from_text(text=content)]
103
- if elements:
104
- chunks = self.uio.chunk_elements(
105
- chunk_type=chunk_type,
106
- elements=elements,
107
- max_characters=max_characters,
108
- )
109
112
  if not elements:
110
113
  warnings.warn(
111
114
  f"No elements were extracted from the content: {content}"
112
115
  )
113
116
  return
114
- # Iterate to process and store embeddings, set batch of 50
115
- for i in range(0, len(chunks), 50):
116
- batch_chunks = chunks[i : i + 50]
117
+
118
+ # Chunk the content if required
119
+ chunks = (
120
+ self.uio.chunk_elements(
121
+ chunk_type=chunk_type,
122
+ elements=elements,
123
+ max_characters=max_characters,
124
+ )
125
+ if should_chunk
126
+ else elements
127
+ )
128
+
129
+ # Process chunks in batches and store embeddings
130
+ for i in range(0, len(chunks), embed_batch):
131
+ batch_chunks = chunks[i : i + embed_batch]
117
132
  batch_vectors = self.embedding_model.embed_list(
118
133
  objs=[str(chunk) for chunk in batch_chunks]
119
134
  )
@@ -124,6 +139,8 @@ class VectorRetriever(BaseRetriever):
124
139
  for vector, chunk in zip(batch_vectors, batch_chunks):
125
140
  if isinstance(content, str):
126
141
  content_path_info = {"content path": content}
142
+ elif isinstance(content, IOBase):
143
+ content_path_info = {"content path": "From file bytes"}
127
144
  elif isinstance(content, Element):
128
145
  content_path_info = {
129
146
  "content path": content.metadata.file_directory
@@ -11,6 +11,7 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import logging
14
15
  from typing import Dict, List, Optional, Sequence, Tuple, Union
15
16
 
16
17
  from camel.agents import (
@@ -27,6 +28,9 @@ from camel.prompts import TextPrompt
27
28
  from camel.responses import ChatAgentResponse
28
29
  from camel.types import RoleType, TaskType
29
30
 
31
+ logger = logging.getLogger(__name__)
32
+ logger.setLevel(logging.WARNING)
33
+
30
34
 
31
35
  class RolePlaying:
32
36
  r"""Role playing between two agents.
@@ -97,6 +101,14 @@ class RolePlaying:
97
101
  extend_task_specify_meta_dict: Optional[Dict] = None,
98
102
  output_language: Optional[str] = None,
99
103
  ) -> None:
104
+ if model is not None:
105
+ logger.warning(
106
+ "The provided model will override the model settings in "
107
+ "all agents, including any configurations passed "
108
+ "through assistant_agent_kwargs, user_agent_kwargs, and "
109
+ "other agent-specific kwargs."
110
+ )
111
+
100
112
  self.with_task_specify = with_task_specify
101
113
  self.with_task_planner = with_task_planner
102
114
  self.with_critic_in_the_loop = with_critic_in_the_loop
@@ -13,6 +13,7 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
15
  from .graph_storages.base import BaseGraphStorage
16
+ from .graph_storages.nebula_graph import NebulaGraph
16
17
  from .graph_storages.neo4j_graph import Neo4jGraph
17
18
  from .key_value_storages.base import BaseKeyValueStorage
18
19
  from .key_value_storages.in_memory import InMemoryKeyValueStorage
@@ -40,4 +41,5 @@ __all__ = [
40
41
  'MilvusStorage',
41
42
  'BaseGraphStorage',
42
43
  'Neo4jGraph',
44
+ 'NebulaGraph',
43
45
  ]
@@ -14,10 +14,12 @@
14
14
 
15
15
  from .base import BaseGraphStorage
16
16
  from .graph_element import GraphElement
17
+ from .nebula_graph import NebulaGraph
17
18
  from .neo4j_graph import Neo4jGraph
18
19
 
19
20
  __all__ = [
20
21
  'BaseGraphStorage',
21
22
  'GraphElement',
22
23
  'Neo4jGraph',
24
+ 'NebulaGraph',
23
25
  ]