camel-ai 0.2.13__py3-none-any.whl → 0.2.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (72):
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +362 -237
  3. camel/benchmarks/__init__.py +11 -1
  4. camel/benchmarks/apibank.py +560 -0
  5. camel/benchmarks/apibench.py +496 -0
  6. camel/benchmarks/gaia.py +2 -2
  7. camel/benchmarks/nexus.py +518 -0
  8. camel/datagen/__init__.py +21 -0
  9. camel/datagen/cotdatagen.py +448 -0
  10. camel/datagen/self_instruct/__init__.py +36 -0
  11. camel/datagen/self_instruct/filter/__init__.py +34 -0
  12. camel/datagen/self_instruct/filter/filter_function.py +208 -0
  13. camel/datagen/self_instruct/filter/filter_registry.py +56 -0
  14. camel/datagen/self_instruct/filter/instruction_filter.py +76 -0
  15. camel/datagen/self_instruct/self_instruct.py +393 -0
  16. camel/datagen/self_instruct/templates.py +384 -0
  17. camel/datahubs/huggingface.py +12 -2
  18. camel/datahubs/models.py +4 -2
  19. camel/embeddings/mistral_embedding.py +5 -1
  20. camel/embeddings/openai_compatible_embedding.py +6 -1
  21. camel/embeddings/openai_embedding.py +5 -1
  22. camel/interpreters/e2b_interpreter.py +5 -1
  23. camel/loaders/apify_reader.py +5 -1
  24. camel/loaders/chunkr_reader.py +5 -1
  25. camel/loaders/firecrawl_reader.py +0 -30
  26. camel/logger.py +11 -5
  27. camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +4 -1
  28. camel/models/anthropic_model.py +5 -1
  29. camel/models/azure_openai_model.py +1 -2
  30. camel/models/cohere_model.py +5 -1
  31. camel/models/deepseek_model.py +5 -1
  32. camel/models/gemini_model.py +5 -1
  33. camel/models/groq_model.py +5 -1
  34. camel/models/mistral_model.py +5 -1
  35. camel/models/nemotron_model.py +5 -1
  36. camel/models/nvidia_model.py +5 -1
  37. camel/models/openai_model.py +28 -12
  38. camel/models/qwen_model.py +5 -1
  39. camel/models/reka_model.py +5 -1
  40. camel/models/reward/nemotron_model.py +5 -1
  41. camel/models/samba_model.py +5 -1
  42. camel/models/togetherai_model.py +5 -1
  43. camel/models/yi_model.py +5 -1
  44. camel/models/zhipuai_model.py +5 -1
  45. camel/retrievers/auto_retriever.py +8 -0
  46. camel/retrievers/vector_retriever.py +6 -3
  47. camel/schemas/__init__.py +2 -1
  48. camel/schemas/base.py +2 -4
  49. camel/schemas/openai_converter.py +5 -1
  50. camel/schemas/outlines_converter.py +249 -0
  51. camel/societies/role_playing.py +4 -4
  52. camel/societies/workforce/workforce.py +2 -2
  53. camel/storages/graph_storages/nebula_graph.py +119 -27
  54. camel/storages/graph_storages/neo4j_graph.py +138 -0
  55. camel/toolkits/__init__.py +2 -0
  56. camel/toolkits/arxiv_toolkit.py +20 -3
  57. camel/toolkits/function_tool.py +61 -61
  58. camel/toolkits/meshy_toolkit.py +5 -1
  59. camel/toolkits/notion_toolkit.py +1 -1
  60. camel/toolkits/openbb_toolkit.py +869 -0
  61. camel/toolkits/search_toolkit.py +91 -5
  62. camel/toolkits/stripe_toolkit.py +5 -1
  63. camel/toolkits/twitter_toolkit.py +24 -16
  64. camel/types/enums.py +10 -1
  65. camel/types/unified_model_type.py +5 -0
  66. camel/utils/__init__.py +4 -0
  67. camel/utils/commons.py +146 -42
  68. camel/utils/token_counting.py +1 -0
  69. {camel_ai-0.2.13.dist-info → camel_ai-0.2.15.dist-info}/METADATA +18 -7
  70. {camel_ai-0.2.13.dist-info → camel_ai-0.2.15.dist-info}/RECORD +72 -58
  71. {camel_ai-0.2.13.dist-info → camel_ai-0.2.15.dist-info}/LICENSE +0 -0
  72. {camel_ai-0.2.13.dist-info → camel_ai-0.2.15.dist-info}/WHEEL +0 -0
@@ -0,0 +1,249 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+
15
+ from typing import Any, Callable, List, Literal, Type, Union
16
+
17
+ from pydantic import BaseModel
18
+
19
+ from .base import BaseConverter
20
+
21
+
22
+ class OutlinesConverter(BaseConverter):
23
+ r"""OutlinesConverter is a class that converts a string or a function
24
+ into a BaseModel schema.
25
+
26
+ Args:
27
+ model_type (str, optional): The model type to be used.
28
+ platform (str, optional): The platform to be used.
29
+ 1. transformers
30
+ 2. mamba
31
+ 3. vllm
32
+ 4. llamacpp
33
+ 5. mlx
34
+ (default: "transformers")
35
+ **kwargs: The keyword arguments to be used. See the outlines
36
+ documentation for more details. See
37
+ https://dottxt-ai.github.io/outlines/latest/reference/models/models/
38
+ """
39
+
40
+ def __init__(
41
+ self,
42
+ model_type: str,
43
+ platform: Literal[
44
+ "vllm", "transformers", "mamba", "llamacpp", "mlx"
45
+ ] = "transformers",
46
+ **kwargs: Any,
47
+ ):
48
+ self.model_type = model_type
49
+ from outlines import models
50
+
51
+ match platform:
52
+ case "vllm":
53
+ self._outlines_model = models.vllm(model_type, **kwargs)
54
+ case "transformers":
55
+ self._outlines_model = models.transformers(
56
+ model_type, **kwargs
57
+ )
58
+ case "mamba":
59
+ self._outlines_model = models.mamba(model_type, **kwargs)
60
+ case "llamacpp":
61
+ self._outlines_model = models.llamacpp(model_type, **kwargs)
62
+ case "mlx":
63
+ self._outlines_model = models.mlxlm(model_type, **kwargs)
64
+ case _:
65
+ raise ValueError(f"Unsupported platform: {platform}")
66
+
67
+ def convert_regex(self, content: str, regex_pattern: str) -> str:
68
+ r"""Convert the content to the specified regex pattern.
69
+
70
+ Args:
71
+ content (str): The content to be converted.
72
+ regex_pattern (str): The regex pattern to be used.
73
+
74
+ Returns:
75
+ str: The converted content.
76
+ """
77
+ import outlines
78
+
79
+ regex_generator = outlines.generate.regex(
80
+ self._outlines_model, regex_pattern
81
+ )
82
+ return regex_generator(content)
83
+
84
+ def convert_json(
85
+ self,
86
+ content: str,
87
+ output_schema: Union[str, Callable],
88
+ ) -> dict:
89
+ r"""Convert the content to the specified JSON schema given by
90
+ output_schema.
91
+
92
+ Args:
93
+ content (str): The content to be converted.
94
+ output_schema (Union[str, Callable]): The expected format of the
95
+ response.
96
+
97
+ Returns:
98
+ dict: The converted content in JSON format.
99
+ """
100
+ import outlines
101
+
102
+ json_generator = outlines.generate.json(
103
+ self._outlines_model, output_schema
104
+ )
105
+ return json_generator(content)
106
+
107
+ def convert_pydantic(
108
+ self,
109
+ content: str,
110
+ output_schema: Type[BaseModel],
111
+ ) -> BaseModel:
112
+ r"""Convert the content to the specified Pydantic schema.
113
+
114
+ Args:
115
+ content (str): The content to be converted.
116
+ output_schema (Type[BaseModel]): The expected format of the
117
+ response.
118
+
119
+ Returns:
120
+ BaseModel: The converted content in pydantic model format.
121
+ """
122
+ import outlines
123
+
124
+ json_generator = outlines.generate.json(
125
+ self._outlines_model, output_schema
126
+ )
127
+ return json_generator(content)
128
+
129
+ def convert_type(self, content: str, type_name: type) -> str:
130
+ r"""Convert the content to the specified type.
131
+
132
+ The following types are currently available:
133
+ 1. int
134
+ 2. float
135
+ 3. bool
136
+ 4. datetime.date
137
+ 5. datetime.time
138
+ 6. datetime.datetime
139
+ 7. custom types (https://dottxt-ai.github.io/outlines/latest/reference/generation/types/)
140
+
141
+ Args:
142
+ content (str): The content to be converted.
143
+ type_name (type): The type to be used.
144
+
145
+ Returns:
146
+ str: The converted content.
147
+ """
148
+ import outlines
149
+
150
+ type_generator = outlines.generate.format(
151
+ self._outlines_model, type_name
152
+ )
153
+ return type_generator(content)
154
+
155
+ def convert_choice(self, content: str, choices: List[str]) -> str:
156
+ r"""Convert the content to the specified choice.
157
+
158
+ Args:
159
+ content (str): The content to be converted.
160
+ choices (List[str]): The choices to be used.
161
+
162
+ Returns:
163
+ str: The converted content.
164
+ """
165
+ import outlines
166
+
167
+ choices_generator = outlines.generate.choice(
168
+ self._outlines_model, choices
169
+ )
170
+ return choices_generator(content)
171
+
172
+ def convert_grammar(self, content: str, grammar: str) -> str:
173
+ r"""Convert the content to the specified grammar.
174
+
175
+ Args:
176
+ content (str): The content to be converted.
177
+ grammar (str): The grammar to be used.
178
+
179
+ Returns:
180
+ str: The converted content.
181
+ """
182
+ import outlines
183
+
184
+ grammar_generator = outlines.generate.cfg(
185
+ self._outlines_model, grammar
186
+ )
187
+ return grammar_generator(content)
188
+
189
+ def convert( # type: ignore[override]
190
+ self,
191
+ content: str,
192
+ type: Literal["regex", "json", "type", "choice", "grammar"],
193
+ **kwargs,
194
+ ) -> Any:
195
+ r"""Formats the input content into the expected BaseModel.
196
+
197
+ Args:
198
+ type (Literal["regex", "json", "type", "choice", "grammar"]):
199
+ The type of conversion to perform. Options are:
200
+ - "regex": Match the content against a regex pattern.
201
+ - "pydantic": Convert the content into a pydantic model.
202
+ - "json": Convert the content into a JSON based on a
203
+ schema.
204
+ - "type": Convert the content into a specified type.
205
+ - "choice": Match the content against a list of valid
206
+ choices.
207
+ - "grammar": Convert the content using a specified grammar.
208
+ content (str): The content to be formatted.
209
+ **kwargs: Additional keyword arguments specific to the conversion
210
+ type.
211
+
212
+ - For "regex":
213
+ regex_pattern (str): The regex pattern to use for matching.
214
+
215
+ - For "pydantic":
216
+ output_schema (Type[BaseModel]): The schema to validate and
217
+ format the pydantic model.
218
+
219
+ - For "json":
220
+ output_schema (Union[str, Callable]): The schema to validate
221
+ and format the JSON object.
222
+
223
+ - For "type":
224
+ type_name (str): The target type name for the conversion.
225
+
226
+ - For "choice":
227
+ choices (List[str]): A list of valid choices to match against.
228
+
229
+ - For "grammar":
230
+ grammar (str): The grammar definition to use for content
231
+ conversion.
232
+ """
233
+ match type:
234
+ case "regex":
235
+ return self.convert_regex(content, kwargs.get("regex_pattern")) # type: ignore[arg-type]
236
+ case "pydantic":
237
+ return self.convert_pydantic(
238
+ content, kwargs.get("output_schema")
239
+ ) # type: ignore[arg-type]
240
+ case "json":
241
+ return self.convert_json(content, kwargs.get("output_schema")) # type: ignore[arg-type]
242
+ case "type":
243
+ return self.convert_type(content, kwargs.get("type_name")) # type: ignore[arg-type]
244
+ case "choice":
245
+ return self.convert_choice(content, kwargs.get("choices")) # type: ignore[arg-type]
246
+ case "grammar":
247
+ return self.convert_grammar(content, kwargs.get("grammar")) # type: ignore[arg-type]
248
+ case _:
249
+ raise ValueError("Unsupported output schema type")
@@ -509,8 +509,8 @@ class RolePlaying:
509
509
  # step and once in role play), and the model generates only one
510
510
  # response when multi-response support is enabled.
511
511
  if (
512
- 'n' in self.user_agent.model_config_dict.keys()
513
- and self.user_agent.model_config_dict['n'] > 1
512
+ 'n' in self.user_agent.model_backend.model_config_dict.keys()
513
+ and self.user_agent.model_backend.model_config_dict['n'] > 1
514
514
  ):
515
515
  self.user_agent.record_message(user_msg)
516
516
 
@@ -532,8 +532,8 @@ class RolePlaying:
532
532
  # step and once in role play), and the model generates only one
533
533
  # response when multi-response support is enabled.
534
534
  if (
535
- 'n' in self.assistant_agent.model_config_dict.keys()
536
- and self.assistant_agent.model_config_dict['n'] > 1
535
+ 'n' in self.assistant_agent.model_backend.model_config_dict.keys()
536
+ and self.assistant_agent.model_backend.model_config_dict['n'] > 1
537
537
  ):
538
538
  self.assistant_agent.record_message(assistant_msg)
539
539
 
@@ -251,7 +251,7 @@ class Workforce(BaseNode):
251
251
  additional_info = "A Workforce node"
252
252
  elif isinstance(child, SingleAgentWorker):
253
253
  additional_info = "tools: " + (
254
- ", ".join(child.worker.func_dict.keys())
254
+ ", ".join(child.worker.tool_dict.keys())
255
255
  )
256
256
  elif isinstance(child, RolePlayingWorker):
257
257
  additional_info = "A Role playing node"
@@ -369,7 +369,7 @@ class Workforce(BaseNode):
369
369
  model_config_dict=model_config_dict,
370
370
  )
371
371
 
372
- return ChatAgent(worker_sys_msg, model=model, tools=function_list)
372
+ return ChatAgent(worker_sys_msg, model=model, tools=function_list) # type: ignore[arg-type]
373
373
 
374
374
  async def _get_returned_task(self) -> Task:
375
375
  r"""Get the task that's published by this node and just get returned
@@ -12,8 +12,19 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
 
15
+ import logging
16
+ import re
15
17
  import time
16
- from typing import TYPE_CHECKING, Any, Dict, List, Tuple
18
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
19
+
20
+ from camel.storages.graph_storages.base import BaseGraphStorage
21
+ from camel.storages.graph_storages.graph_element import (
22
+ GraphElement,
23
+ )
24
+ from camel.utils.commons import dependencies_required
25
+
26
+ logger = logging.getLogger(__name__)
27
+
17
28
 
18
29
  if TYPE_CHECKING:
19
30
  from nebula3.data.ResultSet import ( # type: ignore[import-untyped]
@@ -24,11 +35,6 @@ if TYPE_CHECKING:
24
35
  Session,
25
36
  )
26
37
 
27
- from camel.storages.graph_storages.base import BaseGraphStorage
28
- from camel.storages.graph_storages.graph_element import (
29
- GraphElement,
30
- )
31
- from camel.utils.commons import dependencies_required
32
38
 
33
39
  MAX_RETRIES = 5
34
40
  RETRY_DELAY = 3
@@ -178,55 +184,81 @@ class NebulaGraph(BaseGraphStorage):
178
184
  """
179
185
  nodes = self._extract_nodes(graph_elements)
180
186
  for node in nodes:
181
- self.add_node(node['id'], node['type'])
187
+ try:
188
+ self.add_node(node['id'], node['type'])
189
+ except Exception as e:
190
+ logger.warning(f"Failed to add node {node}. Error: {e}")
191
+ continue
182
192
 
183
193
  relationships = self._extract_relationships(graph_elements)
184
194
  for rel in relationships:
185
- self.add_triplet(rel['subj']['id'], rel['obj']['id'], rel['type'])
195
+ try:
196
+ self.add_triplet(
197
+ rel['subj']['id'], rel['obj']['id'], rel['type']
198
+ )
199
+ except Exception as e:
200
+ logger.warning(f"Failed to add relationship {rel}. Error: {e}")
201
+ continue
186
202
 
187
203
  def ensure_edge_type_exists(
188
204
  self,
189
205
  edge_type: str,
206
+ time_label: Optional[str] = None,
190
207
  ) -> None:
191
208
  r"""Ensures that a specified edge type exists in the NebulaGraph
192
209
  database. If the edge type already exists, this method does nothing.
193
210
 
194
211
  Args:
195
212
  edge_type (str): The name of the edge type to be created.
213
+ time_label (str, optional): A specific timestamp to set as the
214
+ default value for the time label property. If not
215
+ provided, no timestamp will be added. (default: :obj:`None`)
196
216
 
197
217
  Raises:
198
218
  Exception: If the edge type creation fails after multiple retry
199
219
  attempts, an exception is raised with the error message.
200
220
  """
201
- create_edge_stmt = f'CREATE EDGE IF NOT EXISTS {edge_type}()'
221
+ create_edge_stmt = f"CREATE EDGE IF NOT EXISTS {edge_type} ()"
222
+ if time_label is not None:
223
+ time_label = self._validate_time_label(time_label)
224
+ create_edge_stmt = f"""CREATE EDGE IF NOT EXISTS {edge_type}
225
+ (time_label DATETIME DEFAULT {time_label})"""
202
226
 
203
227
  for attempt in range(MAX_RETRIES):
204
228
  res = self.query(create_edge_stmt)
205
229
  if res.is_succeeded():
206
- return # Tag creation succeeded, exit the method
230
+ return # Edge type creation succeeded
207
231
 
208
232
  if attempt < MAX_RETRIES - 1:
209
233
  time.sleep(RETRY_DELAY)
210
234
  else:
211
235
  # Final attempt failed, raise an exception
212
236
  raise Exception(
213
- f"Failed to create tag `{edge_type}` after "
237
+ f"Failed to create edge type `{edge_type}` after "
214
238
  f"{MAX_RETRIES} attempts: {res.error_msg()}"
215
239
  )
216
240
 
217
- def ensure_tag_exists(self, tag_name: str) -> None:
241
+ def ensure_tag_exists(
242
+ self, tag_name: str, time_label: Optional[str] = None
243
+ ) -> None:
218
244
  r"""Ensures a tag is created in the NebulaGraph database. If the tag
219
245
  already exists, it does nothing.
220
246
 
221
247
  Args:
222
248
  tag_name (str): The name of the tag to be created.
249
+ time_label (str, optional): A specific timestamp to set as the
250
+ default value for the time label property. If not provided,
251
+ no timestamp will be added. (default: :obj:`None`)
223
252
 
224
253
  Raises:
225
254
  Exception: If the tag creation fails after retries, an exception
226
255
  is raised with the error message.
227
256
  """
228
-
229
- create_tag_stmt = f'CREATE TAG IF NOT EXISTS {tag_name}()'
257
+ create_tag_stmt = f"CREATE TAG IF NOT EXISTS {tag_name} ()"
258
+ if time_label is not None:
259
+ time_label = self._validate_time_label(time_label)
260
+ create_tag_stmt = f"""CREATE TAG IF NOT EXISTS {tag_name}
261
+ (time_label DATETIME DEFAULT {time_label})"""
230
262
 
231
263
  for attempt in range(MAX_RETRIES):
232
264
  res = self.query(create_tag_stmt)
@@ -246,24 +278,39 @@ class NebulaGraph(BaseGraphStorage):
246
278
  self,
247
279
  node_id: str,
248
280
  tag_name: str,
281
+ time_label: Optional[str] = None,
249
282
  ) -> None:
250
283
  r"""Add a node with the specified tag and properties.
251
284
 
252
285
  Args:
253
286
  node_id (str): The ID of the node.
254
287
  tag_name (str): The tag name of the node.
288
+ time_label (str, optional): A specific timestamp to set for
289
+ the node's time label property. If not provided, no timestamp
290
+ will be added. (default: :obj:`None`)
255
291
  """
256
- self.ensure_tag_exists(tag_name)
292
+ node_id = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', node_id)
293
+ tag_name = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', tag_name)
257
294
 
258
- # Insert node without properties
259
- insert_stmt = (
260
- f'INSERT VERTEX IF NOT EXISTS {tag_name}() VALUES "{node_id}":()'
261
- )
295
+ self.ensure_tag_exists(tag_name, time_label)
296
+
297
+ # Insert node with or without time_label property
298
+ if time_label is not None:
299
+ time_label = self._validate_time_label(time_label)
300
+ insert_stmt = (
301
+ f'INSERT VERTEX IF NOT EXISTS {tag_name}(time_label) VALUES '
302
+ f'"{node_id}":("{time_label}")'
303
+ )
304
+ else:
305
+ insert_stmt = (
306
+ f'INSERT VERTEX IF NOT EXISTS {tag_name}() VALUES '
307
+ f'"{node_id}":()'
308
+ )
262
309
 
263
310
  for attempt in range(MAX_RETRIES):
264
311
  res = self.query(insert_stmt)
265
312
  if res.is_succeeded():
266
- return # Tag creation succeeded, exit the method
313
+ return # Node creation succeeded, exit the method
267
314
 
268
315
  if attempt < MAX_RETRIES - 1:
269
316
  time.sleep(RETRY_DELAY)
@@ -329,7 +376,7 @@ class NebulaGraph(BaseGraphStorage):
329
376
  @property
330
377
  def get_structured_schema(self) -> Dict[str, Any]:
331
378
  r"""Generates a structured schema consisting of node and relationship
332
- properties, relationships, and metadata.
379
+ properties, relationships, and metadata, including timestamps.
333
380
 
334
381
  Returns:
335
382
  Dict[str, Any]: A dictionary representing the structured schema.
@@ -400,6 +447,7 @@ class NebulaGraph(BaseGraphStorage):
400
447
  subj: str,
401
448
  obj: str,
402
449
  rel: str,
450
+ time_label: Optional[str] = None,
403
451
  ) -> None:
404
452
  r"""Adds a relationship (triplet) between two entities in the Nebula
405
453
  Graph database.
@@ -408,24 +456,44 @@ class NebulaGraph(BaseGraphStorage):
408
456
  subj (str): The identifier for the subject entity.
409
457
  obj (str): The identifier for the object entity.
410
458
  rel (str): The relationship between the subject and object.
459
+ time_label (str, optional): A specific timestamp to set for the
460
+ time label property of the relationship. If not provided,
461
+ no timestamp will be added. (default: :obj:`None`)
462
+
463
+ Raises:
464
+ ValueError: If the time_label format is invalid.
465
+ Exception: If creating the relationship fails.
411
466
  """
467
+ subj = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', subj)
468
+ obj = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', obj)
469
+ rel = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', rel)
470
+
412
471
  self.ensure_tag_exists(subj)
413
472
  self.ensure_tag_exists(obj)
414
- self.ensure_edge_type_exists(rel)
473
+ self.ensure_edge_type_exists(rel, time_label)
415
474
  self.add_node(node_id=subj, tag_name=subj)
416
475
  self.add_node(node_id=obj, tag_name=obj)
417
476
 
418
- # Avoid latenicy
477
+ # Avoid latency
419
478
  time.sleep(1)
420
479
 
421
- insert_stmt = (
422
- f'INSERT EDGE IF NOT EXISTS {rel}() VALUES "{subj}"->"{obj}":();'
423
- )
480
+ # Create edge with or without time_label property
481
+ if time_label is not None:
482
+ time_label = self._validate_time_label(time_label)
483
+ insert_stmt = (
484
+ f'INSERT EDGE IF NOT EXISTS {rel}(time_label) VALUES '
485
+ f'"{subj}"->"{obj}":("{time_label}")'
486
+ )
487
+ else:
488
+ insert_stmt = (
489
+ f'INSERT EDGE IF NOT EXISTS {rel}() VALUES '
490
+ f'"{subj}"->"{obj}":()'
491
+ )
424
492
 
425
493
  res = self.query(insert_stmt)
426
494
  if not res.is_succeeded():
427
495
  raise Exception(
428
- f'create relationship `]{subj}` -> `{obj}`'
496
+ f'create relationship `{subj}` -> `{obj}`'
429
497
  + f'failed: {res.error_msg()}'
430
498
  )
431
499
 
@@ -545,3 +613,27 @@ class NebulaGraph(BaseGraphStorage):
545
613
  )
546
614
 
547
615
  return rel_schema_props, rel_structure_props
616
+
617
+ def _validate_time_label(self, time_label: str) -> str:
618
+ r"""Validates the format of a time label string.
619
+
620
+ Args:
621
+ time_label (str): The time label string to validate.
622
+ Should be in format 'YYYY-MM-DDThh:mm:ss'.
623
+
624
+ Returns:
625
+ str: The validated time label.
626
+
627
+ Raises:
628
+ ValueError: If the time label format is invalid.
629
+ """
630
+ try:
631
+ # Check if the format matches YYYY-MM-DDThh:mm:ss
632
+ pattern = r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$'
633
+ if not re.match(pattern, time_label):
634
+ raise ValueError(
635
+ "Time label must be in format 'YYYY-MM-DDThh:mm:ss'"
636
+ )
637
+ return time_label
638
+ except Exception as e:
639
+ raise ValueError(f"Invalid time label format: {e!s}")