camel-ai 0.2.24__py3-none-any.whl → 0.2.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only, and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. See the release advisory for more details.

@@ -12,8 +12,9 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
 
15
- import shlex
15
+ import os
16
16
  import subprocess
17
+ import sys
17
18
  import tempfile
18
19
  from pathlib import Path
19
20
  from typing import Any, ClassVar, Dict, List
@@ -43,12 +44,14 @@ class SubprocessInterpreter(BaseInterpreter):
43
44
  the executed code. (default: :obj:`False`)
44
45
  print_stderr (bool, optional): If True, print the standard error of the
45
46
  executed code. (default: :obj:`True`)
47
+ execution_timeout (int, optional): Maximum time in seconds to wait for
48
+ code execution to complete. (default: :obj:`60`)
46
49
  """
47
50
 
48
- _CODE_EXECUTE_CMD_MAPPING: ClassVar[Dict[str, str]] = {
49
- "python": "python {file_name}",
50
- "bash": "bash {file_name}",
51
- "r": "Rscript {file_name}",
51
+ _CODE_EXECUTE_CMD_MAPPING: ClassVar[Dict[str, Dict[str, str]]] = {
52
+ "python": {"posix": "python {file_name}", "nt": "python {file_name}"},
53
+ "bash": {"posix": "bash {file_name}", "nt": "bash {file_name}"},
54
+ "r": {"posix": "Rscript {file_name}", "nt": "Rscript {file_name}"},
52
55
  }
53
56
 
54
57
  _CODE_EXTENSION_MAPPING: ClassVar[Dict[str, str]] = {
@@ -74,10 +77,12 @@ class SubprocessInterpreter(BaseInterpreter):
74
77
  require_confirm: bool = True,
75
78
  print_stdout: bool = False,
76
79
  print_stderr: bool = True,
80
+ execution_timeout: int = 60,
77
81
  ) -> None:
78
82
  self.require_confirm = require_confirm
79
83
  self.print_stdout = print_stdout
80
84
  self.print_stderr = print_stderr
85
+ self.execution_timeout = execution_timeout
81
86
 
82
87
  def run_file(
83
88
  self,
@@ -94,13 +99,9 @@ class SubprocessInterpreter(BaseInterpreter):
94
99
  Returns:
95
100
  str: A string containing the captured stdout and stderr of the
96
101
  executed code.
97
-
98
- Raises:
99
- RuntimeError: If the provided file path does not point to a file.
100
- InterpreterError: If the code type provided is not supported.
101
102
  """
102
103
  if not file.is_file():
103
- raise RuntimeError(f"{file} is not a file.")
104
+ return f"{file} is not a file."
104
105
  code_type = self._check_code_type(code_type)
105
106
  if self._CODE_TYPE_MAPPING[code_type] == "python":
106
107
  # For Python code, use ast to analyze and modify the code
@@ -108,7 +109,7 @@ class SubprocessInterpreter(BaseInterpreter):
108
109
 
109
110
  import astor
110
111
 
111
- with open(file, 'r') as f:
112
+ with open(file, 'r', encoding='utf-8') as f:
112
113
  source = f.read()
113
114
 
114
115
  # Parse the source code
@@ -158,34 +159,88 @@ class SubprocessInterpreter(BaseInterpreter):
158
159
  modified_source = astor.to_source(tree)
159
160
  # Create a temporary file with the modified source
160
161
  temp_file = self._create_temp_file(modified_source, "py")
161
- cmd = shlex.split(f"python {temp_file!s}")
162
- except SyntaxError:
163
- # If parsing fails, run the original file
164
- cmd = shlex.split(
165
- self._CODE_EXECUTE_CMD_MAPPING[code_type].format(
166
- file_name=str(file)
162
+ cmd = ["python", str(temp_file)]
163
+ except (SyntaxError, TypeError, ValueError) as e:
164
+ logger.warning(f"Failed to parse Python code with AST: {e}")
165
+ platform_type = 'posix' if os.name != 'nt' else 'nt'
166
+ cmd_template = self._CODE_EXECUTE_CMD_MAPPING[code_type][
167
+ platform_type
168
+ ]
169
+ base_cmd = cmd_template.split()[0]
170
+
171
+ # Check if command is available
172
+ if not self._is_command_available(base_cmd):
173
+ raise InterpreterError(
174
+ f"Command '{base_cmd}' not found. Please ensure it "
175
+ f"is installed and available in your PATH."
167
176
  )
168
- )
177
+
178
+ cmd = [base_cmd, str(file)]
169
179
  else:
170
180
  # For non-Python code, use standard execution
171
- cmd = shlex.split(
172
- self._CODE_EXECUTE_CMD_MAPPING[code_type].format(
173
- file_name=str(file)
181
+ platform_type = 'posix' if os.name != 'nt' else 'nt'
182
+ cmd_template = self._CODE_EXECUTE_CMD_MAPPING[code_type][
183
+ platform_type
184
+ ]
185
+ base_cmd = cmd_template.split()[0] # Get 'python', 'bash', etc.
186
+
187
+ # Check if command is available
188
+ if not self._is_command_available(base_cmd):
189
+ raise InterpreterError(
190
+ f"Command '{base_cmd}' not found. Please ensure it "
191
+ f"is installed and available in your PATH."
174
192
  )
175
- )
176
193
 
177
- proc = subprocess.Popen(
178
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
179
- )
180
- stdout, stderr = proc.communicate()
181
- return_code = proc.returncode
194
+ cmd = [base_cmd, str(file)]
195
+
196
+ # Get current Python executable's environment
197
+ env = os.environ.copy()
198
+
199
+ # On Windows, ensure we use the correct Python executable path
200
+ if os.name == 'nt':
201
+ python_path = os.path.dirname(sys.executable)
202
+ if 'PATH' in env:
203
+ env['PATH'] = python_path + os.pathsep + env['PATH']
204
+ else:
205
+ env['PATH'] = python_path
206
+
207
+ try:
208
+ proc = subprocess.Popen(
209
+ cmd,
210
+ stdout=subprocess.PIPE,
211
+ stderr=subprocess.PIPE,
212
+ text=True,
213
+ env=env,
214
+ shell=False, # Never use shell=True for security
215
+ )
216
+ # Add timeout to prevent hanging processes
217
+ stdout, stderr = proc.communicate(timeout=self.execution_timeout)
218
+ return_code = proc.returncode
219
+ except subprocess.TimeoutExpired:
220
+ proc.kill()
221
+ stdout, stderr = proc.communicate()
222
+ return_code = proc.returncode
223
+ timeout_msg = (
224
+ f"Process timed out after {self.execution_timeout} seconds "
225
+ f"and was terminated."
226
+ )
227
+ stderr = f"{stderr}\n{timeout_msg}"
182
228
 
183
229
  # Clean up temporary file if it was created
184
- if (
185
- self._CODE_TYPE_MAPPING[code_type] == "python"
186
- and 'temp_file' in locals()
187
- ):
188
- temp_file.unlink()
230
+ temp_file_to_clean = locals().get('temp_file')
231
+ if temp_file_to_clean is not None:
232
+ try:
233
+ if temp_file_to_clean.exists():
234
+ try:
235
+ temp_file_to_clean.unlink()
236
+ except PermissionError:
237
+ # On Windows, files might be locked
238
+ logger.warning(
239
+ f"Could not delete temp file "
240
+ f"{temp_file_to_clean} (may be locked)"
241
+ )
242
+ except Exception as e:
243
+ logger.warning(f"Failed to cleanup temporary file: {e}")
189
244
 
190
245
  if self.print_stdout and stdout:
191
246
  print("======stdout======")
@@ -240,7 +295,7 @@ class SubprocessInterpreter(BaseInterpreter):
240
295
  "computer: {code}"
241
296
  )
242
297
  while True:
243
- choice = input("Running code? [Y/n]:").lower()
298
+ choice = input("Running code? [Y/n]:").lower().strip()
244
299
  if choice in ["y", "yes", "ye", ""]:
245
300
  break
246
301
  elif choice in ["no", "n"]:
@@ -249,22 +304,72 @@ class SubprocessInterpreter(BaseInterpreter):
249
304
  "This choice stops the current operation and any "
250
305
  "further code execution."
251
306
  )
252
- temp_file_path = self._create_temp_file(
253
- code=code, extension=self._CODE_EXTENSION_MAPPING[code_type]
254
- )
307
+ else:
308
+ print("Please enter 'y' or 'n'.")
309
+
310
+ temp_file_path = None
311
+ temp_dir = None
312
+ try:
313
+ temp_file_path = self._create_temp_file(
314
+ code=code, extension=self._CODE_EXTENSION_MAPPING[code_type]
315
+ )
316
+ temp_dir = temp_file_path.parent
317
+ return self.run_file(temp_file_path, code_type)
318
+ finally:
319
+ # Clean up temp file and directory
320
+ try:
321
+ if temp_file_path and temp_file_path.exists():
322
+ try:
323
+ temp_file_path.unlink()
324
+ except PermissionError:
325
+ # On Windows, files might be locked
326
+ logger.warning(
327
+ f"Could not delete temp file {temp_file_path}"
328
+ )
329
+
330
+ if temp_dir and temp_dir.exists():
331
+ try:
332
+ import shutil
333
+
334
+ shutil.rmtree(temp_dir, ignore_errors=True)
335
+ except Exception as e:
336
+ logger.warning(f"Could not delete temp directory: {e}")
337
+ except Exception as e:
338
+ logger.warning(f"Error during cleanup: {e}")
255
339
 
256
- result = self.run_file(temp_file_path, code_type)
340
+ def _create_temp_file(self, code: str, extension: str) -> Path:
341
+ r"""Creates a temporary file with the given code and extension.
257
342
 
258
- temp_file_path.unlink()
259
- return result
343
+ Args:
344
+ code (str): The code to write to the temporary file.
345
+ extension (str): The file extension to use.
260
346
 
261
- def _create_temp_file(self, code: str, extension: str) -> Path:
262
- with tempfile.NamedTemporaryFile(
263
- mode="w", delete=False, suffix=f".{extension}"
264
- ) as f:
265
- f.write(code)
266
- name = f.name
267
- return Path(name)
347
+ Returns:
348
+ Path: The path to the created temporary file.
349
+ """
350
+ try:
351
+ # Create a temporary directory first to ensure we have write
352
+ # permissions
353
+ temp_dir = tempfile.mkdtemp()
354
+ # Create file path with appropriate extension
355
+ file_path = Path(temp_dir) / f"temp_code.{extension}"
356
+
357
+ # Write code to file with appropriate encoding
358
+ with open(file_path, 'w', encoding='utf-8') as f:
359
+ f.write(code)
360
+
361
+ return file_path
362
+ except Exception as e:
363
+ # Clean up temp directory if creation failed
364
+ if 'temp_dir' in locals():
365
+ try:
366
+ import shutil
367
+
368
+ shutil.rmtree(temp_dir, ignore_errors=True)
369
+ except Exception:
370
+ pass
371
+ logger.error(f"Failed to create temporary file: {e}")
372
+ raise
268
373
 
269
374
  def _check_code_type(self, code_type: str) -> str:
270
375
  if code_type not in self._CODE_TYPE_MAPPING:
@@ -284,3 +389,39 @@ class SubprocessInterpreter(BaseInterpreter):
284
389
  raise RuntimeError(
285
390
  "SubprocessInterpreter doesn't support " "`action_space`."
286
391
  )
392
+
393
+ def _is_command_available(self, command: str) -> bool:
394
+ r"""Check if a command is available in the system PATH.
395
+
396
+ Args:
397
+ command (str): The command to check.
398
+
399
+ Returns:
400
+ bool: True if the command is available, False otherwise.
401
+ """
402
+ if os.name == 'nt': # Windows
403
+ # On Windows, use where.exe to find the command
404
+ try:
405
+ with open(os.devnull, 'w') as devnull:
406
+ subprocess.check_call(
407
+ ['where', command],
408
+ stdout=devnull,
409
+ stderr=devnull,
410
+ shell=False,
411
+ )
412
+ return True
413
+ except subprocess.CalledProcessError:
414
+ return False
415
+ else: # Unix-like systems
416
+ # On Unix-like systems, use which to find the command
417
+ try:
418
+ with open(os.devnull, 'w') as devnull:
419
+ subprocess.check_call(
420
+ ['which', command],
421
+ stdout=devnull,
422
+ stderr=devnull,
423
+ shell=False,
424
+ )
425
+ return True
426
+ except subprocess.CalledProcessError:
427
+ return False
@@ -35,13 +35,13 @@ class AnthropicModel(BaseModelBackend):
35
35
  model_type (Union[ModelType, str]): Model for which a backend is
36
36
  created, one of CLAUDE_* series.
37
37
  model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
38
- that will be fed into Anthropic.messages.create(). If
38
+ that will be fed into `openai.ChatCompletion.create()`. If
39
39
  :obj:`None`, :obj:`AnthropicConfig().as_dict()` will be used.
40
40
  (default: :obj:`None`)
41
41
  api_key (Optional[str], optional): The API key for authenticating with
42
42
  the Anthropic service. (default: :obj:`None`)
43
43
  url (Optional[str], optional): The url to the Anthropic service.
44
- (default: :obj:`None`)
44
+ (default: :obj:`https://api.anthropic.com/v1/`)
45
45
  token_counter (Optional[BaseTokenCounter], optional): Token counter to
46
46
  use for the model. If not provided, :obj:`AnthropicTokenCounter`
47
47
  will be used. (default: :obj:`None`)
@@ -61,43 +61,24 @@ class AnthropicModel(BaseModelBackend):
61
61
  url: Optional[str] = None,
62
62
  token_counter: Optional[BaseTokenCounter] = None,
63
63
  ) -> None:
64
- from anthropic import Anthropic, AsyncAnthropic
64
+ from openai import AsyncOpenAI, OpenAI
65
65
 
66
66
  if model_config_dict is None:
67
67
  model_config_dict = AnthropicConfig().as_dict()
68
68
  api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
69
- url = url or os.environ.get("ANTHROPIC_API_BASE_URL")
69
+ url = (
70
+ url
71
+ or os.environ.get("ANTHROPIC_API_BASE_URL")
72
+ or "https://api.anthropic.com/v1/"
73
+ )
70
74
  super().__init__(
71
75
  model_type, model_config_dict, api_key, url, token_counter
72
76
  )
73
- self.client = Anthropic(api_key=self._api_key, base_url=self._url)
74
- self.async_client = AsyncAnthropic(
75
- api_key=self._api_key, base_url=self._url
76
- )
77
+ self.client = OpenAI(base_url=self._url, api_key=self._api_key)
77
78
 
78
- def _convert_response_from_anthropic_to_openai(self, response):
79
- # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py
80
- obj = ChatCompletion.construct(
81
- id=None,
82
- choices=[
83
- dict(
84
- index=0,
85
- message={
86
- "role": "assistant",
87
- "content": next(
88
- content.text
89
- for content in response.content
90
- if content.type == "text"
91
- ),
92
- },
93
- finish_reason=response.stop_reason,
94
- )
95
- ],
96
- created=None,
97
- model=response.model,
98
- object="chat.completion",
79
+ self.async_client = AsyncOpenAI(
80
+ api_key=self._api_key, base_url=self._url
99
81
  )
100
- return obj
101
82
 
102
83
  @property
103
84
  def token_counter(self) -> BaseTokenCounter:
@@ -126,22 +107,13 @@ class AnthropicModel(BaseModelBackend):
126
107
  Returns:
127
108
  ChatCompletion: Response in the OpenAI API format.
128
109
  """
129
- from anthropic import NOT_GIVEN
130
-
131
- if messages[0]["role"] == "system":
132
- sys_msg = str(messages.pop(0)["content"])
133
- else:
134
- sys_msg = NOT_GIVEN # type: ignore[assignment]
135
- response = self.client.messages.create(
110
+ response = self.client.chat.completions.create(
136
111
  model=self.model_type,
137
- system=sys_msg,
138
- messages=messages, # type: ignore[arg-type]
112
+ messages=messages,
139
113
  **self.model_config_dict,
114
+ tools=tools, # type: ignore[arg-type]
140
115
  )
141
116
 
142
- # format response to openai format
143
- response = self._convert_response_from_anthropic_to_openai(response)
144
-
145
117
  return response
146
118
 
147
119
  async def _arun(
@@ -159,21 +131,14 @@ class AnthropicModel(BaseModelBackend):
159
131
  Returns:
160
132
  ChatCompletion: Response in the OpenAI API format.
161
133
  """
162
- from anthropic import NOT_GIVEN
163
-
164
- if messages[0]["role"] == "system":
165
- sys_msg = str(messages.pop(0)["content"])
166
- else:
167
- sys_msg = NOT_GIVEN # type: ignore[assignment]
168
- response = await self.async_client.messages.create(
134
+ response = await self.async_client.chat.completions.create(
169
135
  model=self.model_type,
170
- system=sys_msg,
171
- messages=messages, # type: ignore[arg-type]
136
+ messages=messages,
172
137
  **self.model_config_dict,
138
+ tools=tools, # type: ignore[arg-type]
173
139
  )
174
140
 
175
- # format response to openai format
176
- return self._convert_response_from_anthropic_to_openai(response)
141
+ return response
177
142
 
178
143
  def check_model_config(self):
179
144
  r"""Check whether the model configuration is valid for anthropic
@@ -181,8 +146,7 @@ class AnthropicModel(BaseModelBackend):
181
146
 
182
147
  Raises:
183
148
  ValueError: If the model configuration dictionary contains any
184
- unexpected arguments to OpenAI API, or it does not contain
185
- :obj:`model_path` or :obj:`server_url`.
149
+ unexpected arguments to Anthropic API.
186
150
  """
187
151
  for param in self.model_config_dict:
188
152
  if param not in ANTHROPIC_API_PARAMS:
@@ -97,9 +97,16 @@ class SGLangModel(BaseModelBackend):
97
97
  def _start_server(self) -> None:
98
98
  try:
99
99
  if not self._url:
100
+ tool_call_flag = self.model_config_dict.get("tools")
101
+ tool_call_arg = (
102
+ f"--tool-call-parser {self._api_key} "
103
+ if tool_call_flag
104
+ else ""
105
+ )
100
106
  cmd = (
101
107
  f"python -m sglang.launch_server "
102
108
  f"--model-path {self.model_type} "
109
+ f"{tool_call_arg}"
103
110
  f"--port 30000 "
104
111
  f"--host 0.0.0.0"
105
112
  )
@@ -265,6 +272,19 @@ class SGLangModel(BaseModelBackend):
265
272
  """
266
273
  return self.model_config_dict.get('stream', False)
267
274
 
275
+ def __del__(self):
276
+ r"""Properly clean up resources when the model is destroyed."""
277
+ self.cleanup()
278
+
279
+ def cleanup(self):
280
+ r"""Terminate the server process and clean up resources."""
281
+ with self._lock:
282
+ if self.server_process:
283
+ _terminate_process(self.server_process)
284
+ self.server_process = None
285
+ self._client = None
286
+ logging.info("Server process terminated during cleanup.")
287
+
268
288
 
269
289
  # Below are helper functions from sglang.utils
270
290
  def _terminate_process(process):
@@ -326,21 +346,25 @@ def _execute_shell_command(command: str) -> subprocess.Popen:
326
346
  return subprocess.Popen(parts, text=True, stderr=subprocess.STDOUT)
327
347
 
328
348
 
329
- def _wait_for_server(base_url: str, timeout: Optional[int] = None) -> None:
349
+ def _wait_for_server(base_url: str, timeout: Optional[int] = 30) -> None:
330
350
  r"""Wait for the server to be ready by polling the /v1/models endpoint.
331
351
 
332
352
  Args:
333
353
  base_url: The base URL of the server
334
- timeout: Maximum time to wait in seconds. None means wait forever.
354
+ timeout: Maximum time to wait in seconds. Default is 30 seconds.
335
355
  """
336
356
  import requests
337
357
 
358
+ # Set a default value if timeout is None
359
+ actual_timeout = 30 if timeout is None else timeout
360
+
338
361
  start_time = time.time()
339
362
  while True:
340
363
  try:
341
364
  response = requests.get(
342
365
  f"{base_url}/v1/models",
343
366
  headers={"Authorization": "Bearer None"},
367
+ timeout=5, # Add a timeout for the request itself
344
368
  )
345
369
  if response.status_code == 200:
346
370
  time.sleep(5)
@@ -356,9 +380,15 @@ def _wait_for_server(base_url: str, timeout: Optional[int] = None) -> None:
356
380
  )
357
381
  break
358
382
 
359
- if timeout and time.time() - start_time > timeout:
383
+ if time.time() - start_time > actual_timeout:
384
+ raise TimeoutError(
385
+ f"Server did not become ready within "
386
+ f"{actual_timeout} seconds"
387
+ )
388
+ except (requests.exceptions.RequestException, TimeoutError) as e:
389
+ if time.time() - start_time > actual_timeout:
360
390
  raise TimeoutError(
361
- "Server did not become ready within timeout period"
391
+ f"Server did not become ready within "
392
+ f"{actual_timeout} seconds: {e}"
362
393
  )
363
- except requests.exceptions.RequestException:
364
394
  time.sleep(1)
camel/py.typed ADDED
File without changes
@@ -13,7 +13,7 @@
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  from __future__ import annotations
15
15
 
16
- from typing import List, Union
16
+ from typing import List, Optional, Union
17
17
 
18
18
  from pydantic import BaseModel, ConfigDict, Field
19
19
 
@@ -45,6 +45,7 @@ class Relationship(BaseModel):
45
45
  subj (Node): The subject/source node of the relationship.
46
46
  obj (Node): The object/target node of the relationship.
47
47
  type (str): The type of the relationship.
48
+ timestamp (str, optional): The timestamp of the relationship.
48
49
  properties (dict): Additional properties associated with the
49
50
  relationship.
50
51
  """
@@ -52,6 +53,7 @@ class Relationship(BaseModel):
52
53
  subj: Node
53
54
  obj: Node
54
55
  type: str = "Relationship"
56
+ timestamp: Optional[str] = None
55
57
  properties: dict = Field(default_factory=dict)
56
58
 
57
59
 
@@ -339,18 +339,24 @@ class Neo4jGraph(BaseGraphStorage):
339
339
  ]
340
340
  )
341
341
 
342
- def add_triplet(self, subj: str, obj: str, rel: str) -> None:
343
- r"""Adds a relationship (triplet) between two entities in the database.
342
+ def add_triplet(
343
+ self, subj: str, obj: str, rel: str, timestamp: Optional[str] = None
344
+ ) -> None:
345
+ r"""Adds a relationship (triplet) between two entities
346
+ in the database with a timestamp.
344
347
 
345
348
  Args:
346
349
  subj (str): The identifier for the subject entity.
347
350
  obj (str): The identifier for the object entity.
348
351
  rel (str): The relationship between the subject and object.
352
+ timestamp (Optional[str]): The timestamp of the relationship.
353
+ Defaults to None.
349
354
  """
350
355
  query = """
351
356
  MERGE (n1:`%s` {id:$subj})
352
357
  MERGE (n2:`%s` {id:$obj})
353
- MERGE (n1)-[:`%s`]->(n2)
358
+ MERGE (n1)-[r:`%s`]->(n2)
359
+ SET r.timestamp = $timestamp
354
360
  """
355
361
 
356
362
  prepared_statement = query % (
@@ -361,7 +367,10 @@ class Neo4jGraph(BaseGraphStorage):
361
367
 
362
368
  # Execute the query within a database session
363
369
  with self.driver.session(database=self.database) as session:
364
- session.run(prepared_statement, {"subj": subj, "obj": obj})
370
+ session.run(
371
+ prepared_statement,
372
+ {"subj": subj, "obj": obj, "timestamp": timestamp},
373
+ )
365
374
 
366
375
  def _delete_rel(self, subj: str, obj: str, rel: str) -> None:
367
376
  r"""Deletes a specific relationship between two nodes in the Neo4j
@@ -721,3 +730,68 @@ class Neo4jGraph(BaseGraphStorage):
721
730
  return result[0] if result else {}
722
731
  except CypherSyntaxError as e:
723
732
  raise ValueError(f"Generated Cypher Statement is not valid\n{e}")
733
+
734
+ def get_triplet(
735
+ self,
736
+ subj: Optional[str] = None,
737
+ obj: Optional[str] = None,
738
+ rel: Optional[str] = None,
739
+ ) -> List[Dict[str, Any]]:
740
+ r"""
741
+ Query triplet information. If subj, obj, or rel is
742
+ not specified, returns all matching triplets.
743
+
744
+ Args:
745
+ subj (Optional[str]): The ID of the subject node.
746
+ If None, matches any subject node.
747
+ obj (Optional[str]): The ID of the object node.
748
+ If None, matches any object node.
749
+ rel (Optional[str]): The type of relationship.
750
+ If None, matches any relationship type.
751
+
752
+ Returns:
753
+ List[Dict[str, Any]]: A list of matching triplets,
754
+ each containing subj, obj, rel, and timestamp.
755
+ """
756
+ import logging
757
+
758
+ logging.basicConfig(level=logging.DEBUG)
759
+ logger = logging.getLogger(__name__)
760
+
761
+ # Construct the query
762
+ query = """
763
+ MATCH (n1:Entity)-[r]->(n2:Entity)
764
+ WHERE ($subj IS NULL OR n1.id = $subj)
765
+ AND ($obj IS NULL OR n2.id = $obj)
766
+ AND ($rel IS NULL OR type(r) = $rel)
767
+ RETURN n1.id AS subj, n2.id AS obj,
768
+ type(r) AS rel, r.timestamp AS timestamp
769
+ """
770
+
771
+ # Construct the query parameters
772
+ params = {
773
+ "subj": subj
774
+ if subj is not None
775
+ else None, # If subj is None, match any subject node
776
+ "obj": obj
777
+ if obj is not None
778
+ else None, # If obj is None, match any object node
779
+ "rel": rel
780
+ if rel is not None
781
+ else None, # If rel is None, match any relationship type
782
+ }
783
+
784
+ logger.debug(f"Executing query: {query}")
785
+ logger.debug(f"Query parameters: {params}")
786
+
787
+ with self.driver.session(database=self.database) as session:
788
+ try:
789
+ result = session.run(query, params)
790
+ records = [record.data() for record in result]
791
+ logger.debug(
792
+ f"Query returned {len(records)} records: {records}"
793
+ )
794
+ return records
795
+ except Exception as e:
796
+ logger.error(f"Error executing query: {e}")
797
+ return []
@@ -58,6 +58,7 @@ from .mcp_toolkit import MCPToolkit
58
58
  from .web_toolkit import WebToolkit
59
59
  from .file_write_toolkit import FileWriteToolkit
60
60
  from .terminal_toolkit import TerminalToolkit
61
+ from .pubmed_toolkit import PubMedToolkit
61
62
 
62
63
 
63
64
  __all__ = [
@@ -104,4 +105,5 @@ __all__ = [
104
105
  'WebToolkit',
105
106
  'FileWriteToolkit',
106
107
  'TerminalToolkit',
108
+ 'PubMedToolkit',
107
109
  ]