agno 1.7.12__py3-none-any.whl → 1.8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/tools/duckduckgo.py CHANGED
@@ -5,9 +5,9 @@ from agno.tools import Toolkit
 from agno.utils.log import log_debug

 try:
-    from duckduckgo_search import DDGS
+    from ddgs import DDGS
 except ImportError:
-    raise ImportError("`duckduckgo-search` not installed. Please install using `pip install duckduckgo-search`")
+    raise ImportError("`duckduckgo-search` not installed. Please install using `pip install ddgs`")


 class DuckDuckGoTools(Toolkit):
@@ -18,9 +18,7 @@ class DuckDuckGoTools(Toolkit):
         news (bool): Enable DuckDuckGo news function.
         modifier (Optional[str]): A modifier to be used in the search request.
         fixed_max_results (Optional[int]): A fixed number of maximum results.
-        headers (Optional[Any]): Headers to be used in the search request.
         proxy (Optional[str]): Proxy to be used in the search request.
-        proxies (Optional[Any]): A list of proxies to be used in the search request.
         timeout (Optional[int]): The maximum number of seconds to wait for a response.

     """
@@ -31,16 +29,12 @@ class DuckDuckGoTools(Toolkit):
         news: bool = True,
         modifier: Optional[str] = None,
         fixed_max_results: Optional[int] = None,
-        headers: Optional[Any] = None,
         proxy: Optional[str] = None,
-        proxies: Optional[Any] = None,
         timeout: Optional[int] = 10,
         verify_ssl: bool = True,
         **kwargs,
     ):
-        self.headers: Optional[Any] = headers
         self.proxy: Optional[str] = proxy
-        self.proxies: Optional[Any] = proxies
         self.timeout: Optional[int] = timeout
         self.fixed_max_results: Optional[int] = fixed_max_results
         self.modifier: Optional[str] = modifier
@@ -68,11 +62,10 @@ class DuckDuckGoTools(Toolkit):
         search_query = f"{self.modifier} {query}" if self.modifier else query

         log_debug(f"Searching DDG for: {search_query}")
-        ddgs = DDGS(
-            headers=self.headers, proxy=self.proxy, proxies=self.proxies, timeout=self.timeout, verify=self.verify_ssl
-        )
+        with DDGS(proxy=self.proxy, timeout=self.timeout, verify=self.verify_ssl) as ddgs:
+            results = ddgs.text(search_query, max_results=actual_max_results)

-        return json.dumps(ddgs.text(keywords=search_query, max_results=actual_max_results), indent=2)
+        return json.dumps(results, indent=2)

     def duckduckgo_news(self, query: str, max_results: int = 5) -> str:
         """Use this function to get the latest news from DuckDuckGo.
@@ -87,8 +80,7 @@ class DuckDuckGoTools(Toolkit):
         actual_max_results = self.fixed_max_results or max_results

         log_debug(f"Searching DDG news for: {query}")
-        ddgs = DDGS(
-            headers=self.headers, proxy=self.proxy, proxies=self.proxies, timeout=self.timeout, verify=self.verify_ssl
-        )
+        with DDGS(proxy=self.proxy, timeout=self.timeout, verify=self.verify_ssl) as ddgs:
+            results = ddgs.news(query, max_results=actual_max_results)

-        return json.dumps(ddgs.news(keywords=query, max_results=actual_max_results), indent=2)
+        return json.dumps(results, indent=2)
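
The search backend moves from the deprecated `duckduckgo-search` package to its successor `ddgs`, and the removed `headers`/`proxies` keyword arguments mean existing callers passing them will now raise a TypeError. A minimal usage sketch of the updated toolkit follows; the agent wiring and model id are illustrative assumptions, not part of this diff.

```python
# Minimal sketch of the updated DuckDuckGoTools (assumes `pip install ddgs`).
# The Agent/OpenAIChat wiring and model id are illustrative, not from this diff.
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    tools=[DuckDuckGoTools(fixed_max_results=5, timeout=10)],
)
agent.print_response("What changed in the latest Python release?")
```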
agno/tools/e2b.py CHANGED
@@ -58,7 +58,7 @@ class E2BTools(Toolkit):

         # According to official docs, the parameter is 'timeout' (in seconds), not 'timeout_ms'
         try:
-            self.sandbox = Sandbox(api_key=self.api_key, timeout=timeout, **self.sandbox_options)
+            self.sandbox = Sandbox.create(api_key=self.api_key, timeout=timeout, **self.sandbox_options)
         except Exception as e:
             logger.error(f"Warning: Could not create sandbox: {e}")
             raise e
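
The sandbox is now built through the `Sandbox.create(...)` factory rather than the constructor. A hedged sketch of the same call path outside the toolkit follows; the import path, `run_code` call, and API key are assumptions about the E2B SDK, not part of this diff.

```python
# Sketch of factory-style sandbox creation mirrored by this diff.
# Assumes the e2b-code-interpreter package and a valid API key; run_code and
# .logs are assumptions about the E2B SDK, not taken from this diff.
from e2b_code_interpreter import Sandbox

sandbox = Sandbox.create(api_key="e2b_your_key_here", timeout=300)  # timeout in seconds
execution = sandbox.run_code("print(2 + 2)")
print(execution.logs)
```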
agno/tools/github.py CHANGED
@@ -1698,20 +1698,32 @@ class GithubTools(Toolkit):
             log_debug(f"Final search query: {search_query}")
             code_results = self.g.search_code(search_query)

-            # Process results
-            results = []
-            # Limit to 50 results to prevent timeouts
-            for code in code_results[:50]:
-                code_info = {
-                    "repository": code.repository.full_name,
-                    "path": code.path,
-                    "name": code.name,
-                    "sha": code.sha,
-                    "html_url": code.html_url,
-                    "git_url": code.git_url,
-                    "score": code.score,
-                }
-                results.append(code_info)
+            results: list[dict] = []
+            limit = 60
+            max_pages = 2  # GitHub returns 30 items per page, so 2 pages covers our limit
+            page_index = 0
+
+            while len(results) < limit and page_index < max_pages:
+                # Fetch one page of results from GitHub API
+                page_items = code_results.get_page(page_index)
+
+                # Stop if no more results available
+                if not page_items:
+                    break
+
+                # Process each code result in the current page
+                for code in page_items:
+                    code_info = {
+                        "repository": code.repository.full_name,
+                        "path": code.path,
+                        "name": code.name,
+                        "sha": code.sha,
+                        "html_url": code.html_url,
+                        "git_url": code.git_url,
+                        "score": code.score,
+                    }
+                    results.append(code_info)
+                page_index += 1

             # Return search results
             return json.dumps(
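
`search_code` now walks pages explicitly via PyGithub's `PaginatedList.get_page` instead of slicing the lazy result list. A standalone sketch of the same pagination pattern is below; the token, repository, and query strings are placeholders.

```python
# Standalone sketch of the page-by-page pattern used above, assuming PyGithub
# (`pip install PyGithub`); the token and search query are placeholders.
from github import Github

g = Github("ghp_your_token_here")
code_results = g.search_code("repo:agno-agi/agno DuckDuckGoTools")

results = []
limit, max_pages, page_index = 60, 2, 0
while len(results) < limit and page_index < max_pages:
    page_items = code_results.get_page(page_index)  # roughly 30 items per page
    if not page_items:
        break
    results.extend({"path": c.path, "repository": c.repository.full_name} for c in page_items)
    page_index += 1
print(len(results), "matches")
```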
agno/tools/gmail.py CHANGED
@@ -133,7 +133,7 @@ class GmailTools(Toolkit):
         send_email (bool): Enable sending emails. Defaults to True.
         search_emails (bool): Enable searching emails. Defaults to True.
         send_email_reply (bool): Enable sending email replies. Defaults to True.
-        creds (Optional[Credentials]): Pre-existing credentials. Defaults to None.
+        creds (Optional[Credentials]): Pre-fetched OAuth credentials. Use this to skip a new auth flow. Defaults to None.
         credentials_path (Optional[str]): Path to credentials file. Defaults to None.
         token_path (Optional[str]): Path to token file. Defaults to None.
         scopes (Optional[List[str]]): Custom OAuth scopes. If None, uses DEFAULT_SCOPES.
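
The reworded docstring clarifies that `creds` accepts credentials obtained elsewhere so the toolkit does not start a new OAuth flow. A hedged sketch of that usage follows; the token path and scope are placeholders.

```python
# Sketch of passing pre-fetched OAuth credentials to GmailTools so no new
# auth flow is started. The token file path and scope below are placeholders.
from google.oauth2.credentials import Credentials
from agno.tools.gmail import GmailTools

creds = Credentials.from_authorized_user_file(
    "token.json",
    scopes=["https://www.googleapis.com/auth/gmail.readonly"],
)
gmail_tools = GmailTools(creds=creds)
```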
agno/tools/memori.py ADDED
@@ -0,0 +1,387 @@
+import json
+from typing import Any, Dict, Optional
+
+from agno.agent import Agent
+from agno.tools.toolkit import Toolkit
+from agno.utils.log import log_debug, log_error, log_info, log_warning
+
+try:
+    from memori import Memori, create_memory_tool
+except ImportError:
+    raise ImportError("`memorisdk` package not found. Please install it with `pip install memorisdk`")
+
+
+class MemoriTools(Toolkit):
+    """
+    Memori ToolKit for Agno Agents and Teams, providing persistent memory capabilities.
+
+    This toolkit integrates Memori's memory system with Agno, allowing Agents and Teams to:
+    - Store and retrieve conversation history
+    - Search through past interactions
+    - Maintain user preferences and context
+    - Build long-term memory across sessions
+
+    Requirements:
+    - pip install memorisdk
+    - Database connection string (SQLite, PostgreSQL, etc.)
+
+    Example:
+        ```python
+        from agno.tools.memori import MemoriTools
+
+        # Initialize with SQLite (default)
+        memori_tools = MemoriTools(
+            database_connect="sqlite:///agent_memory.db",
+            namespace="my_agent",
+            auto_ingest=True  # Automatically ingest conversations
+        )
+
+        # Add to agent
+        agent = Agent(
+            model=OpenAIChat(),
+            tools=[memori_tools],
+            description="An AI assistant with persistent memory"
+        )
+        ```
+    """
+
+    def __init__(
+        self,
+        database_connect: Optional[str] = None,
+        namespace: Optional[str] = None,
+        conscious_ingest: bool = True,
+        auto_ingest: bool = True,
+        verbose: bool = False,
+        config: Optional[Dict[str, Any]] = None,
+        auto_enable: bool = True,
+        **kwargs,
+    ):
+        """
+        Initialize Memori toolkit.
+
+        Args:
+            database_connect: Database connection string (e.g., "sqlite:///memory.db")
+            namespace: Namespace for organizing memories (e.g., "agent_v1", "user_session")
+            conscious_ingest: Whether to use conscious memory ingestion
+            auto_ingest: Whether to automatically ingest conversations into memory
+            verbose: Enable verbose logging from Memori
+            config: Additional Memori configuration
+            auto_enable: Automatically enable the memory system on initialization
+            **kwargs: Additional arguments passed to Toolkit base class
+        """
+        super().__init__(
+            name="memori_tools",
+            tools=[
+                self.search_memory,
+                self.record_conversation,
+                self.get_memory_stats,
+            ],
+            **kwargs,
+        )
+
+        # Set default database connection if not provided
+        if not database_connect:
+            sqlite_db = "sqlite:///agno_memori_memory.db"
+            log_info(f"No database connection provided, using default SQLite database at {sqlite_db}")
+            database_connect = sqlite_db
+
+        self.database_connect = database_connect
+        self.namespace = namespace or "agno_default"
+        self.conscious_ingest = conscious_ingest
+        self.auto_ingest = auto_ingest
+        self.verbose = verbose
+        self.config = config or {}
+
+        try:
+            # Initialize Memori memory system
+            log_debug(f"Initializing Memori with database: {self.database_connect}")
+            self.memory_system = Memori(
+                database_connect=self.database_connect,
+                conscious_ingest=self.conscious_ingest,
+                auto_ingest=self.auto_ingest,
+                verbose=self.verbose,
+                namespace=self.namespace,
+                **self.config,
+            )
+
+            # Enable the memory system if auto_enable is True
+            if auto_enable:
+                self.memory_system.enable()
+                log_debug("Memori memory system enabled")
+
+            # Create the memory tool for internal use
+            self._memory_tool = create_memory_tool(self.memory_system)
+
+        except Exception as e:
+            log_error(f"Failed to initialize Memori: {e}")
+            raise ConnectionError("Failed to initialize Memori memory system") from e
+
+    def search_memory(
+        self,
+        agent: Agent,
+        query: str,
+        limit: Optional[int] = None,
+    ) -> str:
+        """
+        Search the Agent's memory for past conversations and information.
+
+        This performs semantic search across all stored memories to find
+        relevant information based on the provided query.
+
+        Args:
+            query: What to search for in memory (e.g., "past conversations about AI", "user preferences")
+            limit: Maximum number of results to return (optional)
+
+        Returns:
+            str: JSON-encoded search results or error message
+
+        Example:
+            search_memory("user's favorite programming languages")
+            search_memory("previous discussions about machine learning")
+        """
+        try:
+            if not query.strip():
+                return json.dumps({"error": "Please provide a search query"})
+
+            log_debug(f"Searching memory for: {query}")
+
+            # Execute search using Memori's memory tool
+            result = self._memory_tool.execute(query=query.strip())
+
+            if result:
+                # If limit is specified, truncate results
+                if limit and isinstance(result, list):
+                    result = result[:limit]
+
+                return json.dumps(
+                    {
+                        "success": True,
+                        "query": query,
+                        "results": result,
+                        "count": len(result) if isinstance(result, list) else 1,
+                    }
+                )
+            else:
+                return json.dumps(
+                    {
+                        "success": True,
+                        "query": query,
+                        "results": [],
+                        "count": 0,
+                        "message": "No relevant memories found",
+                    }
+                )
+
+        except Exception as e:
+            log_error(f"Error searching memory: {e}")
+            return json.dumps({"success": False, "error": f"Memory search error: {str(e)}"})
+
+    def record_conversation(self, agent: Agent, content: str) -> str:
+        """
+        Add important information or facts to memory.
+
+        Use this tool to store important information, user preferences, facts, or context that should be remembered
+        for future conversations.
+
+        Args:
+            content: The information/facts to store in memory
+
+        Returns:
+            str: Success message or error details
+
+        Example:
+            record_conversation("User prefers Python over JavaScript")
+            record_conversation("User is working on an e-commerce project using Django")
+            record_conversation("User's name is John and they live in NYC")
+        """
+        try:
+            if not content.strip():
+                return json.dumps({"success": False, "error": "Content cannot be empty"})
+
+            log_debug(f"Adding conversation: {content}")
+
+            # Extract the actual AI response from the agent's conversation history
+            ai_output = "I've noted this information and will remember it."
+
+            self.memory_system.record_conversation(user_input=content, ai_output=str(ai_output))
+            return json.dumps(
+                {
+                    "success": True,
+                    "message": "Memory added successfully via conversation recording",
+                    "content_length": len(content),
+                }
+            )
+
+        except Exception as e:
+            log_error(f"Error adding memory: {e}")
+            return json.dumps({"success": False, "error": f"Failed to add memory: {str(e)}"})
+
+    def get_memory_stats(
+        self,
+        agent: Agent,
+    ) -> str:
+        """
+        Get statistics about the memory system.
+
+        Returns information about the current state of the memory system,
+        including total memories, memory distribution by retention type
+        (short-term vs long-term), and system configuration.
+
+        Returns:
+            str: JSON-encoded memory statistics
+
+        Example:
+            Returns statistics like:
+            {
+                "success": true,
+                "total_memories": 42,
+                "memories_by_retention": {
+                    "short_term": 5,
+                    "long_term": 37
+                },
+                "namespace": "my_agent",
+                "conscious_ingest": true,
+                "auto_ingest": true,
+                "memory_system_enabled": true
+            }
+        """
+        try:
+            log_debug("Retrieving memory statistics")
+
+            # Base stats about the system configuration
+            stats = {
+                "success": True,
+                "namespace": self.namespace,
+                "database_connect": self.database_connect,
+                "conscious_ingest": self.conscious_ingest,
+                "auto_ingest": self.auto_ingest,
+                "verbose": self.verbose,
+                "memory_system_enabled": hasattr(self.memory_system, "_enabled") and self.memory_system._enabled,
+            }
+
+            # Get Memori's built-in memory statistics
+            try:
+                if hasattr(self.memory_system, "get_memory_stats"):
+                    # Use the get_memory_stats method as shown in the example
+                    memori_stats = self.memory_system.get_memory_stats()
+
+                    # Add the Memori-specific stats to our response
+                    if isinstance(memori_stats, dict):
+                        # Include total memories
+                        if "total_memories" in memori_stats:
+                            stats["total_memories"] = memori_stats["total_memories"]
+
+                        # Include memory distribution by retention type
+                        if "memories_by_retention" in memori_stats:
+                            stats["memories_by_retention"] = memori_stats["memories_by_retention"]
+
+                            # Also add individual counts for convenience
+                            retention_info = memori_stats["memories_by_retention"]
+                            stats["short_term_memories"] = retention_info.get("short_term", 0)
+                            stats["long_term_memories"] = retention_info.get("long_term", 0)
+
+                        # Include any other available stats
+                        for key, value in memori_stats.items():
+                            if key not in stats:
+                                stats[key] = value
+
+                    log_debug(
+                        f"Retrieved memory stats: total={stats.get('total_memories', 0)}, "
+                        f"short_term={stats.get('short_term_memories', 0)}, "
+                        f"long_term={stats.get('long_term_memories', 0)}"
+                    )
+
+                else:
+                    log_debug("get_memory_stats method not available, providing basic stats only")
+                    stats["total_memories"] = 0
+                    stats["memories_by_retention"] = {"short_term": 0, "long_term": 0}
+                    stats["short_term_memories"] = 0
+                    stats["long_term_memories"] = 0
+
+            except Exception as e:
+                log_debug(f"Could not retrieve detailed memory stats: {e}")
+                # Provide basic stats if detailed stats fail
+                stats["total_memories"] = 0
+                stats["memories_by_retention"] = {"short_term": 0, "long_term": 0}
+                stats["short_term_memories"] = 0
+                stats["long_term_memories"] = 0
+                stats["stats_warning"] = "Detailed memory statistics not available"
+
+            return json.dumps(stats)
+
+        except Exception as e:
+            log_error(f"Error getting memory stats: {e}")
+            return json.dumps({"success": False, "error": f"Failed to get memory statistics: {str(e)}"})
+
+    def enable_memory_system(self) -> bool:
+        """Enable the Memori memory system."""
+        try:
+            self.memory_system.enable()
+            log_debug("Memori memory system enabled")
+            return True
+        except Exception as e:
+            log_error(f"Failed to enable memory system: {e}")
+            return False
+
+    def disable_memory_system(self) -> bool:
+        """Disable the Memori memory system."""
+        try:
+            if hasattr(self.memory_system, "disable"):
+                self.memory_system.disable()
+                log_debug("Memori memory system disabled")
+                return True
+            else:
+                log_warning("Memory system disable method not available")
+                return False
+        except Exception as e:
+            log_error(f"Failed to disable memory system: {e}")
+            return False
+
+
+def create_memori_search_tool(memori_toolkit: MemoriTools):
+    """
+    Create a standalone memory search function for use with Agno agents.
+
+    This is a convenience function that creates a memory search tool similar
+    to the pattern shown in the Memori example code.
+
+    Args:
+        memori_toolkit: An initialized MemoriTools instance
+
+    Returns:
+        Callable: A memory search function that can be used as an agent tool
+
+    Example:
+        ```python
+        memori_tools = MemoriTools(database_connect="sqlite:///memory.db")
+        search_tool = create_memori_search_tool(memori_tools)
+
+        agent = Agent(
+            model=OpenAIChat(),
+            tools=[search_tool],
+            description="Agent with memory search capability"
+        )
+        ```
+    """
+
+    def search_memory(query: str) -> str:
+        """
+        Search the agent's memory for past conversations and information.
+
+        Args:
+            query: What to search for in memory
+
+        Returns:
+            str: Search results or error message
+        """
+        try:
+            if not query.strip():
+                return "Please provide a search query"
+
+            result = memori_toolkit._memory_tool.execute(query=query.strip())
+            return str(result) if result else "No relevant memories found"
+
+        except Exception as e:
+            return f"Memory search error: {str(e)}"
+
+    return search_memory
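
Putting the pieces of the new file together, here is a short sketch that combines the `MemoriTools` toolkit with the standalone `create_memori_search_tool` helper defined above; it follows the file's own docstring examples, with the model id and database path as illustrative placeholders.

```python
# Sketch combining MemoriTools with the standalone search helper, per the
# docstring examples above. Assumes `pip install memorisdk`; the model id and
# database path are illustrative placeholders.
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.memori import MemoriTools, create_memori_search_tool

memori_tools = MemoriTools(database_connect="sqlite:///agent_memory.db", namespace="demo")
search_tool = create_memori_search_tool(memori_tools)

agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    tools=[memori_tools, search_tool],
    description="An assistant that remembers prior conversations",
)
agent.print_response("What do you remember about my project?")
```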
agno/tools/neo4j.py ADDED
@@ -0,0 +1,132 @@
+import os
+from typing import Any, List, Optional
+
+try:
+    from neo4j import GraphDatabase
+except ImportError:
+    raise ImportError("`neo4j` not installed. Please install using `pip install neo4j`")
+
+from agno.tools import Toolkit
+from agno.utils.log import log_debug, logger
+
+
+class Neo4jTools(Toolkit):
+    def __init__(
+        self,
+        uri: Optional[str] = None,
+        user: Optional[str] = None,
+        password: Optional[str] = None,
+        database: Optional[str] = None,
+        list_labels: bool = True,
+        list_relationships: bool = True,
+        get_schema: bool = True,
+        run_cypher: bool = True,
+        **kwargs,
+    ):
+        """
+        Initialize the Neo4jTools toolkit.
+        Connection parameters (uri/user/password or host/port) can be provided.
+        If not provided, falls back to NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD env vars.
+
+        Args:
+            uri (Optional[str]): The Neo4j URI.
+            user (Optional[str]): The Neo4j username.
+            password (Optional[str]): The Neo4j password.
+            host (Optional[str]): The Neo4j host.
+            port (Optional[int]): The Neo4j port.
+            database (Optional[str]): The Neo4j database.
+            list_labels (bool): Whether to list node labels.
+            list_relationships (bool): Whether to list relationship types.
+            get_schema (bool): Whether to get the schema.
+            run_cypher (bool): Whether to run Cypher queries.
+            **kwargs: Additional keyword arguments.
+        """
+        # Determine the connection URI and credentials
+        uri = uri or os.getenv("NEO4J_URI", "bolt://localhost:7687")
+        user = user or os.getenv("NEO4J_USERNAME")
+        password = password or os.getenv("NEO4J_PASSWORD")
+
+        if user is None or password is None:
+            raise ValueError("Username or password for Neo4j not provided")
+
+        # Create the Neo4j driver
+        try:
+            self.driver = GraphDatabase.driver(uri, auth=(user, password))  # type: ignore
+            self.driver.verify_connectivity()
+            log_debug("Connected to Neo4j database")
+        except Exception as e:
+            logger.error(f"Failed to connect to Neo4j: {e}")
+            raise
+
+        self.database = database or "neo4j"
+
+        # Register toolkit methods as tools
+        tools: List[Any] = []
+        if list_labels:
+            tools.append(self.list_labels)
+        if list_relationships:
+            tools.append(self.list_relationship_types)
+        if get_schema:
+            tools.append(self.get_schema)
+        if run_cypher:
+            tools.append(self.run_cypher_query)
+        super().__init__(name="neo4j_tools", tools=tools, **kwargs)
+
+    def list_labels(self) -> list:
+        """
+        Retrieve all node labels present in the connected Neo4j database.
+        """
+        try:
+            log_debug("Listing node labels in Neo4j database")
+            with self.driver.session(database=self.database) as session:
+                result = session.run("CALL db.labels()")
+                labels = [record["label"] for record in result]
+                return labels
+        except Exception as e:
+            logger.error(f"Error listing labels: {e}")
+            return []
+
+    def list_relationship_types(self) -> list:
+        """
+        Retrieve all relationship types present in the connected Neo4j database.
+        """
+        try:
+            log_debug("Listing relationship types in Neo4j database")
+            with self.driver.session(database=self.database) as session:
+                result = session.run("CALL db.relationshipTypes()")
+                types = [record["relationshipType"] for record in result]
+                return types
+        except Exception as e:
+            logger.error(f"Error listing relationship types: {e}")
+            return []
+
+    def get_schema(self) -> list:
+        """
+        Retrieve a visualization of the database schema, including nodes and relationships.
+        """
+        try:
+            log_debug("Retrieving Neo4j schema visualization")
+            with self.driver.session(database=self.database) as session:
+                result = session.run("CALL db.schema.visualization()")
+                schema_data = result.data()
+                return schema_data
+        except Exception as e:
+            logger.error(f"Error getting Neo4j schema: {e}")
+            return []
+
+    def run_cypher_query(self, query: str) -> list:
+        """
+        Execute an arbitrary Cypher query against the connected Neo4j database.
+
+        Args:
+            query (str): The Cypher query string to execute.
+        """
+        try:
+            log_debug(f"Running Cypher query: {query}")
+            with self.driver.session(database=self.database) as session:
+                result = session.run(query)  # type: ignore[arg-type]
+                data = result.data()
+                return data
+        except Exception as e:
+            logger.error(f"Error running Cypher query: {e}")
+            return []
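
A short sketch of the new Neo4j toolkit, used both directly and through an agent; the URI, credentials, and model id are placeholders, and the env-var fallback (NEO4J_URI, NEO4J_USERNAME, NEO4J_PASSWORD) comes from the docstring above.

```python
# Sketch of using the new Neo4jTools directly and via an agent. The URI,
# credentials, and model id are placeholders; the NEO4J_* env vars are used
# as a fallback per the __init__ docstring above.
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.neo4j import Neo4jTools

neo4j_tools = Neo4jTools(uri="bolt://localhost:7687", user="neo4j", password="password")
print(neo4j_tools.list_labels())  # e.g. ["Person", "Movie"]
print(neo4j_tools.run_cypher_query("MATCH (n) RETURN count(n) AS nodes"))

agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), tools=[neo4j_tools])
agent.print_response("Which node labels exist in the graph?")
```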