graflo-1.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. graflo/README.md +18 -0
  2. graflo/__init__.py +70 -0
  3. graflo/architecture/__init__.py +38 -0
  4. graflo/architecture/actor.py +1120 -0
  5. graflo/architecture/actor_util.py +450 -0
  6. graflo/architecture/edge.py +297 -0
  7. graflo/architecture/onto.py +374 -0
  8. graflo/architecture/resource.py +161 -0
  9. graflo/architecture/schema.py +136 -0
  10. graflo/architecture/transform.py +292 -0
  11. graflo/architecture/util.py +93 -0
  12. graflo/architecture/vertex.py +586 -0
  13. graflo/caster.py +655 -0
  14. graflo/cli/__init__.py +14 -0
  15. graflo/cli/ingest.py +194 -0
  16. graflo/cli/manage_dbs.py +197 -0
  17. graflo/cli/plot_schema.py +132 -0
  18. graflo/cli/xml2json.py +93 -0
  19. graflo/data_source/__init__.py +48 -0
  20. graflo/data_source/api.py +339 -0
  21. graflo/data_source/base.py +97 -0
  22. graflo/data_source/factory.py +298 -0
  23. graflo/data_source/file.py +133 -0
  24. graflo/data_source/memory.py +72 -0
  25. graflo/data_source/registry.py +82 -0
  26. graflo/data_source/sql.py +185 -0
  27. graflo/db/__init__.py +44 -0
  28. graflo/db/arango/__init__.py +22 -0
  29. graflo/db/arango/conn.py +1026 -0
  30. graflo/db/arango/query.py +180 -0
  31. graflo/db/arango/util.py +88 -0
  32. graflo/db/conn.py +377 -0
  33. graflo/db/connection/__init__.py +6 -0
  34. graflo/db/connection/config_mapping.py +18 -0
  35. graflo/db/connection/onto.py +688 -0
  36. graflo/db/connection/wsgi.py +29 -0
  37. graflo/db/manager.py +119 -0
  38. graflo/db/neo4j/__init__.py +16 -0
  39. graflo/db/neo4j/conn.py +639 -0
  40. graflo/db/postgres/__init__.py +156 -0
  41. graflo/db/postgres/conn.py +425 -0
  42. graflo/db/postgres/resource_mapping.py +139 -0
  43. graflo/db/postgres/schema_inference.py +245 -0
  44. graflo/db/postgres/types.py +148 -0
  45. graflo/db/tigergraph/__init__.py +9 -0
  46. graflo/db/tigergraph/conn.py +2212 -0
  47. graflo/db/util.py +49 -0
  48. graflo/filter/__init__.py +21 -0
  49. graflo/filter/onto.py +525 -0
  50. graflo/logging.conf +22 -0
  51. graflo/onto.py +190 -0
  52. graflo/plot/__init__.py +17 -0
  53. graflo/plot/plotter.py +556 -0
  54. graflo/util/__init__.py +23 -0
  55. graflo/util/chunker.py +751 -0
  56. graflo/util/merge.py +150 -0
  57. graflo/util/misc.py +37 -0
  58. graflo/util/onto.py +332 -0
  59. graflo/util/transform.py +448 -0
  60. graflo-1.3.3.dist-info/METADATA +190 -0
  61. graflo-1.3.3.dist-info/RECORD +64 -0
  62. graflo-1.3.3.dist-info/WHEEL +4 -0
  63. graflo-1.3.3.dist-info/entry_points.txt +5 -0
  64. graflo-1.3.3.dist-info/licenses/LICENSE +126 -0
graflo/db/tigergraph/conn.py
@@ -0,0 +1,2212 @@
"""TigerGraph connection implementation for graph database operations.

This module implements the Connection interface for TigerGraph, providing
specific functionality for graph operations in TigerGraph. It handles:
- Vertex and edge management
- GSQL query execution
- Schema management
- Batch operations
- Graph traversal and analytics

Key Features:
- Vertex and edge type management
- GSQL query execution
- Schema definition and management
- Batch vertex and edge operations
- Graph analytics and traversal

Example:
    >>> conn = TigerGraphConnection(config)
    >>> conn.init_db(schema, clean_start=True)
    >>> conn.upsert_docs_batch(docs, "User", match_keys=["email"])
"""

import contextlib
import json
import logging
from typing import Any, cast

import requests
from requests import exceptions as requests_exceptions

from pyTigerGraph import TigerGraphConnection as PyTigerGraphConnection

from graflo.architecture.edge import Edge
from graflo.architecture.onto import Index
from graflo.architecture.schema import Schema
from graflo.architecture.vertex import FieldType, Vertex, VertexConfig
from graflo.db.conn import Connection
from graflo.db.connection.onto import TigergraphConfig
from graflo.filter.onto import Clause, Expression
from graflo.onto import AggregationType, DBFlavor, ExpressionFlavor
from graflo.util.transform import pick_unique_dict

logger = logging.getLogger(__name__)


class TigerGraphConnection(Connection):
    """
    TigerGraph database connection implementation.

    Key conceptual differences from ArangoDB:
    1. TigerGraph uses GSQL (Graph Query Language) instead of AQL
    2. Schema must be defined explicitly before data insertion
    3. No automatic collection creation - vertices and edges must be pre-defined
    4. Different query syntax and execution model
    5. Token-based authentication for some operations
    """

    flavor = DBFlavor.TIGERGRAPH

    def __init__(self, config: TigergraphConfig):
        super().__init__()
        self.config = config
        # Store base URLs for REST++ and GSQL endpoints
        self.restpp_url = f"{config.url_without_port}:{config.port}"
        self.gsql_url = f"{config.url_without_port}:{config.gs_port}"

        # Initialize pyTigerGraph connection for most operations.
        # Use type narrowing to help the type checker understand non-None values.
        # PyTigerGraphConnection has defaults for all parameters, so None values are acceptable.
        restpp_port: int | str = config.port if config.port is not None else "9000"
        gs_port: int | str = config.gs_port if config.gs_port is not None else "14240"
        graphname: str = (
            config.database if config.database is not None else "DefaultGraph"
        )
        username: str = config.username if config.username is not None else "tigergraph"
        password: str = config.password if config.password is not None else "tigergraph"
        cert_path: str | None = getattr(config, "certPath", None)

        # Build connection kwargs, only include certPath if it's not None
        conn_kwargs: dict[str, Any] = {
            "host": config.url_without_port,
            "restppPort": restpp_port,
            "gsPort": gs_port,
            "graphname": graphname,
            "username": username,
            "password": password,
        }
        if cert_path is not None:
            conn_kwargs["certPath"] = cert_path

        self.conn = PyTigerGraphConnection(**conn_kwargs)

        # Get an authentication token if a secret is provided
        if config.secret:
            try:
                self.conn.getToken(config.secret)
            except Exception as e:
                logger.warning(f"Failed to get authentication token: {e}")

    def _get_auth_headers(self) -> dict[str, str]:
        """Get HTTP Basic Auth headers if credentials are available.

        Returns:
            Dictionary with Authorization header if credentials exist
        """
        headers = {}
        if self.config.username and self.config.password:
            import base64

            credentials = f"{self.config.username}:{self.config.password}"
            encoded_credentials = base64.b64encode(credentials.encode()).decode()
            headers["Authorization"] = f"Basic {encoded_credentials}"
        return headers

    def _call_restpp_api(
        self,
        endpoint: str,
        method: str = "GET",
        data: dict[str, Any] | None = None,
        params: dict[str, str] | None = None,
    ) -> dict[str, Any] | list[dict]:
        """Call a TigerGraph REST++ API endpoint.

        Args:
            endpoint: REST++ API endpoint (e.g., "/graph/{graph_name}/vertices/{vertex_type}")
            method: HTTP method (GET, POST, etc.)
            data: Optional data to send in the request body (for POST)
            params: Optional query parameters

        Returns:
            Response data (dict or list)
        """
        url = f"{self.restpp_url}{endpoint}"

        headers = {
            "Content-Type": "application/json",
            **self._get_auth_headers(),
        }

        logger.debug(f"REST++ API call: {method} {url}")

        try:
            if method.upper() == "GET":
                response = requests.get(
                    url, headers=headers, params=params, timeout=120
                )
            elif method.upper() == "POST":
                response = requests.post(
                    url,
                    headers=headers,
                    data=json.dumps(data) if data else None,
                    params=params,
                    timeout=120,
                )
            elif method.upper() == "DELETE":
                response = requests.delete(
                    url, headers=headers, params=params, timeout=120
                )
            else:
                raise ValueError(f"Unsupported HTTP method: {method}")

            response.raise_for_status()
            return response.json()

        except requests_exceptions.HTTPError as errh:
            logger.error(f"HTTP Error: {errh}")
            error_response = {"error": True, "message": str(errh)}
            try:
                # Try to parse the error response for more details
                error_json = response.json()
                if isinstance(error_json, dict):
                    error_response.update(error_json)
                else:
                    error_response["details"] = response.text
            except Exception:
                error_response["details"] = response.text
            return error_response
        except requests_exceptions.ConnectionError as errc:
            logger.error(f"Error Connecting: {errc}")
            return {"error": True, "message": str(errc)}
        except requests_exceptions.Timeout as errt:
            logger.error(f"Timeout Error: {errt}")
            return {"error": True, "message": str(errt)}
        except requests_exceptions.RequestException as err:
            logger.error(f"An unexpected error occurred: {err}")
            return {"error": True, "message": str(err)}

    @contextlib.contextmanager
    def _ensure_graph_context(self, graph_name: str | None = None):
        """
        Context manager that ensures graph context for metadata operations.

        Updates conn.graphname for PyTigerGraph metadata operations that rely on it
        (e.g., getVertexTypes(), getEdgeTypes()).

        Args:
            graph_name: Name of the graph to use. If None, uses self.config.database.

        Yields:
            The graph name that was set.
        """
        graph_name = graph_name or self.config.database
        if not graph_name:
            raise ValueError(
                "Graph name must be provided via graph_name parameter or config.database"
            )

        old_graphname = self.conn.graphname
        self.conn.graphname = graph_name

        try:
            yield graph_name
        finally:
            # Restore original graphname
            self.conn.graphname = old_graphname
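
    # Usage sketch (illustrative, not a call site from this file): temporarily
    # scope PyTigerGraph metadata calls to one graph; conn.graphname is restored
    # even if the body raises.
    #
    #     with self._ensure_graph_context("MyGraph"):
    #         vertex_types = self.conn.getVertexTypes(force=True)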

    def graph_exists(self, name: str) -> bool:
        """
        Check if a graph with the given name exists.

        Uses the USE GRAPH command and checks the returned message.
        If the graph doesn't exist, USE GRAPH returns an error message like
        "Graph 'name' does not exist."

        Args:
            name: Name of the graph to check

        Returns:
            bool: True if the graph exists, False otherwise
        """
        try:
            result = self.conn.gsql(f"USE GRAPH {name}")
            result_str = str(result).lower()

            # If the graph doesn't exist, USE GRAPH returns an error message.
            # Check for common error messages indicating the graph doesn't exist.
            error_patterns = [
                "does not exist",
                "doesn't exist",
                "doesn't exist!",
                f"graph '{name.lower()}' does not exist",
            ]

            # If any error pattern is found, the graph doesn't exist
            for pattern in error_patterns:
                if pattern in result_str:
                    return False

            # If no error pattern is found, the graph likely exists
            # (USE GRAPH succeeded or returned a success message)
            return True
        except Exception as e:
            logger.debug(f"Error checking if graph '{name}' exists: {e}")
            # If there's an exception, try to parse it
            error_str = str(e).lower()
            if "does not exist" in error_str or "doesn't exist" in error_str:
                return False
            # For any other exception (e.g., connection issues), conservatively
            # report the graph as missing; create_database tolerates
            # "already exists" errors, so a false negative here is safe.
            return False
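
    # Illustrative: if self.conn.gsql("USE GRAPH Missing") returns the message
    # "Graph 'Missing' does not exist.", then graph_exists("Missing") is False;
    # any output without one of the error patterns yields True.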

    def create_database(
        self,
        name: str,
        vertex_names: list[str] | None = None,
        edge_names: list[str] | None = None,
    ):
        """
        Create a TigerGraph database (graph) using GSQL commands.

        This method creates a graph with explicitly attached vertices and edges.
        Example: CREATE GRAPH researchGraph (author, paper, wrote)

        This method uses the pyTigerGraph gsql() method to execute GSQL commands
        that create and use the graph. Supported in TigerGraph version 4.2.2+.

        Args:
            name: Name of the graph to create
            vertex_names: Optional list of vertex type names to attach to the graph
            edge_names: Optional list of edge type names to attach to the graph

        Raises:
            Exception: If graph creation fails
        """
        try:
            # Build the list of types to include in CREATE GRAPH
            all_types = []
            if vertex_names:
                all_types.extend(vertex_names)
            if edge_names:
                all_types.extend(edge_names)

            # Format the CREATE GRAPH command with types
            if all_types:
                types_str = ", ".join(all_types)
                gsql_commands = f"CREATE GRAPH {name} ({types_str})\nUSE GRAPH {name}"
            else:
                # Fall back to an empty graph if no types are provided
                gsql_commands = f"CREATE GRAPH {name}()\nUSE GRAPH {name}"

            # Execute using pyTigerGraph's gsql method, which handles authentication
            logger.debug(f"Creating graph '{name}' via GSQL: {gsql_commands}")
            try:
                result = self.conn.gsql(gsql_commands)
                logger.info(
                    f"Successfully created graph '{name}' with types {all_types}: {result}"
                )
                return result
            except Exception as e:
                error_msg = str(e).lower()
                # Check if the graph already exists (might be acceptable)
                if "already exists" in error_msg or "duplicate" in error_msg:
                    logger.info(f"Graph '{name}' may already exist: {e}")
                    return str(e)
                logger.error(f"Failed to create graph '{name}': {e}")
                raise

        except Exception as e:
            logger.error(f"Error creating graph '{name}' via GSQL: {e}")
            raise
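
    # For illustration, reusing the docstring's example names,
    # create_database("researchGraph", ["author", "paper"], ["wrote"]) issues:
    #
    #     CREATE GRAPH researchGraph (author, paper, wrote)
    #     USE GRAPH researchGraph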

    def delete_database(self, name: str):
        """
        Delete a TigerGraph database (graph).

        This method attempts to drop the graph using GSQL DROP GRAPH.
        If that fails (e.g., dependencies), it will:
        1) Remove associations and drop all edge types
        2) Drop all vertex types
        3) Clear remaining data as a last resort

        Args:
            name: Name of the graph to delete

        Note:
            In TigerGraph, deleting a graph structure requires the graph to be empty
            or may fail if it has dependencies. This method handles both cases.
        """
        try:
            logger.debug(f"Attempting to drop graph '{name}'")
            try:
                # Use the graph first to ensure we're working with the right graph
                drop_command = f"USE GRAPH {name}\nDROP GRAPH {name}"
                result = self.conn.gsql(drop_command)
                logger.info(f"Successfully dropped graph '{name}': {result}")
                return result
            except Exception as e:
                logger.debug(
                    f"Could not drop graph '{name}' (may not exist or have dependencies): {e}"
                )

            # Fallback 1: Attempt to drop edge and vertex types via ALTER GRAPH and DROP
            try:
                with self._ensure_graph_context(name):
                    # Drop edge associations and edge types
                    try:
                        edge_types = self.conn.getEdgeTypes(force=True)
                    except Exception:
                        edge_types = []

                    for e_type in edge_types:
                        # Try to disassociate from the graph (safe if already disassociated).
                        # ALTER GRAPH requires USE GRAPH context.
                        try:
                            drop_edge_cmd = f"USE GRAPH {name}\nALTER GRAPH {name} DROP DIRECTED EDGE {e_type}"
                            self.conn.gsql(drop_edge_cmd)
                        except Exception:
                            pass
                        # Try to drop the edge type globally (edges are global, no USE GRAPH needed)
                        try:
                            drop_edge_global_cmd = f"DROP DIRECTED EDGE {e_type}"
                            self.conn.gsql(drop_edge_global_cmd)
                        except Exception:
                            pass

                    # Drop vertex associations and vertex types
                    try:
                        vertex_types = self.conn.getVertexTypes(force=True)
                    except Exception:
                        vertex_types = []

                    for v_type in vertex_types:
                        # Remove all data first to avoid dependency issues
                        try:
                            self.conn.delVertices(v_type)
                        except Exception:
                            pass
                        # Disassociate from the graph (best-effort).
                        # ALTER GRAPH requires USE GRAPH context.
                        try:
                            drop_vertex_cmd = f"USE GRAPH {name}\nALTER GRAPH {name} DROP VERTEX {v_type}"
                            self.conn.gsql(drop_vertex_cmd)
                        except Exception:
                            pass
                        # Drop the vertex type globally (vertices are global, no USE GRAPH needed)
                        try:
                            drop_vertex_global_cmd = f"DROP VERTEX {v_type}"
                            self.conn.gsql(drop_vertex_global_cmd)
                        except Exception:
                            pass
            except Exception as e3:
                logger.warning(
                    f"Could not drop schema types for graph '{name}': {e3}. Proceeding to data clear."
                )

            # Fallback 2: Clear all data (if any remain)
            try:
                with self._ensure_graph_context(name):
                    vertex_types = self.conn.getVertexTypes()
                    for v_type in vertex_types:
                        result = self.conn.delVertices(v_type)
                        logger.debug(f"Cleared vertices of type {v_type}: {result}")
                    logger.info(f"Cleared all data from graph '{name}'")
            except Exception as e2:
                logger.warning(
                    f"Could not clear data from graph '{name}': {e2}. Graph may not exist."
                )

        except Exception as e:
            logger.error(f"Error deleting database '{name}': {e}")

    def execute(self, query, **kwargs):
        """
        Execute a raw GSQL query or an installed query, depending on the content.
        """
        try:
            # Check if this is an installed query call
            if query.strip().upper().startswith("RUN "):
                # Extract the query name and parameters
                query_name = query.strip()[4:].split("(")[0].strip()
                result = self.conn.runInstalledQuery(query_name, **kwargs)
            else:
                # Execute as raw GSQL
                result = self.conn.gsql(query)
            return result
        except Exception as e:
            logger.error(f"Error executing query '{query}': {e}")
            raise

    def close(self):
        """Close the connection - pyTigerGraph handles cleanup automatically."""
        pass

    def init_db(self, schema: Schema, clean_start=False):
        """
        Initialize the database with a schema definition.

        Follows the same pattern as ArangoDB:
        1. Clean if needed
        2. Create vertex and edge types globally (required before CREATE GRAPH)
        3. Create the graph with vertices and edges explicitly attached
        4. Define indexes

        If any step fails, the graph will be cleaned up gracefully.
        """
        # Use schema.general.name for graph creation
        graph_created = False

        # Determine graph name: use config.database if set, otherwise use schema.general.name
        graph_name = self.config.database
        if not graph_name:
            graph_name = schema.general.name
            # Update config for subsequent operations
            self.config.database = graph_name
            logger.info(f"Using schema name '{graph_name}' from schema.general.name")

        try:
            if clean_start:
                try:
                    # Delete all graphs, edges, and vertices (full teardown)
                    self.delete_graph_structure([], [], delete_all=True)
                    logger.debug(f"Cleaned graph '{graph_name}' for fresh start")
                except Exception as clean_error:
                    logger.warning(
                        f"Error during clean_start for graph '{graph_name}': {clean_error}",
                        exc_info=True,
                    )
                    # Continue - may be first run or already clean

            # Step 1: Create vertex and edge types globally first.
            # These must exist before they can be included in CREATE GRAPH.
            logger.debug(
                f"Creating vertex and edge types globally for graph '{graph_name}'"
            )
            try:
                vertex_names = self._create_vertex_types_global(schema.vertex_config)
                edge_names = self._create_edge_types_global(
                    schema.edge_config.edges_list(include_aux=True)
                )
                logger.debug(
                    f"Created {len(vertex_names)} vertex types and {len(edge_names)} edge types"
                )
            except Exception as type_error:
                logger.error(
                    f"Failed to create vertex/edge types for graph '{graph_name}': {type_error}",
                    exc_info=True,
                )
                raise

            # Step 2: Create the graph with vertices and edges explicitly attached
            try:
                if not self.graph_exists(graph_name):
                    logger.debug(f"Creating graph '{graph_name}' with types in init_db")
                    try:
                        self.create_database(
                            graph_name,
                            vertex_names=vertex_names,
                            edge_names=edge_names,
                        )
                        graph_created = True
                        logger.info(f"Successfully created graph '{graph_name}'")
                    except Exception as create_error:
                        logger.error(
                            f"Failed to create graph '{graph_name}': {create_error}",
                            exc_info=True,
                        )
                        raise
                else:
                    logger.debug(f"Graph '{graph_name}' already exists in init_db")
                    # If the graph already exists, associate types via ALTER GRAPH
                    try:
                        self.define_vertex_collections(schema.vertex_config)
                        self.define_edge_collections(
                            schema.edge_config.edges_list(include_aux=True)
                        )
                    except Exception as define_error:
                        logger.warning(
                            f"Could not define collections for existing graph '{graph_name}': {define_error}",
                            exc_info=True,
                        )
                        # Continue - graph exists, collections may already be defined
            except Exception as graph_error:
                logger.error(
                    f"Error during graph creation/verification for '{graph_name}': {graph_error}",
                    exc_info=True,
                )
                raise

            # Step 3: Define indexes
            try:
                self.define_indexes(schema)
                logger.info(f"Index definition completed for graph '{graph_name}'")
            except Exception as index_error:
                logger.error(
                    f"Failed to define indexes for graph '{graph_name}': {index_error}",
                    exc_info=True,
                )
                raise
        except Exception as e:
            logger.error(f"Error initializing database: {e}")
            # Graceful teardown: if the graph was created in this session, clean it up
            if graph_created:
                try:
                    logger.info(
                        f"Cleaning up graph '{graph_name}' after initialization failure"
                    )
                    self.delete_database(graph_name)
                except Exception as cleanup_error:
                    logger.warning(
                        f"Failed to clean up graph '{graph_name}': {cleanup_error}"
                    )
            raise

    def define_schema(self, schema: Schema):
        """
        Define the TigerGraph schema with proper GSQL syntax.

        Assumes the graph already exists (created in init_db). This method:
        1. Uses the graph from config.database
        2. Defines vertex types within the graph
        3. Defines edge types within the graph
        """
        try:
            # Define vertex and edge types within the graph.
            # Graph context is ensured by _ensure_graph_context in the called methods.
            self.define_vertex_collections(schema.vertex_config)
            self.define_edge_collections(
                schema.edge_config.edges_list(include_aux=True)
            )

        except Exception as e:
            logger.error(f"Error defining schema: {e}")
            raise

    def _format_vertex_fields(self, vertex: Vertex) -> str:
        """
        Format vertex fields for a GSQL CREATE VERTEX statement.

        Uses Field objects with types, applying TigerGraph defaults (STRING for None types).
        Formats fields as: field_name TYPE

        Args:
            vertex: Vertex object with Field definitions

        Returns:
            str: Formatted field definitions for a GSQL CREATE VERTEX statement
        """
        # Get fields with TigerGraph default types applied (None -> STRING)
        fields = vertex.get_fields_with_defaults(DBFlavor.TIGERGRAPH, with_aux=False)

        if not fields:
            # Default fields if none specified
            return 'name STRING DEFAULT "",\n    properties MAP<STRING, STRING> DEFAULT (map())'

        field_list = []
        for field in fields:
            # Field type should already be set (STRING if it was None)
            field_type = field.type or FieldType.STRING.value
            # Format as: field_name TYPE
            # TODO: Add DEFAULT clause support if needed in the future
            field_list.append(f"{field.name} {field_type}")

        return ",\n    ".join(field_list)
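
    # Illustrative sketch (assumed Field shapes): for a vertex with fields
    # [Field(name="email", type="STRING"), Field(name="age", type="INT")],
    # the method returns 'email STRING,\n    age INT', which
    # _create_vertex_types_global embeds in its CREATE VERTEX statement.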

    def _format_edge_attributes(self, edge: Edge) -> str:
        """
        Format edge attributes for a GSQL CREATE EDGE statement.
        """
        if hasattr(edge, "attributes") and edge.attributes:
            attrs = []
            for attr_name, attr_type in edge.attributes.items():
                tg_type = self._map_type_to_tigergraph(attr_type)
                attrs.append(f"{attr_name} {tg_type}")
            return ",\n    " + ",\n    ".join(attrs) if attrs else ""
        else:
            return ",\n    weight FLOAT DEFAULT 1.0"

    def _map_type_to_tigergraph(self, field_type: str) -> str:
        """
        Map common field types to TigerGraph types.
        """
        type_mapping = {
            "str": "STRING",
            "string": "STRING",
            "int": "INT",
            "integer": "INT",
            "float": "FLOAT",
            "double": "DOUBLE",
            "bool": "BOOL",
            "boolean": "BOOL",
            "datetime": "DATETIME",
            "date": "DATETIME",
        }
        return type_mapping.get(field_type.lower(), "STRING")

    # _get_graph_name removed: always use schema.general.name

    def _create_vertex_types_global(self, vertex_config: VertexConfig) -> list[str]:
        """Create TigerGraph vertex types globally (without graph association).

        Vertices are global in TigerGraph and must be created before they can be
        included in a CREATE GRAPH statement.

        Creates vertices with composite primary keys using PRIMARY KEY syntax.
        According to the TigerGraph documentation, fields used in PRIMARY KEY must be
        defined as regular attributes first, and they remain accessible as attributes.

        Reference: https://docs.tigergraph.com/gsql-ref/4.2/ddl-and-loading/defining-a-graph-schema#_composite_key_using_primary_key

        Args:
            vertex_config: Vertex configuration containing vertices to create

        Returns:
            list[str]: List of vertex type names that were created (or already existed)
        """
        vertex_names = []
        for vertex in vertex_config.vertices:
            field_definitions = self._format_vertex_fields(vertex)
            vertex_dbname = vertex_config.vertex_dbname(vertex.name)
            vindex = "(" + ", ".join(vertex_config.index(vertex.name).fields) + ")"

            # Create the vertex type globally (ignore if it exists).
            # Vertices are global in TigerGraph, so no USE GRAPH is needed.
            # Note: Fields used in PRIMARY KEY must be defined as regular attributes first.
            # They remain accessible as attributes automatically (no primary_id_as_attribute needed).
            create_vertex_cmd = (
                f"CREATE VERTEX {vertex_dbname} (\n"
                f"    {field_definitions},\n"
                f"    PRIMARY KEY {vindex}\n"
                f') WITH STATS="OUTDEGREE_BY_EDGETYPE"'
            )
            logger.debug(f"Executing GSQL: {create_vertex_cmd}")
            try:
                result = self.conn.gsql(create_vertex_cmd)
                logger.debug(f"Result: {result}")
                vertex_names.append(vertex_dbname)
            except Exception as e:
                err = str(e).lower()
                if "used by another object" in err or "duplicate" in err:
                    logger.debug(
                        f"Vertex type '{vertex_dbname}' already exists; will include in graph"
                    )
                    vertex_names.append(vertex_dbname)
                else:
                    raise
        return vertex_names
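
    # For illustration, a hypothetical vertex type "User" with fields
    # "email STRING, name STRING" and index fields ("email",) would yield roughly:
    #
    #     CREATE VERTEX User (
    #         email STRING,
    #         name STRING,
    #         PRIMARY KEY (email)
    #     ) WITH STATS="OUTDEGREE_BY_EDGETYPE"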

    def define_vertex_collections(self, vertex_config: VertexConfig):
        """Define TigerGraph vertex types and associate them with the current graph.

        Flow per vertex type:
        1) Try to CREATE VERTEX (idempotent: ignore "already exists" errors)
        2) Associate the vertex with the graph via ALTER GRAPH <graph> ADD VERTEX <vertex>

        Args:
            vertex_config: Vertex configuration containing vertices to create
        """
        # First create all vertex types globally
        vertex_names = self._create_vertex_types_global(vertex_config)

        # Then associate them with the graph (if the graph already exists)
        graph_name = self.config.database
        if graph_name:
            for vertex_name in vertex_names:
                alter_graph_cmd = f"USE GRAPH {graph_name}\nALTER GRAPH {graph_name} ADD VERTEX {vertex_name}"
                logger.debug(f"Executing GSQL: {alter_graph_cmd}")
                try:
                    result = self.conn.gsql(alter_graph_cmd)
                    logger.debug(f"Result: {result}")
                except Exception as e:
                    err = str(e).lower()
                    # If already associated, ignore
                    if "already" in err and ("added" in err or "exists" in err):
                        logger.debug(
                            f"Vertex '{vertex_name}' already associated with graph '{graph_name}'"
                        )
                    else:
                        raise

    def _create_edge_types_global(self, edges: list[Edge]) -> list[str]:
        """Create TigerGraph edge types globally (without graph association).

        Edges are global in TigerGraph and must be created before they can be
        included in a CREATE GRAPH statement.

        Args:
            edges: List of edges to create (should have _source_collection and _target_collection populated)

        Returns:
            list[str]: List of edge type names (relation names) that were created (or already existed)
        """
        edge_names = []
        for edge in edges:
            edge_attrs = self._format_edge_attributes(edge)

            # Create the edge type globally (ignore if it exists / is used elsewhere).
            # Edges are global in TigerGraph, so no USE GRAPH is needed.
            create_edge_cmd = (
                f"CREATE DIRECTED EDGE {edge.relation} (\n"
                f"    FROM {edge._source_collection},\n"
                f"    TO {edge._target_collection}{edge_attrs}\n"
                f")"
            )
            logger.debug(f"Executing GSQL: {create_edge_cmd}")
            try:
                result = self.conn.gsql(create_edge_cmd)
                logger.debug(f"Result: {result}")
                edge_names.append(edge.relation)
            except Exception as e:
                err = str(e).lower()
                # If the edge name is already used by another object or duplicates exist, continue
                if (
                    "used by another object" in err
                    or "duplicate" in err
                    or "already exists" in err
                ):
                    logger.debug(
                        f"Edge type '{edge.relation}' already defined; will include in graph"
                    )
                    edge_names.append(edge.relation)
                else:
                    raise
        return edge_names

    def define_edge_collections(self, edges: list[Edge]):
        """Define TigerGraph edge types and associate them with the current graph.

        Flow per edge type:
        1) Try to CREATE DIRECTED EDGE (idempotent: ignore "used by another object"/"duplicate"/"already exists")
        2) Associate the edge with the graph via ALTER GRAPH <graph> ADD DIRECTED EDGE <edge>

        Args:
            edges: List of edges to create (should have _source_collection and _target_collection populated)
        """
        # First create all edge types globally
        edge_names = self._create_edge_types_global(edges)

        # Then associate them with the graph (if the graph already exists)
        graph_name = self.config.database
        if graph_name:
            for edge_name in edge_names:
                alter_graph_cmd = (
                    f"USE GRAPH {graph_name}\n"
                    f"ALTER GRAPH {graph_name} ADD DIRECTED EDGE {edge_name}"
                )
                logger.debug(f"Executing GSQL: {alter_graph_cmd}")
                try:
                    result = self.conn.gsql(alter_graph_cmd)
                    logger.debug(f"Result: {result}")
                except Exception as e:
                    err = str(e).lower()
                    # If already associated, ignore
                    if "already" in err and ("added" in err or "exists" in err):
                        logger.debug(
                            f"Edge '{edge_name}' already associated with graph '{graph_name}'"
                        )
                    else:
                        raise

    def define_vertex_indices(self, vertex_config: VertexConfig):
        """
        TigerGraph automatically indexes primary keys.
        Secondary indices are less common but can be created.
        """
        for vertex_class in vertex_config.vertex_set:
            vertex_dbname = vertex_config.vertex_dbname(vertex_class)
            for index_obj in vertex_config.indexes(vertex_class)[1:]:
                self._add_index(vertex_dbname, index_obj)

    def define_edge_indices(self, edges: list[Edge]):
        """Define indices for edges if specified."""
        logger.warning("TigerGraph edge indices not implemented yet [version 4.2.2]")

    def _add_index(self, obj_name, index: Index, is_vertex_index=True):
        """
        Create an index on a vertex or edge type using GSQL schema change jobs.

        TigerGraph requires indexes to be created through schema change jobs:
        1. CREATE GLOBAL SCHEMA_CHANGE job job_name {ALTER VERTEX ... ADD INDEX ... ON (...);}
        2. RUN GLOBAL SCHEMA_CHANGE job job_name

        Note: TigerGraph only supports secondary indexes on a single field.
        Indexes with multiple fields will be skipped with a warning.
        Edge indexes are not supported in TigerGraph and will be skipped with a warning.

        Args:
            obj_name: Name of the vertex type or edge type
            index: Index configuration object
            is_vertex_index: Whether this is a vertex index (True) or edge index (False)
        """
        try:
            # TigerGraph doesn't support indexes on edges
            if not is_vertex_index:
                logger.warning(
                    "Edge indexes are not supported in TigerGraph [current version 4.2.2]. "
                    f"Skipping index creation for edge '{obj_name}' on field(s) '{index.fields}'"
                )
                return

            if not index.fields:
                logger.warning(f"No fields specified for index on {obj_name}, skipping")
                return

            # TigerGraph only supports secondary indexes on a single field
            if len(index.fields) > 1:
                logger.warning(
                    f"TigerGraph only supports indexes on a single field. "
                    f"Skipping multi-field index on {obj_name} with fields {index.fields}"
                )
                return

            # We have exactly one field - proceed with index creation
            field_name = index.fields[0]

            # Generate an index name if not provided
            if index.name:
                index_name = index.name
            else:
                # Generate a name from obj_name and the field name
                index_name = f"{obj_name}_{field_name}_index"

            # Generate a job name from obj_name and the field name
            job_name = f"add_{obj_name}_{field_name}_index"

            # Build the ALTER command (single field only)
            graph_name = self.config.database

            if not graph_name:
                logger.warning(
                    f"No graph name configured, cannot create index on {obj_name}"
                )
                return

            # Build the ALTER statement inside the job (single field in parentheses).
            # Note: Only vertex indexes are supported - edge indexes are handled earlier.
            alter_stmt = (
                f"ALTER VERTEX {obj_name} ADD INDEX {index_name} ON ({field_name})"
            )

            # Step 1: Create the schema change job
            # (only global schema changes are supported by TigerGraph)
            create_job_cmd = (
                f"USE GLOBAL\n"
                f"CREATE GLOBAL SCHEMA_CHANGE job {job_name} {{{alter_stmt};}}"
            )

            logger.debug(f"Executing GSQL (create job): {create_job_cmd}")
            try:
                result = self.conn.gsql(create_job_cmd)
                logger.debug(f"Created schema change job '{job_name}': {result}")
            except Exception as e:
                err = str(e).lower()
                # Check if the job already exists
                if (
                    "already exists" in err
                    or "duplicate" in err
                    or "used by another object" in err
                ):
                    logger.debug(f"Schema change job '{job_name}' already exists")
                else:
                    logger.error(
                        f"Failed to create schema change job '{job_name}': {e}"
                    )
                    raise

            # Step 2: Run the schema change job
            run_job_cmd = f"RUN GLOBAL SCHEMA_CHANGE job {job_name}"

            logger.debug(f"Executing GSQL (run job): {run_job_cmd}")
            try:
                result = self.conn.gsql(run_job_cmd)
                logger.debug(
                    f"Ran schema change job '{job_name}', created index '{index_name}' on {obj_name}: {result}"
                )
            except Exception as e:
                err = str(e).lower()
                # Check if the index already exists or the job was already run
                if (
                    "already exists" in err
                    or "duplicate" in err
                    or "used by another object" in err
                    or "already applied" in err
                ):
                    logger.debug(
                        f"Index '{index_name}' on {obj_name} already exists or job already run, skipping"
                    )
                else:
                    logger.error(f"Failed to run schema change job '{job_name}': {e}")
                    raise
        except Exception as e:
            logger.warning(f"Could not create index for {obj_name}: {e}")
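
    # For illustration, indexing field "name" on a hypothetical vertex "User"
    # (no explicit index name) would issue:
    #
    #     USE GLOBAL
    #     CREATE GLOBAL SCHEMA_CHANGE job add_User_name_index {ALTER VERTEX User ADD INDEX User_name_index ON (name);}
    #     RUN GLOBAL SCHEMA_CHANGE job add_User_name_index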

    def _parse_show_output(self, result_str: str, prefix: str) -> list[str]:
        """
        Generic parser for SHOW * output commands.

        Extracts names from lines matching the pattern: "- PREFIX name(...)"

        Args:
            result_str: String output from a SHOW * GSQL command
            prefix: The prefix to look for (e.g., "VERTEX", "GRAPH", "JOB")

        Returns:
            List of extracted names
        """
        names = []
        lines = result_str.split("\n")

        for line in lines:
            line = line.strip()
            # Skip empty lines and headers
            if not line or line.startswith("*"):
                continue

            # Remove leading "- " if present
            if line.startswith("- "):
                line = line[2:].strip()

            # Look for the prefix pattern
            prefix_upper = prefix.upper()
            if line.upper().startswith(f"{prefix_upper} "):
                # Extract the name (after the prefix and before an opening parenthesis or whitespace)
                after_prefix = line[len(prefix_upper) + 1 :].strip()
                # The name is the first word (before a space or parenthesis)
                if "(" in after_prefix:
                    name = after_prefix.split("(")[0].strip()
                else:
                    # No parenthesis, take the first word
                    name = (
                        after_prefix.split()[0].strip()
                        if after_prefix.split()
                        else None
                    )

                if name:
                    names.append(name)

        return names
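
    # Illustrative sketch of the parsing, with assumed SHOW VERTEX * output:
    #
    #     >>> out = "- VERTEX User(PRIMARY_ID email STRING, name STRING)"
    #     >>> self._parse_show_output(out, "VERTEX")
    #     ['User']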

    def _parse_show_edge_output(self, result_str: str) -> list[tuple[str, bool]]:
        """
        Parse SHOW EDGE * output to extract edge type names and direction.

        Format: "- DIRECTED EDGE belongsTo(FROM Author, TO ResearchField, ...)"
        or "- UNDIRECTED EDGE edgeName(...)"

        Args:
            result_str: String output from a SHOW EDGE * GSQL command

        Returns:
            List of tuples (edge_name, is_directed)
        """
        edge_types = []
        lines = result_str.split("\n")

        for line in lines:
            line = line.strip()
            # Skip empty lines and headers
            if not line or line.startswith("*"):
                continue

            # Remove leading "- " if present
            if line.startswith("- "):
                line = line[2:].strip()

            # Look for the "UNDIRECTED EDGE" or "DIRECTED EDGE" pattern.
            # Check UNDIRECTED first: "UNDIRECTED EDGE" contains "DIRECTED EDGE" as a
            # substring, so testing "DIRECTED EDGE" first would misclassify undirected edges.
            is_directed = None
            prefix = None
            if "UNDIRECTED EDGE" in line.upper():
                prefix = "UNDIRECTED EDGE "
                is_directed = False
            elif "DIRECTED EDGE" in line.upper():
                prefix = "DIRECTED EDGE "
                is_directed = True

            if prefix:
                idx = line.upper().find(prefix)
                if idx >= 0:
                    after_prefix = line[idx + len(prefix) :].strip()
                    # Extract the name before the opening parenthesis
                    if "(" in after_prefix:
                        edge_name = after_prefix.split("(")[0].strip()
                        if edge_name:
                            edge_types.append((edge_name, is_directed))

        return edge_types

    def _is_not_found_error(self, error: Exception | str) -> bool:
        """
        Check if an error indicates that an object doesn't exist.

        Args:
            error: Exception object or error string

        Returns:
            True if the error indicates "not found" or "does not exist"
        """
        err_str = str(error).lower()
        return "does not exist" in err_str or "not found" in err_str

    def _clean_document(self, doc: dict[str, Any]) -> dict[str, Any]:
        """
        Remove internal keys that shouldn't be stored in the database.

        Removes keys starting with "_" except "_key".

        Args:
            doc: Document dictionary to clean

        Returns:
            Cleaned document dictionary
        """
        return {k: v for k, v in doc.items() if not k.startswith("_") or k == "_key"}
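
    # Illustrative: _clean_document({"_key": "k1", "_rev": "x", "name": "Ann"})
    # returns {"_key": "k1", "name": "Ann"}.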

    def _parse_show_vertex_output(self, result_str: str) -> list[str]:
        """Parse SHOW VERTEX * output to extract vertex type names."""
        return self._parse_show_output(result_str, "VERTEX")

    def _parse_show_graph_output(self, result_str: str) -> list[str]:
        """Parse SHOW GRAPH * output to extract graph names."""
        return self._parse_show_output(result_str, "GRAPH")

    def _parse_show_job_output(self, result_str: str) -> list[str]:
        """Parse SHOW JOB * output to extract job names."""
        return self._parse_show_output(result_str, "JOB")

    def delete_graph_structure(self, vertex_types=(), graph_names=(), delete_all=False):
        """
        Delete graph structure (graphs, vertex types, edge types) from TigerGraph.

        In TigerGraph:
        - Graph: Top-level container (functions like a database in ArangoDB)
        - Vertex Types: Global vertex type definitions (can be shared across graphs)
        - Edge Types: Global edge type definitions (can be shared across graphs)
        - Vertex and edge types are associated with graphs

        Teardown order:
        1. Drop all graphs
        2. Drop all edge types globally
        3. Drop all vertex types globally
        4. Drop all jobs globally

        Args:
            vertex_types: Vertex type names to delete (not used in TigerGraph teardown)
            graph_names: Graph names to delete (if empty and delete_all=True, deletes all)
            delete_all: If True, perform a full teardown of all graphs, edges, vertices, and jobs
        """
        cnames = vertex_types
        gnames = graph_names
        try:
            if delete_all:
                # Step 1: Drop all graphs
                graphs_to_drop = list(gnames) if gnames else []

                # If no specific graphs were provided, try to discover and drop all graphs
                if not graphs_to_drop:
                    try:
                        # Use GSQL to list all graphs
                        show_graphs_cmd = "SHOW GRAPH *"
                        result = self.conn.gsql(show_graphs_cmd)
                        result_str = str(result)

                        # Parse graph names using the helper method
                        graphs_to_drop = self._parse_show_graph_output(result_str)
                    except Exception as e:
                        logger.debug(f"Could not list graphs: {e}")
                        graphs_to_drop = []

                # Drop each graph
                logger.info(
                    f"Found {len(graphs_to_drop)} graphs to drop: {graphs_to_drop}"
                )
                for graph_name in graphs_to_drop:
                    try:
                        self.delete_database(graph_name)
                        logger.info(f"Successfully dropped graph '{graph_name}'")
                    except Exception as e:
                        if self._is_not_found_error(e):
                            logger.debug(
                                f"Graph '{graph_name}' already dropped or doesn't exist"
                            )
                        else:
                            logger.warning(f"Failed to drop graph '{graph_name}': {e}")
                            logger.warning(
                                f"Error details: {type(e).__name__}: {str(e)}"
                            )

                # Step 2: Drop all edge types globally.
                # Note: Edges must be dropped before vertices due to dependencies.
                # Edges are global, so we need to query them at the global level using GSQL.
                try:
                    # Use GSQL to list all global edge types (not graph-scoped)
                    show_edges_cmd = "SHOW EDGE *"
                    result = self.conn.gsql(show_edges_cmd)
                    result_str = str(result)

                    # Parse edge types using the helper method
                    edge_types = self._parse_show_edge_output(result_str)

                    logger.info(
                        f"Found {len(edge_types)} edge types to drop: {[name for name, _ in edge_types]}"
                    )
                    for e_type, is_directed in edge_types:
                        try:
                            # DROP EDGE works for both directed and undirected edges
                            drop_edge_cmd = f"DROP EDGE {e_type}"
                            logger.debug(f"Executing: {drop_edge_cmd}")
                            result = self.conn.gsql(drop_edge_cmd)
                            logger.info(
                                f"Successfully dropped edge type '{e_type}': {result}"
                            )
                        except Exception as e:
                            if self._is_not_found_error(e):
                                logger.debug(
                                    f"Edge type '{e_type}' already dropped or doesn't exist"
                                )
                            else:
                                logger.warning(
                                    f"Failed to drop edge type '{e_type}': {e}"
                                )
                                logger.warning(
                                    f"Error details: {type(e).__name__}: {str(e)}"
                                )
                except Exception as e:
                    logger.warning(f"Could not list or drop edge types: {e}")
                    logger.warning(f"Error details: {type(e).__name__}: {str(e)}")

                # Step 3: Drop all vertex types globally.
                # Vertices are dropped after edges to avoid dependency issues.
                # Vertices are global, so we need to query them at the global level using GSQL.
                try:
                    # Use GSQL to list all global vertex types (not graph-scoped)
                    show_vertices_cmd = "SHOW VERTEX *"
                    result = self.conn.gsql(show_vertices_cmd)
                    result_str = str(result)

                    # Parse vertex types using the helper method
                    vertex_types = self._parse_show_vertex_output(result_str)

                    logger.info(
                        f"Found {len(vertex_types)} vertex types to drop: {vertex_types}"
                    )
                    for v_type in vertex_types:
                        try:
                            # Clear data first to avoid dependency issues
                            try:
                                result = self.conn.delVertices(v_type)
                                logger.debug(
                                    f"Cleared data from vertex type '{v_type}': {result}"
                                )
                            except Exception as clear_err:
                                logger.debug(
                                    f"Could not clear data from vertex type '{v_type}': {clear_err}"
                                )

                            # Drop the vertex type
                            drop_vertex_cmd = f"DROP VERTEX {v_type}"
                            logger.debug(f"Executing: {drop_vertex_cmd}")
                            result = self.conn.gsql(drop_vertex_cmd)
                            logger.info(
                                f"Successfully dropped vertex type '{v_type}': {result}"
                            )
                        except Exception as e:
                            if self._is_not_found_error(e):
                                logger.debug(
                                    f"Vertex type '{v_type}' already dropped or doesn't exist"
                                )
                            else:
                                logger.warning(
                                    f"Failed to drop vertex type '{v_type}': {e}"
                                )
                                logger.warning(
                                    f"Error details: {type(e).__name__}: {str(e)}"
                                )
                except Exception as e:
                    logger.warning(f"Could not list or drop vertex types: {e}")
                    logger.warning(f"Error details: {type(e).__name__}: {str(e)}")

                # Step 4: Drop all jobs globally.
                # Jobs are dropped last since they may reference schema objects.
                try:
                    # Use GSQL to list all global jobs
                    show_jobs_cmd = "SHOW JOB *"
                    result = self.conn.gsql(show_jobs_cmd)
                    result_str = str(result)

                    # Parse job names using the helper method
                    job_names = self._parse_show_job_output(result_str)

                    logger.info(f"Found {len(job_names)} jobs to drop: {job_names}")
                    for job_name in job_names:
                        try:
                            # Drop the job.
                            # Jobs can be of different types (SCHEMA_CHANGE, LOADING, etc.);
                            # DROP JOB works for all job types.
                            drop_job_cmd = f"DROP JOB {job_name}"
                            logger.debug(f"Executing: {drop_job_cmd}")
                            result = self.conn.gsql(drop_job_cmd)
                            logger.info(
                                f"Successfully dropped job '{job_name}': {result}"
                            )
                        except Exception as e:
                            if self._is_not_found_error(e):
                                logger.debug(
                                    f"Job '{job_name}' already dropped or doesn't exist"
                                )
                            else:
                                logger.warning(f"Failed to drop job '{job_name}': {e}")
                                logger.warning(
                                    f"Error details: {type(e).__name__}: {str(e)}"
                                )
                except Exception as e:
                    logger.warning(f"Could not list or drop jobs: {e}")
                    logger.warning(f"Error details: {type(e).__name__}: {str(e)}")

            elif gnames:
                # Drop specific graphs
                for graph_name in gnames:
                    try:
                        self.delete_database(graph_name)
                    except Exception as e:
                        logger.error(f"Error deleting graph '{graph_name}': {e}")
            elif cnames:
                # Delete vertices from specific vertex types (data only, not schema)
                with self._ensure_graph_context():
                    for class_name in cnames:
                        try:
                            result = self.conn.delVertices(class_name)
                            logger.debug(
                                f"Deleted vertices from {class_name}: {result}"
                            )
                        except Exception as e:
                            logger.error(
                                f"Error deleting vertices from {class_name}: {e}"
                            )

        except Exception as e:
            logger.error(f"Error in delete_graph_structure: {e}")

    def _generate_upsert_payload(
        self, data: list[dict[str, Any]], vname: str, vindex: tuple[str, ...]
    ) -> dict[str, Any]:
        """
        Transforms a list of dictionaries into the TigerGraph REST++ batch upsert JSON format.

        The composite Primary ID is created by concatenating the values of the fields
        specified in vindex with an underscore '_'. Index fields are included in the
        vertex attributes since PRIMARY KEY fields are automatically accessible as
        attributes in TigerGraph queries.

        Attribute values are wrapped in {"value": ...} format as required by the TigerGraph REST++ API.

        Args:
            data: List of document dictionaries to upsert
            vname: Target vertex name
            vindex: Tuple of index fields used to create the composite Primary ID

        Returns:
            Dictionary in TigerGraph REST++ batch upsert format:
            {"vertices": {vname: {vertex_id: {attr_name: {"value": attr_value}, ...}}}}
        """
        # Initialize the required JSON structure for vertices
        payload: dict[str, Any] = {"vertices": {vname: {}}}
        vertex_map = payload["vertices"][vname]

        for record in data:
            try:
                # 1. Calculate the composite Primary ID
                #    (assumes all index keys exist in the record)
                primary_id_components = [str(record[key]) for key in vindex]
                vertex_id = "_".join(primary_id_components)

                # 2. Clean the record (remove internal keys that shouldn't be stored)
                clean_record = self._clean_document(record)

                # 3. Keep index fields in the attributes.
                #    When using PRIMARY KEY (composite keys), the key fields are automatically
                #    accessible as attributes in queries, so we include them in the payload.

                # 4. Format attributes for the TigerGraph REST++ API.
                #    TigerGraph requires attribute values to be wrapped in {"value": ...}
                formatted_attributes = {
                    k: {"value": v} for k, v in clean_record.items()
                }

                # 5. Add the record attributes to the map using the composite ID as the key
                vertex_map[vertex_id] = formatted_attributes

            except KeyError as e:
                logger.warning(
                    f"Record is missing a required index field: {e}. Skipping record: {record}"
                )
                continue

        return payload
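
    # Illustrative sketch of the payload shape (hypothetical "User" vertex
    # keyed on "email"):
    #
    #     >>> self._generate_upsert_payload(
    #     ...     [{"email": "ann@example.org", "name": "Ann"}], "User", ("email",)
    #     ... )
    #     {'vertices': {'User': {'ann@example.org': {
    #         'email': {'value': 'ann@example.org'}, 'name': {'value': 'Ann'}}}}}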

    def _upsert_data(
        self,
        payload: dict[str, Any],
        host: str,
        graph_name: str,
        username: str | None = None,
        password: str | None = None,
    ) -> dict[str, Any]:
        """
        Sends the generated JSON payload to the TigerGraph REST++ upsert endpoint.

        Args:
            payload: The JSON payload in TigerGraph REST++ format
            host: Base host URL (e.g., "http://localhost:9000")
            graph_name: Name of the graph
            username: Optional username for authentication
            password: Optional password for authentication

        Returns:
            Dictionary containing the response from TigerGraph
        """
        url = f"{host}/graph/{graph_name}"

        headers = {
            "Content-Type": "application/json",
        }

        logger.debug(f"Attempting batch upsert to: {url}")

        try:
            # Use HTTP Basic Auth if username and password are provided
            auth = None
            if username and password:
                auth = (username, password)

            response = requests.post(
                url,
                headers=headers,
                data=json.dumps(payload),
                auth=auth,
                # Increase timeout for large batches
                timeout=120,
            )
            response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)

            # The TigerGraph response is a JSON object
            return response.json()

        except requests_exceptions.HTTPError as errh:
            logger.error(f"HTTP Error: {errh}")
            error_details = ""
            try:
                error_details = response.text
            except Exception:
                pass
            return {"error": True, "message": str(errh), "details": error_details}
        except requests_exceptions.ConnectionError as errc:
            logger.error(f"Error Connecting: {errc}")
            return {"error": True, "message": str(errc)}
        except requests_exceptions.Timeout as errt:
            logger.error(f"Timeout Error: {errt}")
            return {"error": True, "message": str(errt)}
        except requests_exceptions.RequestException as err:
            logger.error(f"An unexpected error occurred: {err}")
            return {"error": True, "message": str(err)}

    def upsert_docs_batch(self, docs, class_name, match_keys, **kwargs):
        """
        Batch upsert documents as vertices using the TigerGraph REST++ API.

        Formats the payload for the REST++ batch upsert endpoint, using composite
        Primary IDs constructed from match_keys.
        """
        dry = kwargs.pop("dry", False)
        if dry:
            logger.debug(f"Dry run: would upsert {len(docs)} documents to {class_name}")
            return

        try:
            # Convert match_keys to a tuple if it's a list
            vindex = tuple(match_keys) if isinstance(match_keys, list) else match_keys

            # Generate the upsert payload
            payload = self._generate_upsert_payload(docs, class_name, vindex)

            # Check if the payload has any vertices
            if not payload.get("vertices", {}).get(class_name):
                logger.warning(f"No valid vertices to upsert for {class_name}")
                return

            # Build the REST++ endpoint URL
            host = f"{self.config.url_without_port}:{self.config.port}"
            graph_name = self.config.database
            if not graph_name:
                raise ValueError("Graph name (database) must be configured")

            # Send the upsert request with username/password authentication
            result = self._upsert_data(
                payload,
                host,
                graph_name,
                username=self.config.username,
                password=self.config.password,
            )

            if result.get("error"):
                logger.error(
                    f"Error upserting vertices to {class_name}: {result.get('message')}"
                )
                # Fall back to individual operations
                self._fallback_individual_upsert(docs, class_name, match_keys)
            else:
                num_vertices = len(payload["vertices"][class_name])
                logger.debug(
                    f"Upserted {num_vertices} vertices to {class_name}: {result}"
                )
                return result

        except Exception as e:
            logger.error(f"Error upserting vertices to {class_name}: {e}")
            # Fall back to individual operations
            self._fallback_individual_upsert(docs, class_name, match_keys)

    def _fallback_individual_upsert(self, docs, class_name, match_keys):
        """Fallback method for individual vertex upserts."""
        for doc in docs:
            vertex_id = None  # Pre-bind so the except clause can reference it safely
            try:
                vertex_id = self._extract_id(doc, match_keys)
                if vertex_id:
                    clean_doc = self._clean_document(doc)
                    self.conn.upsertVertex(class_name, vertex_id, clean_doc)
            except Exception as e:
                logger.error(f"Error upserting individual vertex {vertex_id}: {e}")
+    def _generate_edge_upsert_payload(
+        self,
+        edges_data: list[tuple[dict, dict, dict]],
+        source_class: str,
+        target_class: str,
+        edge_type: str,
+        match_keys_source: tuple[str, ...],
+        match_keys_target: tuple[str, ...],
+    ) -> dict[str, Any]:
+        """
+        Transform edge data into the TigerGraph REST++ batch upsert JSON format.
+
+        Args:
+            edges_data: List of tuples (source_doc, target_doc, edge_props)
+            source_class: Source vertex type name
+            target_class: Target vertex type name
+            edge_type: Edge type/relation name
+            match_keys_source: Tuple of index fields for the source vertex
+            match_keys_target: Tuple of index fields for the target vertex
+
+        Returns:
+            Dictionary in the TigerGraph REST++ batch upsert format for edges
+        """
+        # Initialize the required JSON structure for edges
+        payload: dict[str, Any] = {"edges": {source_class: {}}}
+        source_map = payload["edges"][source_class]
+
+        for source_doc, target_doc, edge_props in edges_data:
+            try:
+                # Extract the source ID (composite if needed)
+                if isinstance(match_keys_source, tuple) and len(match_keys_source) > 1:
+                    source_id_components = [
+                        str(source_doc[key]) for key in match_keys_source
+                    ]
+                    source_id = "_".join(source_id_components)
+                else:
+                    source_id = self._extract_id(source_doc, match_keys_source)
+
+                # Extract the target ID (composite if needed)
+                if isinstance(match_keys_target, tuple) and len(match_keys_target) > 1:
+                    target_id_components = [
+                        str(target_doc[key]) for key in match_keys_target
+                    ]
+                    target_id = "_".join(target_id_components)
+                else:
+                    target_id = self._extract_id(target_doc, match_keys_target)
+
+                if not source_id or not target_id:
+                    logger.warning(
+                        f"Missing source_id ({source_id}) or target_id ({target_id}) for edge"
+                    )
+                    continue
+
+                # Initialize nested entries on first sight of this source
+                # vertex, edge type, and target vertex type
+                if source_id not in source_map:
+                    source_map[source_id] = {edge_type: {}}
+                if edge_type not in source_map[source_id]:
+                    source_map[source_id][edge_type] = {}
+                if target_class not in source_map[source_id][edge_type]:
+                    source_map[source_id][edge_type][target_class] = {}
+
+                # Clean the edge properties (drop internal keys) and wrap each
+                # attribute as {"value": ...}, as required by the REST++ API
+                clean_edge_props = self._clean_document(edge_props)
+                formatted_attributes = {
+                    k: {"value": v} for k, v in clean_edge_props.items()
+                }
+
+                # Attach the target vertex with its edge attributes under the
+                # target vertex type
+                source_map[source_id][edge_type][target_class][target_id] = (
+                    formatted_attributes
+                )
+
+            except KeyError as e:
+                logger.warning(
+                    f"Edge is missing a required field: {e}. "
+                    f"Skipping edge: {source_doc}, {target_doc}"
+                )
+                continue
+            except Exception as e:
+                logger.error(f"Error processing edge: {e}")
+                continue
+
+        return payload
+
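The nesting built above mirrors the REST++ edge upsert format: source type → source ID → edge type → target type → target ID → wrapped attributes. A minimal sketch with hypothetical vertex types and IDs:

```python
# Hypothetical payload for a single WROTE edge from Author "a1" to Paper "p7".
payload = {
    "edges": {
        "Author": {                # source vertex type
            "a1": {                # source vertex ID
                "WROTE": {         # edge type
                    "Paper": {     # target vertex type
                        "p7": {"weight": {"value": 0.9}},
                    }
                }
            }
        }
    }
}
```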
+    def insert_edges_batch(
+        self,
+        docs_edges,
+        source_class,
+        target_class,
+        relation_name,
+        collection_name=None,
+        match_keys_source=("_key",),
+        match_keys_target=("_key",),
+        filter_uniques=True,
+        uniq_weight_fields=None,
+        uniq_weight_collections=None,
+        upsert_option=False,
+        head=None,
+        **kwargs,
+    ):
+        """
+        Batch insert/upsert edges using the TigerGraph REST++ API.
+
+        Handles edge data in tuple format: [(source_doc, target_doc, edge_props), ...]
+        or dict format: [{"_source_aux": {...}, "_target_aux": {...}, "_edge_props": {...}}, ...]
+
+        Args:
+            docs_edges: List of edge documents (tuples or dicts)
+            source_class: Source vertex type name
+            target_class: Target vertex type name
+            relation_name: Edge type/relation name
+            collection_name: Alternative edge collection name (used if relation_name is None)
+            match_keys_source: Keys to match source vertices
+            match_keys_target: Keys to match target vertices
+            filter_uniques: If True, filter duplicate edges
+            uniq_weight_fields: Fields to consider for uniqueness (not used in TigerGraph)
+            uniq_weight_collections: Collections to consider for uniqueness (not used in TigerGraph)
+            upsert_option: If True, use upsert (the default behavior in TigerGraph)
+            head: Optional limit on the number of edges to insert
+            **kwargs: Additional options:
+                - dry: If True, don't execute the query
+        """
+        dry = kwargs.pop("dry", False)
+        if dry:
+            logger.debug(f"Dry run: would insert {len(docs_edges)} edges")
+            return
+
+        # Process the edges list
+        if isinstance(docs_edges, list):
+            if head is not None:
+                docs_edges = docs_edges[:head]
+            if filter_uniques:
+                docs_edges = pick_unique_dict(docs_edges)
+
+        # Normalize the edge data format - handle both tuple and dict formats
+        normalized_edges = []
+        for edge_item in docs_edges:
+            try:
+                if isinstance(edge_item, tuple) and len(edge_item) == 3:
+                    # Tuple format: (source_doc, target_doc, edge_props)
+                    source_doc, target_doc, edge_props = edge_item
+                    normalized_edges.append((source_doc, target_doc, edge_props))
+                elif isinstance(edge_item, dict):
+                    # Dict format: {"_source_aux": {...}, "_target_aux": {...}, "_edge_props": {...}}
+                    source_doc = edge_item.get("_source_aux", {})
+                    target_doc = edge_item.get("_target_aux", {})
+                    edge_props = edge_item.get("_edge_props", {})
+                    normalized_edges.append((source_doc, target_doc, edge_props))
+                else:
+                    logger.warning(f"Unexpected edge format: {edge_item}")
+            except Exception as e:
+                logger.error(f"Error normalizing edge item: {e}")
+                continue
+
+        if not normalized_edges:
+            logger.warning("No valid edges to insert")
+            return
+
+        try:
+            # Convert match_keys to tuples if they're lists
+            match_keys_src = (
+                tuple(match_keys_source)
+                if isinstance(match_keys_source, list)
+                else match_keys_source
+            )
+            match_keys_tgt = (
+                tuple(match_keys_target)
+                if isinstance(match_keys_target, list)
+                else match_keys_target
+            )
+
+            edge_type = relation_name or collection_name
+            if not edge_type:
+                logger.error(
+                    "Edge type must be specified via relation_name or collection_name"
+                )
+                return
+
+            # Generate the edge upsert payload
+            payload = self._generate_edge_upsert_payload(
+                normalized_edges,
+                source_class,
+                target_class,
+                edge_type,
+                match_keys_src,
+                match_keys_tgt,
+            )
+
+            # Check whether the payload has any edges
+            source_vertices = payload.get("edges", {}).get(source_class, {})
+            if not source_vertices:
+                logger.warning(f"No valid edges to upsert for edge type {edge_type}")
+                return
+
+            # Build the REST++ endpoint URL
+            host = f"{self.config.url_without_port}:{self.config.port}"
+            graph_name = self.config.database
+            if not graph_name:
+                raise ValueError("Graph name (database) must be configured")
+
+            # Send the upsert request with username/password authentication
+            result = self._upsert_data(
+                payload,
+                host,
+                graph_name,
+                username=self.config.username,
+                password=self.config.password,
+            )
+
+            if result.get("error"):
+                logger.error(
+                    f"Error upserting edges of type {edge_type}: {result.get('message')}"
+                )
+            else:
+                # Count the edges in the payload
+                edge_count = 0
+                for source_edges in source_vertices.values():
+                    if edge_type in source_edges:
+                        if target_class in source_edges[edge_type]:
+                            edge_count += len(source_edges[edge_type][target_class])
+                logger.debug(
+                    f"Upserted {edge_count} edges of type {edge_type}: {result}"
+                )
+                return result
+
+        except Exception as e:
+            logger.error(f"Error batch inserting edges: {e}")
+
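Both accepted input shapes reach the same payload builder. A hypothetical call, assuming `tg` is an instance of this connection class and the graph schema already defines `Author`, `Paper`, and a `WROTE` edge type:

```python
# Tuple form: (source_doc, target_doc, edge_props)
edges = [({"_key": "a1"}, {"_key": "p7"}, {"weight": 0.9})]

# Equivalent dict form
edges = [{
    "_source_aux": {"_key": "a1"},
    "_target_aux": {"_key": "p7"},
    "_edge_props": {"weight": 0.9},
}]

tg.insert_edges_batch(edges, "Author", "Paper", relation_name="WROTE")
```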
+    def _extract_id(self, doc, match_keys):
+        """
+        Extract a vertex ID from a document based on match keys.
+
+        Resolution order: "_key" when present, otherwise a composite ID
+        joined with "_" from all populated match keys (which reduces to
+        the plain value when a single match key is given).
+        """
+        if not doc:
+            return None
+
+        # Try _key first (common in ArangoDB-style docs)
+        if "_key" in doc and doc["_key"]:
+            return str(doc["_key"])
+
+        # Composite ID from all populated match keys; None if none are set
+        id_parts = [
+            str(doc[key]) for key in match_keys if key in doc and doc[key] is not None
+        ]
+        return "_".join(id_parts) if id_parts else None
+
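A few hypothetical documents to illustrate the resolution order (`tg` again stands in for a connection instance):

```python
tg._extract_id({"_key": "k1", "doi": "d1"}, ("doi",))         # -> "k1" (_key wins)
tg._extract_id({"doi": "d1"}, ("doi",))                       # -> "d1"
tg._extract_id({"doi": "d1", "year": 2021}, ("doi", "year"))  # -> "d1_2021" (composite)
tg._extract_id({}, ("doi",))                                  # -> None
```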
+    def insert_return_batch(self, docs, class_name):
+        """
+        TigerGraph doesn't have INSERT...RETURN semantics like ArangoDB.
+        """
+        raise NotImplementedError(
+            "insert_return_batch not supported in TigerGraph - use upsert_docs_batch instead"
+        )
+
+    def _render_rest_filter(
+        self,
+        filters: list | dict | Clause | None,
+        field_types: dict[str, FieldType] | None = None,
+    ) -> str:
+        """Convert filter expressions to the REST++ filter format.
+
+        REST++ filters are written as fieldoperatorvalue, with no spaces
+        and quotes around string values, e.g. "hindex=10", "hindex>20",
+        or 'name="John"'.
+
+        Args:
+            filters: Filter expression to convert
+            field_types: Optional mapping of field names to FieldType enum values
+
+        Returns:
+            str: REST++ filter string (empty if no filters)
+        """
+        if filters is None:
+            return ""
+
+        if not isinstance(filters, Clause):
+            ff = Expression.from_dict(filters)
+        else:
+            ff = filters
+
+        # Use ExpressionFlavor.TIGERGRAPH with an empty doc_name to trigger
+        # the REST++ format; field_types helps with proper value quoting
+        return ff(
+            doc_name="",
+            kind=ExpressionFlavor.TIGERGRAPH,
+            field_types=field_types,
+        )
+
+    def _get_field_types_for_vertex(
+        self, vertex_name: str, vertex_config: None | VertexConfig = None
+    ) -> dict[str, FieldType] | None:
+        """Get field types for a vertex from the vertex config.
+
+        Args:
+            vertex_name: Name of the vertex type (or dbname)
+            vertex_config: Vertex configuration to use for the lookup
+
+        Returns:
+            dict[str, FieldType]: Mapping of field names to their FieldType
+            enum values, or None if not available
+        """
+        if vertex_config is None:
+            return None
+
+        try:
+            # Get fields with TigerGraph defaults applied
+            fields = vertex_config.fields(
+                vertex_name,
+                with_aux=False,
+                as_names=False,
+                db_flavor=DBFlavor.TIGERGRAPH,
+            )
+            # Build the field_types dict: {field_name: FieldType}
+            field_types = {}
+            for field in fields:
+                if field.type:
+                    # Convert the string type to a FieldType enum
+                    field_types[field.name] = FieldType(field.type)
+            return field_types if field_types else None
+        except (KeyError, ValueError):
+            # Vertex not found in config
+            return None
+
+    def fetch_docs(
+        self,
+        class_name,
+        filters: list | dict | Clause | None = None,
+        limit: int | None = None,
+        return_keys: list | None = None,
+        unset_keys: list | None = None,
+        **kwargs,
+    ):
+        """
+        Fetch documents (vertices) with filtering and projection via the REST++ API.
+
+        Args:
+            class_name: Vertex type name (or dbname)
+            filters: Filter expression (list, dict, or Clause)
+            limit: Maximum number of documents to return
+            return_keys: Keys to return (projection)
+            unset_keys: Keys to exclude (projection)
+            **kwargs: Additional parameters
+                field_types: Optional mapping of field names to FieldType enum
+                    values, used to properly quote string values in filters.
+                    Auto-detected from vertex_config when not provided.
+                vertex_config: Optional VertexConfig object for field type lookup
+
+        Returns:
+            list: List of fetched documents
+        """
+        try:
+            graph_name = self.config.database
+            if not graph_name:
+                raise ValueError("Graph name (database) must be configured")
+
+            # Get field_types from kwargs or auto-detect from vertex_config
+            field_types = kwargs.get("field_types")
+            if field_types is None:
+                vertex_config = kwargs.get("vertex_config")
+                field_types = self._get_field_types_for_vertex(
+                    class_name, vertex_config
+                )
+
+            # Build the REST++ filter string with field type information
+            filter_str = self._render_rest_filter(filters, field_types=field_types)
+
+            # Build the REST++ API endpoint with query parameters manually.
+            # Format: /graph/{graph_name}/vertices/{vertex_type}?filter=...&limit=...
+            # Example: /graph/g22c97325/vertices/Author?filter=hindex>20&limit=10
+            from urllib.parse import quote
+
+            endpoint = f"/graph/{graph_name}/vertices/{class_name}"
+            query_parts = []
+
+            if filter_str:
+                # URL-encode the filter string to handle special characters
+                encoded_filter = quote(filter_str, safe="=<>!&|")
+                query_parts.append(f"filter={encoded_filter}")
+            if limit is not None:
+                query_parts.append(f"limit={limit}")
+
+            if query_parts:
+                endpoint = f"{endpoint}?{'&'.join(query_parts)}"
+
+            logger.debug(f"Calling REST++ API: {endpoint}")
+
+            # Call the REST++ API directly (no params dict, the URL is prebuilt)
+            response = self._call_restpp_api(endpoint)
+
+            # Check for errors before parsing
+            if isinstance(response, dict) and response.get("error"):
+                raise Exception(
+                    f"REST++ API error: {response.get('message', response)}"
+                )
+
+            # Parse the REST++ response (vertices only)
+            result: list[dict[str, Any]] = self._parse_restpp_response(
+                response, is_edge=False
+            )
+
+            # Apply projection (client-side projection is acceptable for result formatting)
+            if return_keys is not None:
+                result = [
+                    {k: doc.get(k) for k in return_keys if k in doc}
+                    for doc in result
+                    if isinstance(doc, dict)
+                ]
+            elif unset_keys is not None:
+                result = [
+                    {k: v for k, v in doc.items() if k not in unset_keys}
+                    for doc in result
+                    if isinstance(doc, dict)
+                ]
+
+            return result
+
+        except Exception as e:
+            logger.error(f"Error fetching documents from {class_name} via REST++: {e}")
+            raise
+
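A hypothetical call, assuming `tg` is a configured connection; the filter dict shown is only a plausible shape for `Expression.from_dict`, whose exact schema is defined elsewhere in graflo:

```python
# Fetch up to 10 Author vertices with hindex > 20, projected to two keys.
authors = tg.fetch_docs(
    "Author",
    filters={"hindex": {">": 20}},  # assumed filter-dict shape
    limit=10,
    return_keys=["id", "hindex"],
)
```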
+    def fetch_edges(
+        self,
+        from_type: str,
+        from_id: str,
+        edge_type: str | None = None,
+        to_type: str | None = None,
+        to_id: str | None = None,
+        filters: list | dict | Clause | None = None,
+        limit: int | None = None,
+        return_keys: list | None = None,
+        unset_keys: list | None = None,
+        **kwargs,
+    ):
+        """
+        Fetch edges from TigerGraph using pyTigerGraph's getEdges method.
+
+        In TigerGraph, at least one vertex ID must be known before edges can
+        be fetched. getEdges also handles special characters in vertex IDs.
+
+        Args:
+            from_type: Source vertex type (required)
+            from_id: Source vertex ID (required)
+            edge_type: Optional edge type to filter by
+            to_type: Optional target vertex type to filter by (not used by getEdges)
+            to_id: Optional target vertex ID to filter by (not used by getEdges)
+            filters: Additional query filters (not supported by getEdges)
+            limit: Maximum number of edges to return (applied client-side)
+            return_keys: Keys to return (projection)
+            unset_keys: Keys to exclude (projection)
+            **kwargs: Additional parameters
+
+        Returns:
+            list: List of fetched edges
+        """
+        try:
+            if not from_type or not from_id:
+                raise ValueError(
+                    "from_type and from_id are required for fetching edges in TigerGraph"
+                )
+
+            # Use pyTigerGraph's getEdges method:
+            # getEdges(sourceVertexType, sourceVertexId, edgeType=None) -> list of edge dicts
+            logger.debug(
+                f"Fetching edges using pyTigerGraph: from_type={from_type}, "
+                f"from_id={from_id}, edge_type={edge_type}"
+            )
+
+            # Handle a None edge_type by passing an empty string (default behavior)
+            edge_type_str = edge_type if edge_type is not None else ""
+            edges = self.conn.getEdges(from_type, from_id, edge_type_str, fmt="py")
+
+            # Normalize the pyTigerGraph response. With fmt="py", getEdges
+            # returns a list of dicts like
+            # {"e_type": "...", "from": {...}, "to": {...}, "attributes": {...}}
+            result: list[dict[str, Any]]
+            if isinstance(edges, list):
+                result = cast(list[dict[str, Any]], edges)
+            elif isinstance(edges, dict):
+                # A single dict is wrapped in a list
+                result = [cast(dict[str, Any], edges)]
+            else:
+                # Fallback for unexpected types (str or DataFrame)
+                result = []
+
+            # Apply the limit client-side, since getEdges doesn't support one
+            if limit is not None and limit > 0:
+                result = result[:limit]
+
+            # Apply projection (client-side projection is acceptable for result formatting)
+            if return_keys is not None:
+                result = [
+                    {k: doc.get(k) for k in return_keys if k in doc}
+                    for doc in result
+                    if isinstance(doc, dict)
+                ]
+            elif unset_keys is not None:
+                result = [
+                    {k: v for k, v in doc.items() if k not in unset_keys}
+                    for doc in result
+                    if isinstance(doc, dict)
+                ]
+
+            return result
+
+        except Exception as e:
+            logger.error(f"Error fetching edges via pyTigerGraph: {e}")
+            raise
+
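A hypothetical call, assuming the graph holds an Author vertex `a1` with outgoing WROTE edges:

```python
edges = tg.fetch_edges("Author", "a1", edge_type="WROTE", limit=5)
for edge in edges:
    print(edge)  # raw getEdges dicts, optionally trimmed via return_keys/unset_keys
```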
+    def _parse_restpp_response(
+        self, response: dict | list, is_edge: bool = False
+    ) -> list[dict]:
+        """Parse a REST++ API response into a list of documents.
+
+        Args:
+            response: REST++ API response (dict or list)
+            is_edge: Whether this is an edge response (default: False for vertices)
+
+        Returns:
+            list: List of parsed documents
+        """
+        result = []
+        if isinstance(response, dict):
+            if "results" in response:
+                for data in response["results"]:
+                    if is_edge:
+                        # Edge format: {"e_type": ..., "from_id": ..., "to_id": ..., "attributes": {...}}
+                        edge_type = data.get("e_type", "")
+                        from_id = data.get("from_id", data.get("from", ""))
+                        to_id = data.get("to_id", data.get("to", ""))
+                        attributes = data.get("attributes", {})
+                        doc = {
+                            **attributes,
+                            "edge_type": edge_type,
+                            "from_id": from_id,
+                            "to_id": to_id,
+                        }
+                    else:
+                        # Vertex format: {"v_id": ..., "attributes": {...}}
+                        vertex_id = data.get("v_id", data.get("id"))
+                        attributes = data.get("attributes", {})
+                        doc = {**attributes, "id": vertex_id}
+                    result.append(doc)
+        elif isinstance(response, list):
+            # Direct list response
+            for data in response:
+                if isinstance(data, dict):
+                    if is_edge:
+                        edge_type = data.get("e_type", "")
+                        from_id = data.get("from_id", data.get("from", ""))
+                        to_id = data.get("to_id", data.get("to", ""))
+                        attributes = data.get("attributes", data)
+                        doc = {
+                            **attributes,
+                            "edge_type": edge_type,
+                            "from_id": from_id,
+                            "to_id": to_id,
+                        }
+                    else:
+                        vertex_id = data.get("v_id", data.get("id"))
+                        attributes = data.get("attributes", data)
+                        doc = {**attributes, "id": vertex_id}
+                    result.append(doc)
+        return result
+
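For illustration, a hypothetical vertex response and the document it parses to:

```python
response = {
    "error": False,
    "results": [{"v_id": "a1", "attributes": {"name": "Ada", "hindex": 42}}],
}
docs = tg._parse_restpp_response(response, is_edge=False)
# docs == [{"name": "Ada", "hindex": 42, "id": "a1"}]
```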
+    def fetch_present_documents(
+        self,
+        batch,
+        class_name,
+        match_keys,
+        keep_keys,
+        flatten=False,
+        filters: list | dict | None = None,
+    ):
+        """
+        Check which documents from the batch are present in the database.
+
+        Each document is looked up individually via getVerticesById (one
+        request per document). Returns a mapping of batch index to the
+        matching documents, restricted to keep_keys; flatten and filters
+        are accepted for interface compatibility but not used here.
+        """
+        try:
+            present_docs = {}
+
+            for i, doc in enumerate(batch):
+                vertex_id = self._extract_id(doc, match_keys)
+                if not vertex_id:
+                    continue
+
+                try:
+                    vertex_data = self.conn.getVerticesById(class_name, vertex_id)
+                    if vertex_data and vertex_id in vertex_data:
+                        # Extract the requested keys
+                        vertex_attrs = vertex_data[vertex_id].get("attributes", {})
+                        filtered_doc = {}
+
+                        for key in keep_keys:
+                            if key == "id":
+                                filtered_doc[key] = vertex_id
+                            elif key in vertex_attrs:
+                                filtered_doc[key] = vertex_attrs[key]
+
+                        present_docs[i] = [filtered_doc]
+
+                except Exception:
+                    # The vertex doesn't exist or an error occurred
+                    continue
+
+            return present_docs
+
+        except Exception as e:
+            logger.error(f"Error fetching present documents: {e}")
+            return {}
+
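A hypothetical round trip, assuming Author `a1` has already been ingested:

```python
present = tg.fetch_present_documents(
    batch=[{"_key": "a1"}, {"_key": "not-ingested"}],
    class_name="Author",
    match_keys=("_key",),
    keep_keys=["id"],
)
# present == {0: [{"id": "a1"}]}; index 1 does not appear in the mapping
```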
+    def aggregate(
+        self,
+        class_name,
+        aggregation_function: AggregationType,
+        discriminant: str | None = None,
+        aggregated_field: str | None = None,
+        filters: list | dict | None = None,
+    ):
+        """
+        Perform aggregation operations.
+
+        Only a plain vertex count is handled natively; other aggregations
+        require a custom GSQL query.
+        """
+        try:
+            if aggregation_function == AggregationType.COUNT and discriminant is None:
+                # Simple vertex count
+                count = self.conn.getVertexCount(class_name)
+                return [{"_value": count}]
+            else:
+                # Complex aggregations require custom GSQL queries
+                logger.warning(
+                    f"Complex aggregation {aggregation_function} requires custom GSQL implementation"
+                )
+                return []
+        except Exception as e:
+            logger.error(f"Error in aggregation: {e}")
+            return []
+
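For example, a plain count over a hypothetical Author vertex type:

```python
# AggregationType is the enum already imported by this module.
count = tg.aggregate("Author", AggregationType.COUNT)
# -> [{"_value": <number of Author vertices>}]
```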
+    def keep_absent_documents(
+        self,
+        batch,
+        class_name,
+        match_keys,
+        keep_keys,
+        filters: list | dict | None = None,
+    ):
+        """
+        Return the documents from the batch that are NOT present in the database.
+        """
+        present_docs_indices = self.fetch_present_documents(
+            batch=batch,
+            class_name=class_name,
+            match_keys=match_keys,
+            keep_keys=keep_keys,
+            flatten=False,
+            filters=filters,
+        )
+
+        absent_indices = sorted(
+            set(range(len(batch))) - set(present_docs_indices.keys())
+        )
+        return [batch[i] for i in absent_indices]
+
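Continuing the hypothetical example from fetch_present_documents, the complement comes back here:

```python
new_docs = tg.keep_absent_documents(
    batch=[{"_key": "a1"}, {"_key": "not-ingested"}],
    class_name="Author",
    match_keys=("_key",),
    keep_keys=["id"],
)
# new_docs == [{"_key": "not-ingested"}]
```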
+    def define_indexes(self, schema: Schema):
+        """Define all indexes from the schema."""
+        try:
+            self.define_vertex_indices(schema.vertex_config)
+            self.define_edge_indices(schema.edge_config.edges_list(include_aux=True))
+        except Exception as e:
+            logger.error(f"Error defining indexes: {e}")
+
+    def fetch_indexes(self, vertex_type: str | None = None):
+        """
+        Fetch indexes for vertex types.
+
+        In TigerGraph, indexes are associated with vertex types, and full
+        introspection requires GSQL (e.g. DESCRIBE VERTEX or SHOW STAT
+        INDEX), which this implementation does not yet call; it currently
+        records a placeholder entry per vertex type.
+
+        Args:
+            vertex_type: Optional vertex type name to fetch indexes for.
+                If None, fetches indexes for all vertex types.
+
+        Returns:
+            dict: Mapping of vertex type names to their indexes.
+                Format: {vertex_type: [{"name": "index_name", ...}, ...]}
+        """
+        try:
+            with self._ensure_graph_context():
+                result = {}
+
+                if vertex_type:
+                    vertex_types = [vertex_type]
+                else:
+                    vertex_types = self.conn.getVertexTypes(force=True)
+
+                for v_type in vertex_types:
+                    try:
+                        # Placeholder entry until GSQL-based index
+                        # introspection is wired in
+                        indexes = [{"name": "stat_index", "source": "show_stat"}]
+                        result[v_type] = indexes
+                    except Exception as e:
+                        logger.debug(
+                            f"Could not fetch indexes for vertex type {v_type}: {e}"
+                        )
+                        result[v_type] = []
+
+                return result
+        except Exception as e:
+            logger.error(f"Error fetching indexes: {e}")
+            return {}