robosystems-client 0.2.3__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Files changed (104)
  1. robosystems_client/api/agent/auto_select_agent.py +1 -41
  2. robosystems_client/api/agent/batch_process_queries.py +1 -41
  3. robosystems_client/api/agent/execute_specific_agent.py +1 -41
  4. robosystems_client/api/agent/get_agent_metadata.py +5 -49
  5. robosystems_client/api/agent/list_agents.py +4 -42
  6. robosystems_client/api/agent/recommend_agent.py +5 -45
  7. robosystems_client/api/auth/generate_sso_token.py +0 -18
  8. robosystems_client/api/auth/get_current_auth_user.py +14 -74
  9. robosystems_client/api/auth/logout_user.py +14 -50
  10. robosystems_client/api/auth/refresh_auth_session.py +14 -50
  11. robosystems_client/api/auth/resend_verification_email.py +14 -74
  12. robosystems_client/api/backup/create_backup.py +5 -45
  13. robosystems_client/api/backup/get_backup_download_url.py +4 -42
  14. robosystems_client/api/backup/get_backup_stats.py +5 -49
  15. robosystems_client/api/backup/list_backups.py +4 -42
  16. robosystems_client/api/backup/restore_backup.py +5 -45
  17. robosystems_client/api/connections/create_connection.py +5 -45
  18. robosystems_client/api/connections/create_link_token.py +5 -45
  19. robosystems_client/api/connections/delete_connection.py +5 -49
  20. robosystems_client/api/connections/exchange_link_token.py +5 -45
  21. robosystems_client/api/connections/get_connection.py +5 -49
  22. robosystems_client/api/connections/get_connection_options.py +5 -49
  23. robosystems_client/api/connections/init_o_auth.py +5 -45
  24. robosystems_client/api/connections/list_connections.py +4 -42
  25. robosystems_client/api/connections/oauth_callback.py +5 -45
  26. robosystems_client/api/connections/sync_connection.py +5 -45
  27. robosystems_client/api/graph_analytics/get_graph_metrics.py +5 -49
  28. robosystems_client/api/graph_analytics/get_graph_usage_stats.py +4 -42
  29. robosystems_client/api/graph_billing/get_current_graph_bill.py +5 -49
  30. robosystems_client/api/graph_billing/get_graph_billing_history.py +4 -42
  31. robosystems_client/api/graph_billing/get_graph_monthly_bill.py +5 -49
  32. robosystems_client/api/graph_billing/get_graph_usage_details.py +4 -42
  33. robosystems_client/api/graph_credits/check_credit_balance.py +0 -38
  34. robosystems_client/api/graph_credits/check_storage_limits.py +1 -45
  35. robosystems_client/api/graph_credits/get_credit_summary.py +1 -45
  36. robosystems_client/api/graph_credits/get_storage_usage.py +0 -38
  37. robosystems_client/api/graph_credits/list_credit_transactions.py +4 -42
  38. robosystems_client/api/graph_health/get_database_health.py +5 -49
  39. robosystems_client/api/graph_info/get_database_info.py +5 -49
  40. robosystems_client/api/graph_limits/get_graph_limits.py +5 -49
  41. robosystems_client/api/graphs/create_graph.py +21 -57
  42. robosystems_client/api/graphs/get_available_extensions.py +131 -15
  43. robosystems_client/api/graphs/get_graphs.py +154 -79
  44. robosystems_client/api/graphs/select_graph.py +117 -49
  45. robosystems_client/api/mcp/call_mcp_tool.py +24 -47
  46. robosystems_client/api/mcp/list_mcp_tools.py +13 -61
  47. robosystems_client/api/operations/cancel_operation.py +1 -45
  48. robosystems_client/api/operations/get_operation_status.py +1 -45
  49. robosystems_client/api/query/execute_cypher_query.py +69 -53
  50. robosystems_client/api/schema/export_graph_schema.py +223 -65
  51. robosystems_client/api/schema/get_graph_schema.py +137 -79
  52. robosystems_client/api/schema/validate_schema.py +5 -45
  53. robosystems_client/api/subgraphs/create_subgraph.py +5 -45
  54. robosystems_client/api/subgraphs/delete_subgraph.py +5 -45
  55. robosystems_client/api/subgraphs/get_subgraph_info.py +5 -49
  56. robosystems_client/api/subgraphs/get_subgraph_quota.py +5 -49
  57. robosystems_client/api/subgraphs/list_subgraphs.py +5 -49
  58. robosystems_client/api/tables/delete_file.py +181 -301
  59. robosystems_client/api/tables/get_file_info.py +117 -265
  60. robosystems_client/api/tables/get_upload_url.py +193 -389
  61. robosystems_client/api/tables/ingest_tables.py +277 -465
  62. robosystems_client/api/tables/list_table_files.py +193 -373
  63. robosystems_client/api/tables/list_tables.py +189 -361
  64. robosystems_client/api/tables/query_tables.py +85 -141
  65. robosystems_client/api/tables/update_file_status.py +205 -349
  66. robosystems_client/api/user/create_user_api_key.py +1 -41
  67. robosystems_client/api/user/get_all_credit_summaries.py +14 -111
  68. robosystems_client/api/user/get_current_user.py +14 -75
  69. robosystems_client/api/user/list_user_api_keys.py +14 -75
  70. robosystems_client/api/user/revoke_user_api_key.py +1 -45
  71. robosystems_client/api/user/update_user.py +1 -41
  72. robosystems_client/api/user/update_user_api_key.py +1 -41
  73. robosystems_client/api/user/update_user_password.py +1 -41
  74. robosystems_client/api/user_analytics/get_detailed_user_analytics.py +0 -38
  75. robosystems_client/api/user_analytics/get_user_usage_overview.py +14 -75
  76. robosystems_client/api/user_limits/get_all_shared_repository_limits.py +14 -105
  77. robosystems_client/api/user_limits/get_shared_repository_limits.py +1 -45
  78. robosystems_client/api/user_limits/get_user_limits.py +14 -75
  79. robosystems_client/api/user_limits/get_user_usage.py +14 -75
  80. robosystems_client/api/user_subscriptions/cancel_shared_repository_subscription.py +1 -45
  81. robosystems_client/api/user_subscriptions/get_repository_credits.py +1 -45
  82. robosystems_client/api/user_subscriptions/get_shared_repository_credits.py +14 -75
  83. robosystems_client/api/user_subscriptions/get_user_shared_subscriptions.py +0 -38
  84. robosystems_client/api/user_subscriptions/subscribe_to_shared_repository.py +1 -41
  85. robosystems_client/api/user_subscriptions/upgrade_shared_repository_subscription.py +1 -41
  86. robosystems_client/extensions/__init__.py +8 -1
  87. robosystems_client/extensions/auth_integration.py +1 -2
  88. robosystems_client/extensions/query_client.py +3 -2
  89. robosystems_client/extensions/sse_client.py +1 -1
  90. robosystems_client/extensions/table_ingest_client.py +5 -0
  91. robosystems_client/extensions/utils.py +2 -2
  92. robosystems_client/models/__init__.py +4 -4
  93. robosystems_client/models/auth_response.py +40 -0
  94. robosystems_client/models/create_graph_request.py +4 -3
  95. robosystems_client/models/cypher_query_request.py +5 -22
  96. robosystems_client/models/schema_export_response.py +4 -2
  97. robosystems_client/models/schema_info_response.py +77 -0
  98. robosystems_client/models/{get_graph_schema_response_getgraphschema.py → schema_info_response_schema.py} +6 -6
  99. robosystems_client/models/schema_validation_response.py +7 -6
  100. robosystems_client/models/table_query_request.py +37 -2
  101. {robosystems_client-0.2.3.dist-info → robosystems_client-0.2.5.dist-info}/METADATA +2 -4
  102. {robosystems_client-0.2.3.dist-info → robosystems_client-0.2.5.dist-info}/RECORD +104 -103
  103. {robosystems_client-0.2.3.dist-info → robosystems_client-0.2.5.dist-info}/WHEEL +0 -0
  104. {robosystems_client-0.2.3.dist-info → robosystems_client-0.2.5.dist-info}/licenses/LICENSE +0 -0
@@ -9,35 +9,19 @@ from ...models.bulk_ingest_request import BulkIngestRequest
 from ...models.bulk_ingest_response import BulkIngestResponse
 from ...models.error_response import ErrorResponse
 from ...models.http_validation_error import HTTPValidationError
-from ...types import UNSET, Response, Unset
+from ...types import Response


 def _get_kwargs(
     graph_id: str,
     *,
     body: BulkIngestRequest,
-    token: Union[None, Unset, str] = UNSET,
-    authorization: Union[None, Unset, str] = UNSET,
 ) -> dict[str, Any]:
     headers: dict[str, Any] = {}
-    if not isinstance(authorization, Unset):
-        headers["authorization"] = authorization
-
-    params: dict[str, Any] = {}
-
-    json_token: Union[None, Unset, str]
-    if isinstance(token, Unset):
-        json_token = UNSET
-    else:
-        json_token = token
-    params["token"] = json_token
-
-    params = {k: v for k, v in params.items() if v is not UNSET and v is not None}

     _kwargs: dict[str, Any] = {
         "method": "post",
         "url": f"/v1/graphs/{graph_id}/tables/ingest",
-        "params": params,
     }

     _kwargs["json"] = body.to_dict()
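
In practice this hunk means table-ingest calls no longer take per-request `token=` or `authorization=` keyword arguments; credentials ride on the client object instead. A minimal migration sketch, assuming the standard entry points of an openapi-python-client package (`AuthenticatedClient`, the `ingest_tables` endpoint module re-exported under `robosystems_client.api.tables`) and that `BulkIngestRequest` accepts the `ignore_errors`/`rebuild` fields named in the endpoint docstring; the graph id and token are placeholders:

```python
# Sketch of a 0.2.5-style call. AuthenticatedClient's constructor arguments and
# the BulkIngestRequest fields are assumptions based on generated-client
# conventions and the endpoint docstring; "kg123"/"YOUR_TOKEN" are placeholders.
from robosystems_client import AuthenticatedClient
from robosystems_client.api.tables import ingest_tables
from robosystems_client.models import BulkIngestRequest

client = AuthenticatedClient(
    base_url="https://api.robosystems.ai",
    token="YOUR_TOKEN",  # auth now lives on the client, not on each call
)

# 0.2.3 also accepted token=... and authorization=... here; 0.2.5 does not.
response = ingest_tables.sync_detailed(
    graph_id="kg123",
    client=client,
    body=BulkIngestRequest(ignore_errors=True, rebuild=False),
)
```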
@@ -107,123 +91,80 @@ def sync_detailed(
     *,
     client: AuthenticatedClient,
     body: BulkIngestRequest,
-    token: Union[None, Unset, str] = UNSET,
-    authorization: Union[None, Unset, str] = UNSET,
 ) -> Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
-    r""" Ingest Tables to Graph
-
-    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
-
-    **Purpose:**
-    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
-    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
-
-    **Use Cases:**
-    - Initial graph population from uploaded data
-    - Incremental data updates with new files
-    - Complete database rebuild from source files
-    - Recovery from failed ingestion attempts
-
-    **Workflow:**
-    1. Upload data files via `POST /tables/{table_name}/files`
-    2. Files are validated and marked as 'uploaded'
-    3. Trigger ingestion: `POST /tables/ingest`
-    4. DuckDB staging tables created from S3 patterns
-    5. Data copied row-by-row from DuckDB to Kuzu
-    6. Per-table results and metrics returned
-
-    **Rebuild Feature:**
-    Setting `rebuild=true` regenerates the entire graph database from scratch:
-    - Deletes existing Kuzu database
-    - Recreates with fresh schema from active GraphSchema
-    - Ingests all data files
-    - Safe operation - S3 is source of truth
-    - Useful for schema changes or data corrections
-    - Graph marked as 'rebuilding' during process
-
-    **Error Handling:**
-    - Per-table error isolation with `ignore_errors` flag
-    - Partial success support (some tables succeed, some fail)
-    - Detailed error reporting per table
-    - Graph status tracking throughout process
-    - Automatic failure recovery and cleanup
-
-    **Performance:**
-    - Processes all tables in sequence
-    - Each table timed independently
-    - Total execution metrics provided
-    - Scales to thousands of files
-    - Optimized for large datasets
-
-    **Example Request:**
-    ```bash
-    curl -X POST \"https://api.robosystems.ai/v1/graphs/kg123/tables/ingest\" \
-      -H \"Authorization: Bearer YOUR_TOKEN\" \
-      -H \"Content-Type: application/json\" \
-      -d '{
-        \"ignore_errors\": true,
-        \"rebuild\": false
-      }'
-    ```
-
-    **Example Response:**
-    ```json
-    {
-      \"status\": \"success\",
-      \"graph_id\": \"kg123\",
-      \"total_tables\": 5,
-      \"successful_tables\": 5,
-      \"failed_tables\": 0,
-      \"skipped_tables\": 0,
-      \"total_rows_ingested\": 25000,
-      \"total_execution_time_ms\": 15420.5,
-      \"results\": [
-        {
-          \"table_name\": \"Entity\",
-          \"status\": \"success\",
-          \"rows_ingested\": 5000,
-          \"execution_time_ms\": 3200.1,
-          \"error\": null
-        }
-      ]
-    }
-    ```
-
-    **Concurrency Control:**
-    Only one ingestion can run per graph at a time. If another ingestion is in progress,
-    you'll receive a 409 Conflict error. The distributed lock automatically expires after
-    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
-
-    **Tips:**
-    - Only files with 'uploaded' status are processed
-    - Tables with no uploaded files are skipped
-    - Use `ignore_errors=false` for strict validation
-    - Monitor progress via per-table results
-    - Check graph metadata for rebuild status
-    - Wait for current ingestion to complete before starting another
-
-    **Note:**
-    Table ingestion is included - no credit consumption.
-
-    Args:
-        graph_id (str): Graph database identifier
-        token (Union[None, Unset, str]): JWT token for SSE authentication
-        authorization (Union[None, Unset, str]):
-        body (BulkIngestRequest):
-
-    Raises:
-        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
-        httpx.TimeoutException: If the request takes longer than Client.timeout.
-
-    Returns:
-        Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]
-    """
+    """Ingest Tables to Graph
+
+    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
+
+    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
+    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
+
+    **Use Cases:**
+    - Initial graph population from uploaded data
+    - Incremental data updates with new files
+    - Complete database rebuild from source files
+    - Recovery from failed ingestion attempts
+
+    **Workflow:**
+    1. Upload data files via `POST /tables/{table_name}/files`
+    2. Files are validated and marked as 'uploaded'
+    3. Trigger ingestion: `POST /tables/ingest`
+    4. DuckDB staging tables created from S3 patterns
+    5. Data copied row-by-row from DuckDB to Kuzu
+    6. Per-table results and metrics returned
+
+    **Rebuild Feature:**
+    Setting `rebuild=true` regenerates the entire graph database from scratch:
+    - Deletes existing Kuzu database
+    - Recreates with fresh schema from active GraphSchema
+    - Ingests all data files
+    - Safe operation - S3 is source of truth
+    - Useful for schema changes or data corrections
+    - Graph marked as 'rebuilding' during process
+
+    **Error Handling:**
+    - Per-table error isolation with `ignore_errors` flag
+    - Partial success support (some tables succeed, some fail)
+    - Detailed error reporting per table
+    - Graph status tracking throughout process
+    - Automatic failure recovery and cleanup
+
+    **Performance:**
+    - Processes all tables in sequence
+    - Each table timed independently
+    - Total execution metrics provided
+    - Scales to thousands of files
+    - Optimized for large datasets
+
+    **Concurrency Control:**
+    Only one ingestion can run per graph at a time. If another ingestion is in progress,
+    you'll receive a 409 Conflict error. The distributed lock automatically expires after
+    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
+
+    **Important Notes:**
+    - Only files with 'uploaded' status are processed
+    - Tables with no uploaded files are skipped
+    - Use `ignore_errors=false` for strict validation
+    - Monitor progress via per-table results
+    - Check graph metadata for rebuild status
+    - Wait for current ingestion to complete before starting another
+    - Table ingestion is included - no credit consumption
+
+    Args:
+        graph_id (str):
+        body (BulkIngestRequest):
+
+    Raises:
+        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
+        httpx.TimeoutException: If the request takes longer than Client.timeout.
+
+    Returns:
+        Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]
+    """

     kwargs = _get_kwargs(
         graph_id=graph_id,
         body=body,
-        token=token,
-        authorization=authorization,
     )

     response = client.get_httpx_client().request(
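
Because `sync_detailed` returns the full `Response` rather than raising on a busy graph, the 409 Conflict documented under **Concurrency Control** can be handled by checking the status code. A rough sketch, reusing the placeholder client and request from the earlier example; the retry count and delay are arbitrary illustrative choices:

```python
import time
from http import HTTPStatus

# Retry around the documented per-graph ingestion lock (409 Conflict).
for attempt in range(3):
    response = ingest_tables.sync_detailed(
        graph_id="kg123",
        client=client,
        body=BulkIngestRequest(ignore_errors=True, rebuild=False),
    )
    if response.status_code != HTTPStatus.CONFLICT:
        break
    time.sleep(60)  # another ingestion holds the lock; wait, then retry
```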
@@ -238,124 +179,81 @@ def sync(
     *,
     client: AuthenticatedClient,
     body: BulkIngestRequest,
-    token: Union[None, Unset, str] = UNSET,
-    authorization: Union[None, Unset, str] = UNSET,
 ) -> Optional[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
-    r""" Ingest Tables to Graph
-
-    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
-
-    **Purpose:**
-    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
-    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
-
-    **Use Cases:**
-    - Initial graph population from uploaded data
-    - Incremental data updates with new files
-    - Complete database rebuild from source files
-    - Recovery from failed ingestion attempts
-
-    **Workflow:**
-    1. Upload data files via `POST /tables/{table_name}/files`
-    2. Files are validated and marked as 'uploaded'
-    3. Trigger ingestion: `POST /tables/ingest`
-    4. DuckDB staging tables created from S3 patterns
-    5. Data copied row-by-row from DuckDB to Kuzu
-    6. Per-table results and metrics returned
-
-    **Rebuild Feature:**
-    Setting `rebuild=true` regenerates the entire graph database from scratch:
-    - Deletes existing Kuzu database
-    - Recreates with fresh schema from active GraphSchema
-    - Ingests all data files
-    - Safe operation - S3 is source of truth
-    - Useful for schema changes or data corrections
-    - Graph marked as 'rebuilding' during process
-
-    **Error Handling:**
-    - Per-table error isolation with `ignore_errors` flag
-    - Partial success support (some tables succeed, some fail)
-    - Detailed error reporting per table
-    - Graph status tracking throughout process
-    - Automatic failure recovery and cleanup
-
-    **Performance:**
-    - Processes all tables in sequence
-    - Each table timed independently
-    - Total execution metrics provided
-    - Scales to thousands of files
-    - Optimized for large datasets
-
-    **Example Request:**
-    ```bash
-    curl -X POST \"https://api.robosystems.ai/v1/graphs/kg123/tables/ingest\" \
-      -H \"Authorization: Bearer YOUR_TOKEN\" \
-      -H \"Content-Type: application/json\" \
-      -d '{
-        \"ignore_errors\": true,
-        \"rebuild\": false
-      }'
-    ```
-
-    **Example Response:**
-    ```json
-    {
-      \"status\": \"success\",
-      \"graph_id\": \"kg123\",
-      \"total_tables\": 5,
-      \"successful_tables\": 5,
-      \"failed_tables\": 0,
-      \"skipped_tables\": 0,
-      \"total_rows_ingested\": 25000,
-      \"total_execution_time_ms\": 15420.5,
-      \"results\": [
-        {
-          \"table_name\": \"Entity\",
-          \"status\": \"success\",
-          \"rows_ingested\": 5000,
-          \"execution_time_ms\": 3200.1,
-          \"error\": null
-        }
-      ]
-    }
-    ```
-
-    **Concurrency Control:**
-    Only one ingestion can run per graph at a time. If another ingestion is in progress,
-    you'll receive a 409 Conflict error. The distributed lock automatically expires after
-    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
-
-    **Tips:**
-    - Only files with 'uploaded' status are processed
-    - Tables with no uploaded files are skipped
-    - Use `ignore_errors=false` for strict validation
-    - Monitor progress via per-table results
-    - Check graph metadata for rebuild status
-    - Wait for current ingestion to complete before starting another
-
-    **Note:**
-    Table ingestion is included - no credit consumption.
-
-    Args:
-        graph_id (str): Graph database identifier
-        token (Union[None, Unset, str]): JWT token for SSE authentication
-        authorization (Union[None, Unset, str]):
-        body (BulkIngestRequest):
-
-    Raises:
-        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
-        httpx.TimeoutException: If the request takes longer than Client.timeout.
-
-    Returns:
-        Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]
-    """
+    """Ingest Tables to Graph
+
+    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
+
+    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
+    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
+
+    **Use Cases:**
+    - Initial graph population from uploaded data
+    - Incremental data updates with new files
+    - Complete database rebuild from source files
+    - Recovery from failed ingestion attempts
+
+    **Workflow:**
+    1. Upload data files via `POST /tables/{table_name}/files`
+    2. Files are validated and marked as 'uploaded'
+    3. Trigger ingestion: `POST /tables/ingest`
+    4. DuckDB staging tables created from S3 patterns
+    5. Data copied row-by-row from DuckDB to Kuzu
+    6. Per-table results and metrics returned
+
+    **Rebuild Feature:**
+    Setting `rebuild=true` regenerates the entire graph database from scratch:
+    - Deletes existing Kuzu database
+    - Recreates with fresh schema from active GraphSchema
+    - Ingests all data files
+    - Safe operation - S3 is source of truth
+    - Useful for schema changes or data corrections
+    - Graph marked as 'rebuilding' during process
+
+    **Error Handling:**
+    - Per-table error isolation with `ignore_errors` flag
+    - Partial success support (some tables succeed, some fail)
+    - Detailed error reporting per table
+    - Graph status tracking throughout process
+    - Automatic failure recovery and cleanup
+
+    **Performance:**
+    - Processes all tables in sequence
+    - Each table timed independently
+    - Total execution metrics provided
+    - Scales to thousands of files
+    - Optimized for large datasets
+
+    **Concurrency Control:**
+    Only one ingestion can run per graph at a time. If another ingestion is in progress,
+    you'll receive a 409 Conflict error. The distributed lock automatically expires after
+    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
+
+    **Important Notes:**
+    - Only files with 'uploaded' status are processed
+    - Tables with no uploaded files are skipped
+    - Use `ignore_errors=false` for strict validation
+    - Monitor progress via per-table results
+    - Check graph metadata for rebuild status
+    - Wait for current ingestion to complete before starting another
+    - Table ingestion is included - no credit consumption
+
+    Args:
+        graph_id (str):
+        body (BulkIngestRequest):
+
+    Raises:
+        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
+        httpx.TimeoutException: If the request takes longer than Client.timeout.
+
+    Returns:
+        Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]
+    """

     return sync_detailed(
         graph_id=graph_id,
         client=client,
         body=body,
-        token=token,
-        authorization=authorization,
     ).parsed

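
`sync` is simply `sync_detailed(...).parsed`, so callers get back the parsed model, an error model, or `None`. A sketch of consuming the per-table results, with field names taken from the example response in the removed docstring; attribute access on the result items assumes the usual attrs-style generated models:

```python
from robosystems_client.models import BulkIngestResponse

result = ingest_tables.sync(
    graph_id="kg123",
    client=client,
    body=BulkIngestRequest(ignore_errors=True, rebuild=False),
)

if isinstance(result, BulkIngestResponse):
    # Per-table outcomes; names mirror the documented example response.
    for table in result.results:
        print(table.table_name, table.status, table.rows_ingested, table.error)
else:
    print("No bulk response; got:", result)
```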
@@ -364,123 +262,80 @@ async def asyncio_detailed(
     *,
     client: AuthenticatedClient,
     body: BulkIngestRequest,
-    token: Union[None, Unset, str] = UNSET,
-    authorization: Union[None, Unset, str] = UNSET,
 ) -> Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
-    r""" Ingest Tables to Graph
-
-    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
-
-    **Purpose:**
-    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
-    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
-
-    **Use Cases:**
-    - Initial graph population from uploaded data
-    - Incremental data updates with new files
-    - Complete database rebuild from source files
-    - Recovery from failed ingestion attempts
-
-    **Workflow:**
-    1. Upload data files via `POST /tables/{table_name}/files`
-    2. Files are validated and marked as 'uploaded'
-    3. Trigger ingestion: `POST /tables/ingest`
-    4. DuckDB staging tables created from S3 patterns
-    5. Data copied row-by-row from DuckDB to Kuzu
-    6. Per-table results and metrics returned
-
-    **Rebuild Feature:**
-    Setting `rebuild=true` regenerates the entire graph database from scratch:
-    - Deletes existing Kuzu database
-    - Recreates with fresh schema from active GraphSchema
-    - Ingests all data files
-    - Safe operation - S3 is source of truth
-    - Useful for schema changes or data corrections
-    - Graph marked as 'rebuilding' during process
-
-    **Error Handling:**
-    - Per-table error isolation with `ignore_errors` flag
-    - Partial success support (some tables succeed, some fail)
-    - Detailed error reporting per table
-    - Graph status tracking throughout process
-    - Automatic failure recovery and cleanup
-
-    **Performance:**
-    - Processes all tables in sequence
-    - Each table timed independently
-    - Total execution metrics provided
-    - Scales to thousands of files
-    - Optimized for large datasets
-
-    **Example Request:**
-    ```bash
-    curl -X POST \"https://api.robosystems.ai/v1/graphs/kg123/tables/ingest\" \
-      -H \"Authorization: Bearer YOUR_TOKEN\" \
-      -H \"Content-Type: application/json\" \
-      -d '{
-        \"ignore_errors\": true,
-        \"rebuild\": false
-      }'
-    ```
-
-    **Example Response:**
-    ```json
-    {
-      \"status\": \"success\",
-      \"graph_id\": \"kg123\",
-      \"total_tables\": 5,
-      \"successful_tables\": 5,
-      \"failed_tables\": 0,
-      \"skipped_tables\": 0,
-      \"total_rows_ingested\": 25000,
-      \"total_execution_time_ms\": 15420.5,
-      \"results\": [
-        {
-          \"table_name\": \"Entity\",
-          \"status\": \"success\",
-          \"rows_ingested\": 5000,
-          \"execution_time_ms\": 3200.1,
-          \"error\": null
-        }
-      ]
-    }
-    ```
-
-    **Concurrency Control:**
-    Only one ingestion can run per graph at a time. If another ingestion is in progress,
-    you'll receive a 409 Conflict error. The distributed lock automatically expires after
-    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
-
-    **Tips:**
-    - Only files with 'uploaded' status are processed
-    - Tables with no uploaded files are skipped
-    - Use `ignore_errors=false` for strict validation
-    - Monitor progress via per-table results
-    - Check graph metadata for rebuild status
-    - Wait for current ingestion to complete before starting another
-
-    **Note:**
-    Table ingestion is included - no credit consumption.
-
-    Args:
-        graph_id (str): Graph database identifier
-        token (Union[None, Unset, str]): JWT token for SSE authentication
-        authorization (Union[None, Unset, str]):
-        body (BulkIngestRequest):
-
-    Raises:
-        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
-        httpx.TimeoutException: If the request takes longer than Client.timeout.
-
-    Returns:
-        Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]
-    """
+    """Ingest Tables to Graph
+
+    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
+
+    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
+    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
+
+    **Use Cases:**
+    - Initial graph population from uploaded data
+    - Incremental data updates with new files
+    - Complete database rebuild from source files
+    - Recovery from failed ingestion attempts
+
+    **Workflow:**
+    1. Upload data files via `POST /tables/{table_name}/files`
+    2. Files are validated and marked as 'uploaded'
+    3. Trigger ingestion: `POST /tables/ingest`
+    4. DuckDB staging tables created from S3 patterns
+    5. Data copied row-by-row from DuckDB to Kuzu
+    6. Per-table results and metrics returned
+
+    **Rebuild Feature:**
+    Setting `rebuild=true` regenerates the entire graph database from scratch:
+    - Deletes existing Kuzu database
+    - Recreates with fresh schema from active GraphSchema
+    - Ingests all data files
+    - Safe operation - S3 is source of truth
+    - Useful for schema changes or data corrections
+    - Graph marked as 'rebuilding' during process
+
+    **Error Handling:**
+    - Per-table error isolation with `ignore_errors` flag
+    - Partial success support (some tables succeed, some fail)
+    - Detailed error reporting per table
+    - Graph status tracking throughout process
+    - Automatic failure recovery and cleanup
+
+    **Performance:**
+    - Processes all tables in sequence
+    - Each table timed independently
+    - Total execution metrics provided
+    - Scales to thousands of files
+    - Optimized for large datasets
+
+    **Concurrency Control:**
+    Only one ingestion can run per graph at a time. If another ingestion is in progress,
+    you'll receive a 409 Conflict error. The distributed lock automatically expires after
+    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
+
+    **Important Notes:**
+    - Only files with 'uploaded' status are processed
+    - Tables with no uploaded files are skipped
+    - Use `ignore_errors=false` for strict validation
+    - Monitor progress via per-table results
+    - Check graph metadata for rebuild status
+    - Wait for current ingestion to complete before starting another
+    - Table ingestion is included - no credit consumption
+
+    Args:
+        graph_id (str):
+        body (BulkIngestRequest):
+
+    Raises:
+        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
+        httpx.TimeoutException: If the request takes longer than Client.timeout.
+
+    Returns:
+        Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]
+    """

     kwargs = _get_kwargs(
         graph_id=graph_id,
         body=body,
-        token=token,
-        authorization=authorization,
     )

     response = await client.get_async_httpx_client().request(**kwargs)
@@ -493,124 +348,81 @@ async def asyncio(
     *,
     client: AuthenticatedClient,
     body: BulkIngestRequest,
-    token: Union[None, Unset, str] = UNSET,
-    authorization: Union[None, Unset, str] = UNSET,
 ) -> Optional[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
-    r""" Ingest Tables to Graph
-
-    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
-
-    **Purpose:**
-    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
-    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
-
-    **Use Cases:**
-    - Initial graph population from uploaded data
-    - Incremental data updates with new files
-    - Complete database rebuild from source files
-    - Recovery from failed ingestion attempts
-
-    **Workflow:**
-    1. Upload data files via `POST /tables/{table_name}/files`
-    2. Files are validated and marked as 'uploaded'
-    3. Trigger ingestion: `POST /tables/ingest`
-    4. DuckDB staging tables created from S3 patterns
-    5. Data copied row-by-row from DuckDB to Kuzu
-    6. Per-table results and metrics returned
-
-    **Rebuild Feature:**
-    Setting `rebuild=true` regenerates the entire graph database from scratch:
-    - Deletes existing Kuzu database
-    - Recreates with fresh schema from active GraphSchema
-    - Ingests all data files
-    - Safe operation - S3 is source of truth
-    - Useful for schema changes or data corrections
-    - Graph marked as 'rebuilding' during process
-
-    **Error Handling:**
-    - Per-table error isolation with `ignore_errors` flag
-    - Partial success support (some tables succeed, some fail)
-    - Detailed error reporting per table
-    - Graph status tracking throughout process
-    - Automatic failure recovery and cleanup
-
-    **Performance:**
-    - Processes all tables in sequence
-    - Each table timed independently
-    - Total execution metrics provided
-    - Scales to thousands of files
-    - Optimized for large datasets
-
-    **Example Request:**
-    ```bash
-    curl -X POST \"https://api.robosystems.ai/v1/graphs/kg123/tables/ingest\" \
-      -H \"Authorization: Bearer YOUR_TOKEN\" \
-      -H \"Content-Type: application/json\" \
-      -d '{
-        \"ignore_errors\": true,
-        \"rebuild\": false
-      }'
-    ```
-
-    **Example Response:**
-    ```json
-    {
-      \"status\": \"success\",
-      \"graph_id\": \"kg123\",
-      \"total_tables\": 5,
-      \"successful_tables\": 5,
-      \"failed_tables\": 0,
-      \"skipped_tables\": 0,
-      \"total_rows_ingested\": 25000,
-      \"total_execution_time_ms\": 15420.5,
-      \"results\": [
-        {
-          \"table_name\": \"Entity\",
-          \"status\": \"success\",
-          \"rows_ingested\": 5000,
-          \"execution_time_ms\": 3200.1,
-          \"error\": null
-        }
-      ]
-    }
-    ```
-
-    **Concurrency Control:**
-    Only one ingestion can run per graph at a time. If another ingestion is in progress,
-    you'll receive a 409 Conflict error. The distributed lock automatically expires after
-    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
-
-    **Tips:**
-    - Only files with 'uploaded' status are processed
-    - Tables with no uploaded files are skipped
-    - Use `ignore_errors=false` for strict validation
-    - Monitor progress via per-table results
-    - Check graph metadata for rebuild status
-    - Wait for current ingestion to complete before starting another
-
-    **Note:**
-    Table ingestion is included - no credit consumption.
-
-    Args:
-        graph_id (str): Graph database identifier
-        token (Union[None, Unset, str]): JWT token for SSE authentication
-        authorization (Union[None, Unset, str]):
-        body (BulkIngestRequest):
-
-    Raises:
-        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
-        httpx.TimeoutException: If the request takes longer than Client.timeout.
-
-    Returns:
-        Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]
-    """
+    """Ingest Tables to Graph
+
+    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
+
+    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
+    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
+
+    **Use Cases:**
+    - Initial graph population from uploaded data
+    - Incremental data updates with new files
+    - Complete database rebuild from source files
+    - Recovery from failed ingestion attempts
+
+    **Workflow:**
+    1. Upload data files via `POST /tables/{table_name}/files`
+    2. Files are validated and marked as 'uploaded'
+    3. Trigger ingestion: `POST /tables/ingest`
+    4. DuckDB staging tables created from S3 patterns
+    5. Data copied row-by-row from DuckDB to Kuzu
+    6. Per-table results and metrics returned
+
+    **Rebuild Feature:**
+    Setting `rebuild=true` regenerates the entire graph database from scratch:
+    - Deletes existing Kuzu database
+    - Recreates with fresh schema from active GraphSchema
+    - Ingests all data files
+    - Safe operation - S3 is source of truth
+    - Useful for schema changes or data corrections
+    - Graph marked as 'rebuilding' during process
+
+    **Error Handling:**
+    - Per-table error isolation with `ignore_errors` flag
+    - Partial success support (some tables succeed, some fail)
+    - Detailed error reporting per table
+    - Graph status tracking throughout process
+    - Automatic failure recovery and cleanup
+
+    **Performance:**
+    - Processes all tables in sequence
+    - Each table timed independently
+    - Total execution metrics provided
+    - Scales to thousands of files
+    - Optimized for large datasets
+
+    **Concurrency Control:**
+    Only one ingestion can run per graph at a time. If another ingestion is in progress,
+    you'll receive a 409 Conflict error. The distributed lock automatically expires after
+    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
+
+    **Important Notes:**
+    - Only files with 'uploaded' status are processed
+    - Tables with no uploaded files are skipped
+    - Use `ignore_errors=false` for strict validation
+    - Monitor progress via per-table results
+    - Check graph metadata for rebuild status
+    - Wait for current ingestion to complete before starting another
+    - Table ingestion is included - no credit consumption
+
+    Args:
+        graph_id (str):
+        body (BulkIngestRequest):
+
+    Raises:
+        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
+        httpx.TimeoutException: If the request takes longer than Client.timeout.
+
+    Returns:
+        Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]
+    """

     return (
         await asyncio_detailed(
             graph_id=graph_id,
             client=client,
             body=body,
-            token=token,
-            authorization=authorization,
         )
     ).parsed
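
The async pair mirrors the sync pair, including the dropped parameters. A minimal usage sketch for the module-level `asyncio` coroutine (the stdlib import is aliased to avoid the name clash); placeholders as in the earlier examples:

```python
import asyncio as aio  # stdlib runner; ingest_tables.asyncio is the endpoint coroutine

async def main() -> None:
    result = await ingest_tables.asyncio(
        graph_id="kg123",
        client=client,
        body=BulkIngestRequest(ignore_errors=True, rebuild=False),
    )
    print(result)

aio.run(main())
```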