robosystems-client 0.2.2__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of robosystems-client has been flagged as a potentially problematic release.

Files changed (34)
  1. robosystems_client/api/query/execute_cypher_query.py +0 -5
  2. robosystems_client/api/tables/delete_file.py +437 -0
  3. robosystems_client/api/tables/get_file_info.py +397 -0
  4. robosystems_client/api/tables/get_upload_url.py +548 -0
  5. robosystems_client/api/tables/ingest_tables.py +616 -0
  6. robosystems_client/api/tables/list_table_files.py +509 -0
  7. robosystems_client/api/tables/list_tables.py +488 -0
  8. robosystems_client/api/tables/query_tables.py +487 -0
  9. robosystems_client/api/tables/update_file_status.py +539 -0
  10. robosystems_client/extensions/graph_client.py +5 -0
  11. robosystems_client/extensions/table_ingest_client.py +31 -40
  12. robosystems_client/models/__init__.py +13 -17
  13. robosystems_client/models/create_graph_request.py +11 -0
  14. robosystems_client/models/{delete_file_v1_graphs_graph_id_tables_files_file_id_delete_response_delete_file_v1_graphs_graph_id_tables_files_file_id_delete.py → delete_file_response.py} +45 -9
  15. robosystems_client/models/file_info.py +169 -0
  16. robosystems_client/models/file_status_update.py +41 -0
  17. robosystems_client/models/get_file_info_response.py +205 -0
  18. robosystems_client/models/list_table_files_response.py +105 -0
  19. robosystems_client/models/{get_file_info_v1_graphs_graph_id_tables_files_file_id_get_response_get_file_info_v1_graphs_graph_id_tables_files_file_id_get.py → update_file_status_response_updatefilestatus.py} +5 -8
  20. {robosystems_client-0.2.2.dist-info → robosystems_client-0.2.3.dist-info}/METADATA +1 -1
  21. {robosystems_client-0.2.2.dist-info → robosystems_client-0.2.3.dist-info}/RECORD +23 -22
  22. robosystems_client/api/tables/delete_file_v1_graphs_graph_id_tables_files_file_id_delete.py +0 -287
  23. robosystems_client/api/tables/get_file_info_v1_graphs_graph_id_tables_files_file_id_get.py +0 -283
  24. robosystems_client/api/tables/get_upload_url_v1_graphs_graph_id_tables_table_name_files_post.py +0 -260
  25. robosystems_client/api/tables/ingest_tables_v1_graphs_graph_id_tables_ingest_post.py +0 -251
  26. robosystems_client/api/tables/list_table_files_v1_graphs_graph_id_tables_table_name_files_get.py +0 -283
  27. robosystems_client/api/tables/list_tables_v1_graphs_graph_id_tables_get.py +0 -224
  28. robosystems_client/api/tables/query_tables_v1_graphs_graph_id_tables_query_post.py +0 -247
  29. robosystems_client/api/tables/update_file_v1_graphs_graph_id_tables_files_file_id_patch.py +0 -306
  30. robosystems_client/models/file_update_request.py +0 -62
  31. robosystems_client/models/list_table_files_v1_graphs_graph_id_tables_table_name_files_get_response_list_table_files_v1_graphs_graph_id_tables_table_name_files_get.py +0 -47
  32. robosystems_client/models/update_file_v1_graphs_graph_id_tables_files_file_id_patch_response_update_file_v1_graphs_graph_id_tables_files_file_id_patch.py +0 -47
  33. {robosystems_client-0.2.2.dist-info → robosystems_client-0.2.3.dist-info}/WHEEL +0 -0
  34. {robosystems_client-0.2.2.dist-info → robosystems_client-0.2.3.dist-info}/licenses/LICENSE +0 -0
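
Most of the churn in this release is a rename of the tables API from auto-generated operation-ID module names to short, stable ones (items 2–9 replace items 22–29 above, and the response models are renamed to match). A minimal sketch of how caller imports would change, assuming `robosystems_client.api.tables` exposes its endpoint modules directly:

```python
# 0.2.2: module named after the full OpenAPI operation ID
from robosystems_client.api.tables import (
    ingest_tables_v1_graphs_graph_id_tables_ingest_post,
)

# 0.2.3: the same endpoint under its short name (see the new file below)
from robosystems_client.api.tables import ingest_tables
```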
robosystems_client/api/tables/ingest_tables.py (new file)
@@ -0,0 +1,616 @@
+from http import HTTPStatus
+from typing import Any, Optional, Union, cast
+
+import httpx
+
+from ... import errors
+from ...client import AuthenticatedClient, Client
+from ...models.bulk_ingest_request import BulkIngestRequest
+from ...models.bulk_ingest_response import BulkIngestResponse
+from ...models.error_response import ErrorResponse
+from ...models.http_validation_error import HTTPValidationError
+from ...types import UNSET, Response, Unset
+
+
+def _get_kwargs(
+    graph_id: str,
+    *,
+    body: BulkIngestRequest,
+    token: Union[None, Unset, str] = UNSET,
+    authorization: Union[None, Unset, str] = UNSET,
+) -> dict[str, Any]:
+    headers: dict[str, Any] = {}
+    if not isinstance(authorization, Unset):
+        headers["authorization"] = authorization
+
+    params: dict[str, Any] = {}
+
+    json_token: Union[None, Unset, str]
+    if isinstance(token, Unset):
+        json_token = UNSET
+    else:
+        json_token = token
+    params["token"] = json_token
+
+    params = {k: v for k, v in params.items() if v is not UNSET and v is not None}
+
+    _kwargs: dict[str, Any] = {
+        "method": "post",
+        "url": f"/v1/graphs/{graph_id}/tables/ingest",
+        "params": params,
+    }
+
+    _kwargs["json"] = body.to_dict()
+
+    headers["Content-Type"] = "application/json"
+
+    _kwargs["headers"] = headers
+    return _kwargs
+
+
+def _parse_response(
+    *, client: Union[AuthenticatedClient, Client], response: httpx.Response
+) -> Optional[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
+    if response.status_code == 200:
+        response_200 = BulkIngestResponse.from_dict(response.json())
+
+        return response_200
+
+    if response.status_code == 401:
+        response_401 = cast(Any, None)
+        return response_401
+
+    if response.status_code == 403:
+        response_403 = ErrorResponse.from_dict(response.json())
+
+        return response_403
+
+    if response.status_code == 404:
+        response_404 = ErrorResponse.from_dict(response.json())
+
+        return response_404
+
+    if response.status_code == 409:
+        response_409 = ErrorResponse.from_dict(response.json())
+
+        return response_409
+
+    if response.status_code == 422:
+        response_422 = HTTPValidationError.from_dict(response.json())
+
+        return response_422
+
+    if response.status_code == 500:
+        response_500 = ErrorResponse.from_dict(response.json())
+
+        return response_500
+
+    if client.raise_on_unexpected_status:
+        raise errors.UnexpectedStatus(response.status_code, response.content)
+    else:
+        return None
+
+
+def _build_response(
+    *, client: Union[AuthenticatedClient, Client], response: httpx.Response
+) -> Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
+    return Response(
+        status_code=HTTPStatus(response.status_code),
+        content=response.content,
+        headers=response.headers,
+        parsed=_parse_response(client=client, response=response),
+    )
+
+
+def sync_detailed(
+    graph_id: str,
+    *,
+    client: AuthenticatedClient,
+    body: BulkIngestRequest,
+    token: Union[None, Unset, str] = UNSET,
+    authorization: Union[None, Unset, str] = UNSET,
+) -> Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
+    r""" Ingest Tables to Graph
+
+    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
+
+    **Purpose:**
+    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
+    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
+
+    **Use Cases:**
+    - Initial graph population from uploaded data
+    - Incremental data updates with new files
+    - Complete database rebuild from source files
+    - Recovery from failed ingestion attempts
+
+    **Workflow:**
+    1. Upload data files via `POST /tables/{table_name}/files`
+    2. Files are validated and marked as 'uploaded'
+    3. Trigger ingestion: `POST /tables/ingest`
+    4. DuckDB staging tables created from S3 patterns
+    5. Data copied row-by-row from DuckDB to Kuzu
+    6. Per-table results and metrics returned
+
+    **Rebuild Feature:**
+    Setting `rebuild=true` regenerates the entire graph database from scratch:
+    - Deletes existing Kuzu database
+    - Recreates with fresh schema from active GraphSchema
+    - Ingests all data files
+    - Safe operation - S3 is source of truth
+    - Useful for schema changes or data corrections
+    - Graph marked as 'rebuilding' during process
+
+    **Error Handling:**
+    - Per-table error isolation with `ignore_errors` flag
+    - Partial success support (some tables succeed, some fail)
+    - Detailed error reporting per table
+    - Graph status tracking throughout process
+    - Automatic failure recovery and cleanup
+
+    **Performance:**
+    - Processes all tables in sequence
+    - Each table timed independently
+    - Total execution metrics provided
+    - Scales to thousands of files
+    - Optimized for large datasets
+
+    **Example Request:**
+    ```bash
+    curl -X POST \"https://api.robosystems.ai/v1/graphs/kg123/tables/ingest\" \
+      -H \"Authorization: Bearer YOUR_TOKEN\" \
+      -H \"Content-Type: application/json\" \
+      -d '{
+        \"ignore_errors\": true,
+        \"rebuild\": false
+      }'
+    ```
+
+    **Example Response:**
+    ```json
+    {
+      \"status\": \"success\",
+      \"graph_id\": \"kg123\",
+      \"total_tables\": 5,
+      \"successful_tables\": 5,
+      \"failed_tables\": 0,
+      \"skipped_tables\": 0,
+      \"total_rows_ingested\": 25000,
+      \"total_execution_time_ms\": 15420.5,
+      \"results\": [
+        {
+          \"table_name\": \"Entity\",
+          \"status\": \"success\",
+          \"rows_ingested\": 5000,
+          \"execution_time_ms\": 3200.1,
+          \"error\": null
+        }
+      ]
+    }
+    ```
+
+    **Concurrency Control:**
+    Only one ingestion can run per graph at a time. If another ingestion is in progress,
+    you'll receive a 409 Conflict error. The distributed lock automatically expires after
+    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
+
+    **Tips:**
+    - Only files with 'uploaded' status are processed
+    - Tables with no uploaded files are skipped
+    - Use `ignore_errors=false` for strict validation
+    - Monitor progress via per-table results
+    - Check graph metadata for rebuild status
+    - Wait for current ingestion to complete before starting another
+
+    **Note:**
+    Table ingestion is included - no credit consumption.
+
+    Args:
+        graph_id (str): Graph database identifier
+        token (Union[None, Unset, str]): JWT token for SSE authentication
+        authorization (Union[None, Unset, str]):
+        body (BulkIngestRequest):
+
+    Raises:
+        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
+        httpx.TimeoutException: If the request takes longer than Client.timeout.
+
+    Returns:
+        Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]
+    """
+
+    kwargs = _get_kwargs(
+        graph_id=graph_id,
+        body=body,
+        token=token,
+        authorization=authorization,
+    )
+
+    response = client.get_httpx_client().request(
+        **kwargs,
+    )
+
+    return _build_response(client=client, response=response)
+
+
+def sync(
+    graph_id: str,
+    *,
+    client: AuthenticatedClient,
+    body: BulkIngestRequest,
+    token: Union[None, Unset, str] = UNSET,
+    authorization: Union[None, Unset, str] = UNSET,
+) -> Optional[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
+    r""" Ingest Tables to Graph
+
+    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
+
+    **Purpose:**
+    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
+    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
+
+    **Use Cases:**
+    - Initial graph population from uploaded data
+    - Incremental data updates with new files
+    - Complete database rebuild from source files
+    - Recovery from failed ingestion attempts
+
+    **Workflow:**
+    1. Upload data files via `POST /tables/{table_name}/files`
+    2. Files are validated and marked as 'uploaded'
+    3. Trigger ingestion: `POST /tables/ingest`
+    4. DuckDB staging tables created from S3 patterns
+    5. Data copied row-by-row from DuckDB to Kuzu
+    6. Per-table results and metrics returned
+
+    **Rebuild Feature:**
+    Setting `rebuild=true` regenerates the entire graph database from scratch:
+    - Deletes existing Kuzu database
+    - Recreates with fresh schema from active GraphSchema
+    - Ingests all data files
+    - Safe operation - S3 is source of truth
+    - Useful for schema changes or data corrections
+    - Graph marked as 'rebuilding' during process
+
+    **Error Handling:**
+    - Per-table error isolation with `ignore_errors` flag
+    - Partial success support (some tables succeed, some fail)
+    - Detailed error reporting per table
+    - Graph status tracking throughout process
+    - Automatic failure recovery and cleanup
+
+    **Performance:**
+    - Processes all tables in sequence
+    - Each table timed independently
+    - Total execution metrics provided
+    - Scales to thousands of files
+    - Optimized for large datasets
+
+    **Example Request:**
+    ```bash
+    curl -X POST \"https://api.robosystems.ai/v1/graphs/kg123/tables/ingest\" \
+      -H \"Authorization: Bearer YOUR_TOKEN\" \
+      -H \"Content-Type: application/json\" \
+      -d '{
+        \"ignore_errors\": true,
+        \"rebuild\": false
+      }'
+    ```
+
+    **Example Response:**
+    ```json
+    {
+      \"status\": \"success\",
+      \"graph_id\": \"kg123\",
+      \"total_tables\": 5,
+      \"successful_tables\": 5,
+      \"failed_tables\": 0,
+      \"skipped_tables\": 0,
+      \"total_rows_ingested\": 25000,
+      \"total_execution_time_ms\": 15420.5,
+      \"results\": [
+        {
+          \"table_name\": \"Entity\",
+          \"status\": \"success\",
+          \"rows_ingested\": 5000,
+          \"execution_time_ms\": 3200.1,
+          \"error\": null
+        }
+      ]
+    }
+    ```
+
+    **Concurrency Control:**
+    Only one ingestion can run per graph at a time. If another ingestion is in progress,
+    you'll receive a 409 Conflict error. The distributed lock automatically expires after
+    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
+
+    **Tips:**
+    - Only files with 'uploaded' status are processed
+    - Tables with no uploaded files are skipped
+    - Use `ignore_errors=false` for strict validation
+    - Monitor progress via per-table results
+    - Check graph metadata for rebuild status
+    - Wait for current ingestion to complete before starting another
+
+    **Note:**
+    Table ingestion is included - no credit consumption.
+
+    Args:
+        graph_id (str): Graph database identifier
+        token (Union[None, Unset, str]): JWT token for SSE authentication
+        authorization (Union[None, Unset, str]):
+        body (BulkIngestRequest):
+
+    Raises:
+        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
+        httpx.TimeoutException: If the request takes longer than Client.timeout.
+
+    Returns:
+        Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]
+    """
+
+    return sync_detailed(
+        graph_id=graph_id,
+        client=client,
+        body=body,
+        token=token,
+        authorization=authorization,
+    ).parsed
+
+
+async def asyncio_detailed(
+    graph_id: str,
+    *,
+    client: AuthenticatedClient,
+    body: BulkIngestRequest,
+    token: Union[None, Unset, str] = UNSET,
+    authorization: Union[None, Unset, str] = UNSET,
+) -> Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
+    r""" Ingest Tables to Graph
+
+    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
+
+    **Purpose:**
+    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
+    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
+
+    **Use Cases:**
+    - Initial graph population from uploaded data
+    - Incremental data updates with new files
+    - Complete database rebuild from source files
+    - Recovery from failed ingestion attempts
+
+    **Workflow:**
+    1. Upload data files via `POST /tables/{table_name}/files`
+    2. Files are validated and marked as 'uploaded'
+    3. Trigger ingestion: `POST /tables/ingest`
+    4. DuckDB staging tables created from S3 patterns
+    5. Data copied row-by-row from DuckDB to Kuzu
+    6. Per-table results and metrics returned
+
+    **Rebuild Feature:**
+    Setting `rebuild=true` regenerates the entire graph database from scratch:
+    - Deletes existing Kuzu database
+    - Recreates with fresh schema from active GraphSchema
+    - Ingests all data files
+    - Safe operation - S3 is source of truth
+    - Useful for schema changes or data corrections
+    - Graph marked as 'rebuilding' during process
+
+    **Error Handling:**
+    - Per-table error isolation with `ignore_errors` flag
+    - Partial success support (some tables succeed, some fail)
+    - Detailed error reporting per table
+    - Graph status tracking throughout process
+    - Automatic failure recovery and cleanup
+
+    **Performance:**
+    - Processes all tables in sequence
+    - Each table timed independently
+    - Total execution metrics provided
+    - Scales to thousands of files
+    - Optimized for large datasets
+
+    **Example Request:**
+    ```bash
+    curl -X POST \"https://api.robosystems.ai/v1/graphs/kg123/tables/ingest\" \
+      -H \"Authorization: Bearer YOUR_TOKEN\" \
+      -H \"Content-Type: application/json\" \
+      -d '{
+        \"ignore_errors\": true,
+        \"rebuild\": false
+      }'
+    ```
+
+    **Example Response:**
+    ```json
+    {
+      \"status\": \"success\",
+      \"graph_id\": \"kg123\",
+      \"total_tables\": 5,
+      \"successful_tables\": 5,
+      \"failed_tables\": 0,
+      \"skipped_tables\": 0,
+      \"total_rows_ingested\": 25000,
+      \"total_execution_time_ms\": 15420.5,
+      \"results\": [
+        {
+          \"table_name\": \"Entity\",
+          \"status\": \"success\",
+          \"rows_ingested\": 5000,
+          \"execution_time_ms\": 3200.1,
+          \"error\": null
+        }
+      ]
+    }
+    ```
+
+    **Concurrency Control:**
+    Only one ingestion can run per graph at a time. If another ingestion is in progress,
+    you'll receive a 409 Conflict error. The distributed lock automatically expires after
+    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
+
+    **Tips:**
+    - Only files with 'uploaded' status are processed
+    - Tables with no uploaded files are skipped
+    - Use `ignore_errors=false` for strict validation
+    - Monitor progress via per-table results
+    - Check graph metadata for rebuild status
+    - Wait for current ingestion to complete before starting another
+
+    **Note:**
+    Table ingestion is included - no credit consumption.
+
+    Args:
+        graph_id (str): Graph database identifier
+        token (Union[None, Unset, str]): JWT token for SSE authentication
+        authorization (Union[None, Unset, str]):
+        body (BulkIngestRequest):
+
+    Raises:
+        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
+        httpx.TimeoutException: If the request takes longer than Client.timeout.
+
+    Returns:
+        Response[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]
+    """
+
+    kwargs = _get_kwargs(
+        graph_id=graph_id,
+        body=body,
+        token=token,
+        authorization=authorization,
+    )
+
+    response = await client.get_async_httpx_client().request(**kwargs)
+
+    return _build_response(client=client, response=response)
+
+
+async def asyncio(
+    graph_id: str,
+    *,
+    client: AuthenticatedClient,
+    body: BulkIngestRequest,
+    token: Union[None, Unset, str] = UNSET,
+    authorization: Union[None, Unset, str] = UNSET,
+) -> Optional[Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]]:
+    r""" Ingest Tables to Graph
+
+    Load all files from S3 into DuckDB staging tables and ingest into Kuzu graph database.
+
+    **Purpose:**
+    Orchestrates the complete data pipeline from S3 staging files into the Kuzu graph database.
+    Processes all tables in a single bulk operation with comprehensive error handling and metrics.
+
+    **Use Cases:**
+    - Initial graph population from uploaded data
+    - Incremental data updates with new files
+    - Complete database rebuild from source files
+    - Recovery from failed ingestion attempts
+
+    **Workflow:**
+    1. Upload data files via `POST /tables/{table_name}/files`
+    2. Files are validated and marked as 'uploaded'
+    3. Trigger ingestion: `POST /tables/ingest`
+    4. DuckDB staging tables created from S3 patterns
+    5. Data copied row-by-row from DuckDB to Kuzu
+    6. Per-table results and metrics returned
+
+    **Rebuild Feature:**
+    Setting `rebuild=true` regenerates the entire graph database from scratch:
+    - Deletes existing Kuzu database
+    - Recreates with fresh schema from active GraphSchema
+    - Ingests all data files
+    - Safe operation - S3 is source of truth
+    - Useful for schema changes or data corrections
+    - Graph marked as 'rebuilding' during process
+
+    **Error Handling:**
+    - Per-table error isolation with `ignore_errors` flag
+    - Partial success support (some tables succeed, some fail)
+    - Detailed error reporting per table
+    - Graph status tracking throughout process
+    - Automatic failure recovery and cleanup
+
+    **Performance:**
+    - Processes all tables in sequence
+    - Each table timed independently
+    - Total execution metrics provided
+    - Scales to thousands of files
+    - Optimized for large datasets
+
+    **Example Request:**
+    ```bash
+    curl -X POST \"https://api.robosystems.ai/v1/graphs/kg123/tables/ingest\" \
+      -H \"Authorization: Bearer YOUR_TOKEN\" \
+      -H \"Content-Type: application/json\" \
+      -d '{
+        \"ignore_errors\": true,
+        \"rebuild\": false
+      }'
+    ```
+
+    **Example Response:**
+    ```json
+    {
+      \"status\": \"success\",
+      \"graph_id\": \"kg123\",
+      \"total_tables\": 5,
+      \"successful_tables\": 5,
+      \"failed_tables\": 0,
+      \"skipped_tables\": 0,
+      \"total_rows_ingested\": 25000,
+      \"total_execution_time_ms\": 15420.5,
+      \"results\": [
+        {
+          \"table_name\": \"Entity\",
+          \"status\": \"success\",
+          \"rows_ingested\": 5000,
+          \"execution_time_ms\": 3200.1,
+          \"error\": null
+        }
+      ]
+    }
+    ```
+
+    **Concurrency Control:**
+    Only one ingestion can run per graph at a time. If another ingestion is in progress,
+    you'll receive a 409 Conflict error. The distributed lock automatically expires after
+    the configured TTL (default: 1 hour) to prevent deadlocks from failed ingestions.
+
+    **Tips:**
+    - Only files with 'uploaded' status are processed
+    - Tables with no uploaded files are skipped
+    - Use `ignore_errors=false` for strict validation
+    - Monitor progress via per-table results
+    - Check graph metadata for rebuild status
+    - Wait for current ingestion to complete before starting another
+
+    **Note:**
+    Table ingestion is included - no credit consumption.
+
+    Args:
+        graph_id (str): Graph database identifier
+        token (Union[None, Unset, str]): JWT token for SSE authentication
+        authorization (Union[None, Unset, str]):
+        body (BulkIngestRequest):
+
+    Raises:
+        errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
+        httpx.TimeoutException: If the request takes longer than Client.timeout.
+
+    Returns:
+        Union[Any, BulkIngestResponse, ErrorResponse, HTTPValidationError]
+    """
+
+    return (
+        await asyncio_detailed(
+            graph_id=graph_id,
+            client=client,
+            body=body,
+            token=token,
+            authorization=authorization,
+        )
+    ).parsed
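
For orientation, here is a minimal sketch of driving the new module end to end. The `sync` signature is taken from the code above; the `AuthenticatedClient` constructor arguments and the `BulkIngestRequest` keyword arguments (`ignore_errors`, `rebuild`) are assumptions inferred from the docstring's example request, not verified signatures:

```python
from robosystems_client import AuthenticatedClient
from robosystems_client.api.tables import ingest_tables
from robosystems_client.models import BulkIngestRequest, BulkIngestResponse

# Assumed constructor arguments; adjust to your deployment.
client = AuthenticatedClient(
    base_url="https://api.robosystems.ai",
    token="YOUR_TOKEN",
)

# Field names assumed to mirror the documented JSON body:
# {"ignore_errors": true, "rebuild": false}
body = BulkIngestRequest(ignore_errors=True, rebuild=False)

# sync() returns the parsed union: BulkIngestResponse on 200,
# ErrorResponse on 403/404/409/500, HTTPValidationError on 422,
# or None on 401 / an unexpected status (unless raise_on_unexpected_status is set).
result = ingest_tables.sync(graph_id="kg123", client=client, body=body)

if isinstance(result, BulkIngestResponse):
    # Attribute names assumed to match the example response fields above.
    print(f"Ingested {result.total_rows_ingested} rows across {result.total_tables} tables")
```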