robosystems-client 0.2.2__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of robosystems-client might be problematic; see the registry's advisory page for details.

Files changed (34)
  1. robosystems_client/api/query/execute_cypher_query.py +0 -5
  2. robosystems_client/api/tables/delete_file.py +437 -0
  3. robosystems_client/api/tables/get_file_info.py +397 -0
  4. robosystems_client/api/tables/get_upload_url.py +548 -0
  5. robosystems_client/api/tables/ingest_tables.py +616 -0
  6. robosystems_client/api/tables/list_table_files.py +509 -0
  7. robosystems_client/api/tables/list_tables.py +488 -0
  8. robosystems_client/api/tables/query_tables.py +487 -0
  9. robosystems_client/api/tables/update_file_status.py +539 -0
  10. robosystems_client/extensions/graph_client.py +5 -0
  11. robosystems_client/extensions/table_ingest_client.py +31 -40
  12. robosystems_client/models/__init__.py +13 -17
  13. robosystems_client/models/create_graph_request.py +11 -0
  14. robosystems_client/models/{delete_file_v1_graphs_graph_id_tables_files_file_id_delete_response_delete_file_v1_graphs_graph_id_tables_files_file_id_delete.py → delete_file_response.py} +45 -9
  15. robosystems_client/models/file_info.py +169 -0
  16. robosystems_client/models/file_status_update.py +41 -0
  17. robosystems_client/models/get_file_info_response.py +205 -0
  18. robosystems_client/models/list_table_files_response.py +105 -0
  19. robosystems_client/models/{get_file_info_v1_graphs_graph_id_tables_files_file_id_get_response_get_file_info_v1_graphs_graph_id_tables_files_file_id_get.py → update_file_status_response_updatefilestatus.py} +5 -8
  20. {robosystems_client-0.2.2.dist-info → robosystems_client-0.2.3.dist-info}/METADATA +1 -1
  21. {robosystems_client-0.2.2.dist-info → robosystems_client-0.2.3.dist-info}/RECORD +23 -22
  22. robosystems_client/api/tables/delete_file_v1_graphs_graph_id_tables_files_file_id_delete.py +0 -287
  23. robosystems_client/api/tables/get_file_info_v1_graphs_graph_id_tables_files_file_id_get.py +0 -283
  24. robosystems_client/api/tables/get_upload_url_v1_graphs_graph_id_tables_table_name_files_post.py +0 -260
  25. robosystems_client/api/tables/ingest_tables_v1_graphs_graph_id_tables_ingest_post.py +0 -251
  26. robosystems_client/api/tables/list_table_files_v1_graphs_graph_id_tables_table_name_files_get.py +0 -283
  27. robosystems_client/api/tables/list_tables_v1_graphs_graph_id_tables_get.py +0 -224
  28. robosystems_client/api/tables/query_tables_v1_graphs_graph_id_tables_query_post.py +0 -247
  29. robosystems_client/api/tables/update_file_v1_graphs_graph_id_tables_files_file_id_patch.py +0 -306
  30. robosystems_client/models/file_update_request.py +0 -62
  31. robosystems_client/models/list_table_files_v1_graphs_graph_id_tables_table_name_files_get_response_list_table_files_v1_graphs_graph_id_tables_table_name_files_get.py +0 -47
  32. robosystems_client/models/update_file_v1_graphs_graph_id_tables_files_file_id_patch_response_update_file_v1_graphs_graph_id_tables_files_file_id_patch.py +0 -47
  33. {robosystems_client-0.2.2.dist-info → robosystems_client-0.2.3.dist-info}/WHEEL +0 -0
  34. {robosystems_client-0.2.2.dist-info → robosystems_client-0.2.3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,539 @@
1
+ from http import HTTPStatus
2
+ from typing import Any, Optional, Union, cast
3
+
4
+ import httpx
5
+
6
+ from ... import errors
7
+ from ...client import AuthenticatedClient, Client
8
+ from ...models.error_response import ErrorResponse
9
+ from ...models.file_status_update import FileStatusUpdate
10
+ from ...models.http_validation_error import HTTPValidationError
11
+ from ...models.update_file_status_response_updatefilestatus import (
12
+ UpdateFileStatusResponseUpdatefilestatus,
13
+ )
14
+ from ...types import UNSET, Response, Unset
15
+
16
+
17
+ def _get_kwargs(
18
+ graph_id: str,
19
+ file_id: str,
20
+ *,
21
+ body: FileStatusUpdate,
22
+ token: Union[None, Unset, str] = UNSET,
23
+ authorization: Union[None, Unset, str] = UNSET,
24
+ ) -> dict[str, Any]:
25
+ headers: dict[str, Any] = {}
26
+ if not isinstance(authorization, Unset):
27
+ headers["authorization"] = authorization
28
+
29
+ params: dict[str, Any] = {}
30
+
31
+ json_token: Union[None, Unset, str]
32
+ if isinstance(token, Unset):
33
+ json_token = UNSET
34
+ else:
35
+ json_token = token
36
+ params["token"] = json_token
37
+
38
+ params = {k: v for k, v in params.items() if v is not UNSET and v is not None}
39
+
40
+ _kwargs: dict[str, Any] = {
41
+ "method": "patch",
42
+ "url": f"/v1/graphs/{graph_id}/tables/files/{file_id}",
43
+ "params": params,
44
+ }
45
+
46
+ _kwargs["json"] = body.to_dict()
47
+
48
+ headers["Content-Type"] = "application/json"
49
+
50
+ _kwargs["headers"] = headers
51
+ return _kwargs
52
+
53
+
54
+ def _parse_response(
55
+ *, client: Union[AuthenticatedClient, Client], response: httpx.Response
56
+ ) -> Optional[
57
+ Union[
58
+ Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus
59
+ ]
60
+ ]:
61
+ if response.status_code == 200:
62
+ response_200 = UpdateFileStatusResponseUpdatefilestatus.from_dict(response.json())
63
+
64
+ return response_200
65
+
66
+ if response.status_code == 400:
67
+ response_400 = ErrorResponse.from_dict(response.json())
68
+
69
+ return response_400
70
+
71
+ if response.status_code == 401:
72
+ response_401 = cast(Any, None)
73
+ return response_401
74
+
75
+ if response.status_code == 403:
76
+ response_403 = ErrorResponse.from_dict(response.json())
77
+
78
+ return response_403
79
+
80
+ if response.status_code == 404:
81
+ response_404 = ErrorResponse.from_dict(response.json())
82
+
83
+ return response_404
84
+
85
+ if response.status_code == 413:
86
+ response_413 = ErrorResponse.from_dict(response.json())
87
+
88
+ return response_413
89
+
90
+ if response.status_code == 422:
91
+ response_422 = HTTPValidationError.from_dict(response.json())
92
+
93
+ return response_422
94
+
95
+ if response.status_code == 500:
96
+ response_500 = cast(Any, None)
97
+ return response_500
98
+
99
+ if client.raise_on_unexpected_status:
100
+ raise errors.UnexpectedStatus(response.status_code, response.content)
101
+ else:
102
+ return None
103
+
104
+
105
+ def _build_response(
106
+ *, client: Union[AuthenticatedClient, Client], response: httpx.Response
107
+ ) -> Response[
108
+ Union[
109
+ Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus
110
+ ]
111
+ ]:
112
+ return Response(
113
+ status_code=HTTPStatus(response.status_code),
114
+ content=response.content,
115
+ headers=response.headers,
116
+ parsed=_parse_response(client=client, response=response),
117
+ )
118
+
119
+
120
+ def sync_detailed(
121
+ graph_id: str,
122
+ file_id: str,
123
+ *,
124
+ client: AuthenticatedClient,
125
+ body: FileStatusUpdate,
126
+ token: Union[None, Unset, str] = UNSET,
127
+ authorization: Union[None, Unset, str] = UNSET,
128
+ ) -> Response[
129
+ Union[
130
+ Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus
131
+ ]
132
+ ]:
133
+ r""" Update File Upload Status
134
+
135
+ Update file status after upload completes.
136
+
137
+ **Purpose:**
138
+ Mark files as uploaded after successful S3 upload. The backend validates
139
+ the file, calculates size and row count, enforces storage limits, and
140
+ registers the DuckDB table for queries.
141
+
142
+ **Status Values:**
143
+ - `uploaded`: File successfully uploaded to S3 (triggers validation)
144
+ - `disabled`: Exclude file from ingestion
145
+ - `archived`: Soft delete file
146
+
147
+ **What Happens on 'uploaded' Status:**
148
+ 1. Verify file exists in S3
149
+ 2. Calculate actual file size
150
+ 3. Enforce tier storage limits
151
+ 4. Calculate or estimate row count
152
+ 5. Update table statistics
153
+ 6. Register DuckDB external table
154
+ 7. File ready for ingestion
155
+
156
+ **Row Count Calculation:**
157
+ - **Parquet**: Exact count from file metadata
158
+ - **CSV**: Count rows (minus header)
159
+ - **JSON**: Count array elements
160
+ - **Fallback**: Estimate from file size if reading fails
161
+
162
+ **Storage Limits:**
163
+ Enforced per subscription tier:
164
+ - Prevents uploads exceeding tier limit
165
+ - Returns HTTP 413 if limit exceeded
166
+ - Check current usage before large uploads
167
+
168
+ **Example Response:**
169
+ ```json
170
+ {
171
+ \"status\": \"success\",
172
+ \"file_id\": \"f123\",
173
+ \"upload_status\": \"uploaded\",
174
+ \"file_size_bytes\": 1048576,
175
+ \"row_count\": 5000,
176
+ \"message\": \"File validated and ready for ingestion\"
177
+ }
178
+ ```
179
+
180
+ **Example Usage:**
181
+ ```bash
182
+ # After uploading file to S3 presigned URL
183
+ curl -X PATCH \"https://api.robosystems.ai/v1/graphs/kg123/tables/files/f123\" \
184
+ -H \"Authorization: Bearer YOUR_TOKEN\" \
185
+ -H \"Content-Type: application/json\" \
186
+ -d '{\"status\": \"uploaded\"}'
187
+ ```
188
+
189
+ **Tips:**
190
+ - Always call this after S3 upload completes
191
+ - Check response for actual row count
192
+ - Storage limit errors (413) mean tier upgrade needed
193
+ - DuckDB registration failures are non-fatal (retried later)
194
+
195
+ **Note:**
196
+ Status updates are included - no credit consumption.
197
+
198
+ Args:
199
+ graph_id (str): Graph database identifier
200
+ file_id (str): File identifier
201
+ token (Union[None, Unset, str]): JWT token for SSE authentication
202
+ authorization (Union[None, Unset, str]):
203
+ body (FileStatusUpdate):
204
+
205
+ Raises:
206
+ errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
207
+ httpx.TimeoutException: If the request takes longer than Client.timeout.
208
+
209
+ Returns:
210
+ Response[Union[Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus]]
211
+ """
212
+
213
+ kwargs = _get_kwargs(
214
+ graph_id=graph_id,
215
+ file_id=file_id,
216
+ body=body,
217
+ token=token,
218
+ authorization=authorization,
219
+ )
220
+
221
+ response = client.get_httpx_client().request(
222
+ **kwargs,
223
+ )
224
+
225
+ return _build_response(client=client, response=response)
226
+
227
+
228
+ def sync(
229
+ graph_id: str,
230
+ file_id: str,
231
+ *,
232
+ client: AuthenticatedClient,
233
+ body: FileStatusUpdate,
234
+ token: Union[None, Unset, str] = UNSET,
235
+ authorization: Union[None, Unset, str] = UNSET,
236
+ ) -> Optional[
237
+ Union[
238
+ Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus
239
+ ]
240
+ ]:
241
+ r""" Update File Upload Status
242
+
243
+ Update file status after upload completes.
244
+
245
+ **Purpose:**
246
+ Mark files as uploaded after successful S3 upload. The backend validates
247
+ the file, calculates size and row count, enforces storage limits, and
248
+ registers the DuckDB table for queries.
249
+
250
+ **Status Values:**
251
+ - `uploaded`: File successfully uploaded to S3 (triggers validation)
252
+ - `disabled`: Exclude file from ingestion
253
+ - `archived`: Soft delete file
254
+
255
+ **What Happens on 'uploaded' Status:**
256
+ 1. Verify file exists in S3
257
+ 2. Calculate actual file size
258
+ 3. Enforce tier storage limits
259
+ 4. Calculate or estimate row count
260
+ 5. Update table statistics
261
+ 6. Register DuckDB external table
262
+ 7. File ready for ingestion
263
+
264
+ **Row Count Calculation:**
265
+ - **Parquet**: Exact count from file metadata
266
+ - **CSV**: Count rows (minus header)
267
+ - **JSON**: Count array elements
268
+ - **Fallback**: Estimate from file size if reading fails
269
+
270
+ **Storage Limits:**
271
+ Enforced per subscription tier:
272
+ - Prevents uploads exceeding tier limit
273
+ - Returns HTTP 413 if limit exceeded
274
+ - Check current usage before large uploads
275
+
276
+ **Example Response:**
277
+ ```json
278
+ {
279
+ \"status\": \"success\",
280
+ \"file_id\": \"f123\",
281
+ \"upload_status\": \"uploaded\",
282
+ \"file_size_bytes\": 1048576,
283
+ \"row_count\": 5000,
284
+ \"message\": \"File validated and ready for ingestion\"
285
+ }
286
+ ```
287
+
288
+ **Example Usage:**
289
+ ```bash
290
+ # After uploading file to S3 presigned URL
291
+ curl -X PATCH \"https://api.robosystems.ai/v1/graphs/kg123/tables/files/f123\" \
292
+ -H \"Authorization: Bearer YOUR_TOKEN\" \
293
+ -H \"Content-Type: application/json\" \
294
+ -d '{\"status\": \"uploaded\"}'
295
+ ```
296
+
297
+ **Tips:**
298
+ - Always call this after S3 upload completes
299
+ - Check response for actual row count
300
+ - Storage limit errors (413) mean tier upgrade needed
301
+ - DuckDB registration failures are non-fatal (retried later)
302
+
303
+ **Note:**
304
+ Status updates are included - no credit consumption.
305
+
306
+ Args:
307
+ graph_id (str): Graph database identifier
308
+ file_id (str): File identifier
309
+ token (Union[None, Unset, str]): JWT token for SSE authentication
310
+ authorization (Union[None, Unset, str]):
311
+ body (FileStatusUpdate):
312
+
313
+ Raises:
314
+ errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
315
+ httpx.TimeoutException: If the request takes longer than Client.timeout.
316
+
317
+ Returns:
318
+ Union[Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus]
319
+ """
320
+
321
+ return sync_detailed(
322
+ graph_id=graph_id,
323
+ file_id=file_id,
324
+ client=client,
325
+ body=body,
326
+ token=token,
327
+ authorization=authorization,
328
+ ).parsed
329
+
330
+
331
+ async def asyncio_detailed(
332
+ graph_id: str,
333
+ file_id: str,
334
+ *,
335
+ client: AuthenticatedClient,
336
+ body: FileStatusUpdate,
337
+ token: Union[None, Unset, str] = UNSET,
338
+ authorization: Union[None, Unset, str] = UNSET,
339
+ ) -> Response[
340
+ Union[
341
+ Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus
342
+ ]
343
+ ]:
344
+ r""" Update File Upload Status
345
+
346
+ Update file status after upload completes.
347
+
348
+ **Purpose:**
349
+ Mark files as uploaded after successful S3 upload. The backend validates
350
+ the file, calculates size and row count, enforces storage limits, and
351
+ registers the DuckDB table for queries.
352
+
353
+ **Status Values:**
354
+ - `uploaded`: File successfully uploaded to S3 (triggers validation)
355
+ - `disabled`: Exclude file from ingestion
356
+ - `archived`: Soft delete file
357
+
358
+ **What Happens on 'uploaded' Status:**
359
+ 1. Verify file exists in S3
360
+ 2. Calculate actual file size
361
+ 3. Enforce tier storage limits
362
+ 4. Calculate or estimate row count
363
+ 5. Update table statistics
364
+ 6. Register DuckDB external table
365
+ 7. File ready for ingestion
366
+
367
+ **Row Count Calculation:**
368
+ - **Parquet**: Exact count from file metadata
369
+ - **CSV**: Count rows (minus header)
370
+ - **JSON**: Count array elements
371
+ - **Fallback**: Estimate from file size if reading fails
372
+
373
+ **Storage Limits:**
374
+ Enforced per subscription tier:
375
+ - Prevents uploads exceeding tier limit
376
+ - Returns HTTP 413 if limit exceeded
377
+ - Check current usage before large uploads
378
+
379
+ **Example Response:**
380
+ ```json
381
+ {
382
+ \"status\": \"success\",
383
+ \"file_id\": \"f123\",
384
+ \"upload_status\": \"uploaded\",
385
+ \"file_size_bytes\": 1048576,
386
+ \"row_count\": 5000,
387
+ \"message\": \"File validated and ready for ingestion\"
388
+ }
389
+ ```
390
+
391
+ **Example Usage:**
392
+ ```bash
393
+ # After uploading file to S3 presigned URL
394
+ curl -X PATCH \"https://api.robosystems.ai/v1/graphs/kg123/tables/files/f123\" \
395
+ -H \"Authorization: Bearer YOUR_TOKEN\" \
396
+ -H \"Content-Type: application/json\" \
397
+ -d '{\"status\": \"uploaded\"}'
398
+ ```
399
+
400
+ **Tips:**
401
+ - Always call this after S3 upload completes
402
+ - Check response for actual row count
403
+ - Storage limit errors (413) mean tier upgrade needed
404
+ - DuckDB registration failures are non-fatal (retried later)
405
+
406
+ **Note:**
407
+ Status updates are included - no credit consumption.
408
+
409
+ Args:
410
+ graph_id (str): Graph database identifier
411
+ file_id (str): File identifier
412
+ token (Union[None, Unset, str]): JWT token for SSE authentication
413
+ authorization (Union[None, Unset, str]):
414
+ body (FileStatusUpdate):
415
+
416
+ Raises:
417
+ errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
418
+ httpx.TimeoutException: If the request takes longer than Client.timeout.
419
+
420
+ Returns:
421
+ Response[Union[Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus]]
422
+ """
423
+
424
+ kwargs = _get_kwargs(
425
+ graph_id=graph_id,
426
+ file_id=file_id,
427
+ body=body,
428
+ token=token,
429
+ authorization=authorization,
430
+ )
431
+
432
+ response = await client.get_async_httpx_client().request(**kwargs)
433
+
434
+ return _build_response(client=client, response=response)
435
+
436
+
437
+ async def asyncio(
438
+ graph_id: str,
439
+ file_id: str,
440
+ *,
441
+ client: AuthenticatedClient,
442
+ body: FileStatusUpdate,
443
+ token: Union[None, Unset, str] = UNSET,
444
+ authorization: Union[None, Unset, str] = UNSET,
445
+ ) -> Optional[
446
+ Union[
447
+ Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus
448
+ ]
449
+ ]:
450
+ r""" Update File Upload Status
451
+
452
+ Update file status after upload completes.
453
+
454
+ **Purpose:**
455
+ Mark files as uploaded after successful S3 upload. The backend validates
456
+ the file, calculates size and row count, enforces storage limits, and
457
+ registers the DuckDB table for queries.
458
+
459
+ **Status Values:**
460
+ - `uploaded`: File successfully uploaded to S3 (triggers validation)
461
+ - `disabled`: Exclude file from ingestion
462
+ - `archived`: Soft delete file
463
+
464
+ **What Happens on 'uploaded' Status:**
465
+ 1. Verify file exists in S3
466
+ 2. Calculate actual file size
467
+ 3. Enforce tier storage limits
468
+ 4. Calculate or estimate row count
469
+ 5. Update table statistics
470
+ 6. Register DuckDB external table
471
+ 7. File ready for ingestion
472
+
473
+ **Row Count Calculation:**
474
+ - **Parquet**: Exact count from file metadata
475
+ - **CSV**: Count rows (minus header)
476
+ - **JSON**: Count array elements
477
+ - **Fallback**: Estimate from file size if reading fails
478
+
479
+ **Storage Limits:**
480
+ Enforced per subscription tier:
481
+ - Prevents uploads exceeding tier limit
482
+ - Returns HTTP 413 if limit exceeded
483
+ - Check current usage before large uploads
484
+
485
+ **Example Response:**
486
+ ```json
487
+ {
488
+ \"status\": \"success\",
489
+ \"file_id\": \"f123\",
490
+ \"upload_status\": \"uploaded\",
491
+ \"file_size_bytes\": 1048576,
492
+ \"row_count\": 5000,
493
+ \"message\": \"File validated and ready for ingestion\"
494
+ }
495
+ ```
496
+
497
+ **Example Usage:**
498
+ ```bash
499
+ # After uploading file to S3 presigned URL
500
+ curl -X PATCH \"https://api.robosystems.ai/v1/graphs/kg123/tables/files/f123\" \
501
+ -H \"Authorization: Bearer YOUR_TOKEN\" \
502
+ -H \"Content-Type: application/json\" \
503
+ -d '{\"status\": \"uploaded\"}'
504
+ ```
505
+
506
+ **Tips:**
507
+ - Always call this after S3 upload completes
508
+ - Check response for actual row count
509
+ - Storage limit errors (413) mean tier upgrade needed
510
+ - DuckDB registration failures are non-fatal (retried later)
511
+
512
+ **Note:**
513
+ Status updates are included - no credit consumption.
514
+
515
+ Args:
516
+ graph_id (str): Graph database identifier
517
+ file_id (str): File identifier
518
+ token (Union[None, Unset, str]): JWT token for SSE authentication
519
+ authorization (Union[None, Unset, str]):
520
+ body (FileStatusUpdate):
521
+
522
+ Raises:
523
+ errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
524
+ httpx.TimeoutException: If the request takes longer than Client.timeout.
525
+
526
+ Returns:
527
+ Union[Any, ErrorResponse, HTTPValidationError, UpdateFileStatusResponseUpdatefilestatus]
528
+ """
529
+
530
+ return (
531
+ await asyncio_detailed(
532
+ graph_id=graph_id,
533
+ file_id=file_id,
534
+ client=client,
535
+ body=body,
536
+ token=token,
537
+ authorization=authorization,
538
+ )
539
+ ).parsed
@@ -58,6 +58,7 @@ class GraphClient:
58
58
  self,
59
59
  metadata: GraphMetadata,
60
60
  initial_entity: Optional[InitialEntityData] = None,
61
+ create_entity: bool = True,
61
62
  timeout: int = 60,
62
63
  poll_interval: int = 2,
63
64
  on_progress: Optional[Callable[[str], None]] = None,
@@ -68,6 +69,9 @@ class GraphClient:
68
69
  Args:
69
70
  metadata: Graph metadata
70
71
  initial_entity: Optional initial entity data
72
+ create_entity: Whether to create the entity node and upload initial data.
73
+ Only applies when initial_entity is provided. Set to False to create
74
+ graph without populating entity data (useful for file-based ingestion).
71
75
  timeout: Maximum time to wait in seconds
72
76
  poll_interval: Time between status checks in seconds
73
77
  on_progress: Callback for progress updates
@@ -121,6 +125,7 @@ class GraphClient:
121
125
  graph_create = CreateGraphRequest(
122
126
  metadata=api_metadata,
123
127
  initial_entity=initial_entity_dict,
128
+ create_entity=create_entity,
124
129
  )
125
130
 
126
131
  if on_progress:
@@ -11,20 +11,20 @@ import json
11
11
  import logging
12
12
  import httpx
13
13
 
14
- from ..api.tables.get_upload_url_v1_graphs_graph_id_tables_table_name_files_post import (
14
+ from ..api.tables.get_upload_url import (
15
15
  sync_detailed as get_upload_url,
16
16
  )
17
- from ..api.tables.update_file_v1_graphs_graph_id_tables_files_file_id_patch import (
18
- sync_detailed as update_file,
17
+ from ..api.tables.update_file_status import (
18
+ sync_detailed as update_file_status,
19
19
  )
20
- from ..api.tables.list_tables_v1_graphs_graph_id_tables_get import (
20
+ from ..api.tables.list_tables import (
21
21
  sync_detailed as list_tables,
22
22
  )
23
- from ..api.tables.ingest_tables_v1_graphs_graph_id_tables_ingest_post import (
23
+ from ..api.tables.ingest_tables import (
24
24
  sync_detailed as ingest_tables,
25
25
  )
26
26
  from ..models.file_upload_request import FileUploadRequest
27
- from ..models.file_update_request import FileUpdateRequest
27
+ from ..models.file_status_update import FileStatusUpdate
28
28
  from ..models.bulk_ingest_request import BulkIngestRequest
29
29
 
30
30
  logger = logging.getLogger(__name__)
@@ -95,7 +95,7 @@ class TableIngestClient:
95
95
  This method handles the complete 3-step upload process:
96
96
  1. Get presigned upload URL
97
97
  2. Upload file to S3
98
- 3. Update file metadata
98
+ 3. Mark file as 'uploaded' (backend validates, calculates size/row count)
99
99
 
100
100
  Args:
101
101
  graph_id: The graph ID
@@ -104,7 +104,7 @@ class TableIngestClient:
104
104
  options: Upload options
105
105
 
106
106
  Returns:
107
- UploadResult with upload details
107
+ UploadResult with upload details (size/row count calculated by backend)
108
108
  """
109
109
  if options is None:
110
110
  options = UploadOptions()
@@ -216,12 +216,10 @@ class TableIngestClient:
216
216
  # BinaryIO or file-like object
217
217
  file_or_buffer.seek(0)
218
218
  file_content = file_or_buffer.read()
219
- file_size = len(file_content)
220
219
  else:
221
220
  # Read from file path
222
221
  with open(file_path, "rb") as f:
223
222
  file_content = f.read()
224
- file_size = len(file_content)
225
223
 
226
224
  s3_response = self._http_client.put(
227
225
  upload_url,
@@ -230,54 +228,47 @@ class TableIngestClient:
230
228
  )
231
229
  s3_response.raise_for_status()
232
230
 
233
- # Step 3: Get row count and update file metadata
231
+ # Step 3: Mark file as uploaded (backend validates and calculates size/row count)
234
232
  if options.on_progress:
235
- options.on_progress(f"Updating file metadata for {file_name}...")
233
+ options.on_progress(f"Marking {file_name} as uploaded...")
236
234
 
237
- try:
238
- import pyarrow.parquet as pq
239
-
240
- if is_buffer:
241
- # Read from buffer for row count
242
- if hasattr(file_or_buffer, "seek"):
243
- file_or_buffer.seek(0)
244
- parquet_table = pq.read_table(file_or_buffer)
245
- else:
246
- # Read from file path
247
- parquet_table = pq.read_table(file_path)
248
-
249
- row_count = parquet_table.num_rows
250
- except ImportError:
251
- logger.warning(
252
- "pyarrow not installed, row count will be estimated from file size"
253
- )
254
- # Rough estimate: ~100 bytes per row for typical data
255
- row_count = file_size // 100
256
-
257
- metadata_update = FileUpdateRequest(
258
- file_size_bytes=file_size, row_count=row_count
259
- )
235
+ status_update = FileStatusUpdate(status="uploaded")
260
236
 
261
237
  kwargs = {
262
238
  "graph_id": graph_id,
263
239
  "file_id": file_id,
264
240
  "client": client,
265
- "body": metadata_update,
241
+ "body": status_update,
266
242
  }
267
243
 
268
- update_response = update_file(**kwargs)
244
+ update_response = update_file_status(**kwargs)
269
245
 
270
246
  if not update_response.parsed:
247
+ logger.error(
248
+ f"No parsed response from update_file_status. Status code: {update_response.status_code}"
249
+ )
271
250
  return UploadResult(
272
251
  file_id=file_id,
273
- file_size=file_size,
274
- row_count=row_count,
252
+ file_size=0,
253
+ row_count=0,
275
254
  table_name=table_name,
276
255
  file_name=file_name,
277
256
  success=False,
278
- error="Failed to update file metadata",
257
+ error="Failed to complete file upload",
279
258
  )
280
259
 
260
+ response_data = update_response.parsed
261
+
262
+ if isinstance(response_data, dict):
263
+ file_size = response_data.get("file_size_bytes", 0)
264
+ row_count = response_data.get("row_count", 0)
265
+ elif hasattr(response_data, "additional_properties"):
266
+ file_size = response_data.additional_properties.get("file_size_bytes", 0)
267
+ row_count = response_data.additional_properties.get("row_count", 0)
268
+ else:
269
+ file_size = getattr(response_data, "file_size_bytes", 0)
270
+ row_count = getattr(response_data, "row_count", 0)
271
+
281
272
  if options.on_progress:
282
273
  options.on_progress(
283
274
  f"✅ Uploaded {file_name} ({file_size:,} bytes, {row_count:,} rows)"