robosystems-client 0.1.13__py3-none-any.whl → 0.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of robosystems-client might be problematic.
- robosystems_client/api/copy/copy_data_to_graph.py +180 -15
- robosystems_client/extensions/README.md +211 -0
- robosystems_client/extensions/__init__.py +29 -0
- robosystems_client/extensions/copy_client.py +469 -0
- robosystems_client/extensions/extensions.py +17 -0
- robosystems_client/models/copy_response.py +60 -8
- robosystems_client/models/copy_response_status.py +1 -0
- robosystems_client/models/s3_copy_request.py +25 -22
- {robosystems_client-0.1.13.dist-info → robosystems_client-0.1.15.dist-info}/METADATA +1 -1
- {robosystems_client-0.1.13.dist-info → robosystems_client-0.1.15.dist-info}/RECORD +11 -10
- {robosystems_client-0.1.13.dist-info → robosystems_client-0.1.15.dist-info}/WHEEL +0 -0
robosystems_client/api/copy/copy_data_to_graph.py (+180 -15)

````diff
@@ -1,5 +1,5 @@
 from http import HTTPStatus
-from typing import Any, Optional, Union
+from typing import Any, Optional, Union, cast
 
 import httpx
 
@@ -50,11 +50,32 @@ def _get_kwargs(
 
 def _parse_response(
     *, client: Union[AuthenticatedClient, Client], response: httpx.Response
-) -> Optional[Union[CopyResponse, HTTPValidationError]]:
+) -> Optional[Union[Any, CopyResponse, HTTPValidationError]]:
     if response.status_code == 200:
         response_200 = CopyResponse.from_dict(response.json())
 
         return response_200
+    if response.status_code == 202:
+        response_202 = cast(Any, None)
+        return response_202
+    if response.status_code == 400:
+        response_400 = cast(Any, None)
+        return response_400
+    if response.status_code == 403:
+        response_403 = cast(Any, None)
+        return response_403
+    if response.status_code == 408:
+        response_408 = cast(Any, None)
+        return response_408
+    if response.status_code == 429:
+        response_429 = cast(Any, None)
+        return response_429
+    if response.status_code == 500:
+        response_500 = cast(Any, None)
+        return response_500
+    if response.status_code == 503:
+        response_503 = cast(Any, None)
+        return response_503
     if response.status_code == 422:
         response_422 = HTTPValidationError.from_dict(response.json())
 
@@ -67,7 +88,7 @@ def _parse_response(
 
 def _build_response(
     *, client: Union[AuthenticatedClient, Client], response: httpx.Response
-) -> Response[Union[CopyResponse, HTTPValidationError]]:
+) -> Response[Union[Any, CopyResponse, HTTPValidationError]]:
     return Response(
         status_code=HTTPStatus(response.status_code),
         content=response.content,
@@ -83,7 +104,7 @@ def sync_detailed(
     body: Union["DataFrameCopyRequest", "S3CopyRequest", "URLCopyRequest"],
     authorization: Union[None, Unset, str] = UNSET,
     auth_token: Union[None, Unset, str] = UNSET,
-) -> Response[Union[CopyResponse, HTTPValidationError]]:
+) -> Response[Union[Any, CopyResponse, HTTPValidationError]]:
     """Copy Data to Graph
 
     Copy data from external sources into the graph database.
@@ -105,10 +126,46 @@ def sync_detailed(
     - Premium: 100GB max file size, 60 min timeout
 
     **Copy Options:**
-    - `ignore_errors`: Skip duplicate/invalid rows (enables upsert-like behavior)
+    - `ignore_errors`: Skip duplicate/invalid rows (enables upsert-like behavior). Note: When enabled,
+      row counts may not be accurately reported
     - `extended_timeout`: Use extended timeout for large datasets
     - `validate_schema`: Validate source schema against target table
 
+    **Asynchronous Execution with SSE:**
+    For large data imports, this endpoint returns immediately with an operation ID
+    and SSE monitoring endpoint. Connect to the returned stream URL for real-time updates:
+
+    ```javascript
+    const eventSource = new EventSource('/v1/operations/{operation_id}/stream');
+    eventSource.onmessage = (event) => {
+      const data = JSON.parse(event.data);
+      console.log('Progress:', data.message);
+    };
+    ```
+
+    **SSE Events Emitted:**
+    - `operation_started`: Copy operation begins
+    - `operation_progress`: Progress updates during data transfer
+    - `operation_completed`: Copy successful with statistics
+    - `operation_error`: Copy failed with error details
+
+    **SSE Connection Limits:**
+    - Maximum 5 concurrent SSE connections per user
+    - Rate limited to 10 new connections per minute
+    - Automatic circuit breaker for Redis failures
+    - Graceful degradation if event system unavailable
+
+    **Error Handling:**
+    - `403 Forbidden`: Attempted copy to shared repository
+    - `408 Request Timeout`: Operation exceeded timeout limit
+    - `429 Too Many Requests`: Rate limit exceeded
+    - `503 Service Unavailable`: Circuit breaker open or service unavailable
+    - Clients should implement exponential backoff on errors
+
+    **Note:**
+    Copy operations are FREE - no credit consumption required.
+    All copy operations are performed asynchronously with progress monitoring.
+
     Args:
         graph_id (str): Target graph identifier (user graphs only - shared repositories not
             allowed)
@@ -121,7 +178,7 @@ def sync_detailed(
         httpx.TimeoutException: If the request takes longer than Client.timeout.
 
     Returns:
-        Response[Union[CopyResponse, HTTPValidationError]]
+        Response[Union[Any, CopyResponse, HTTPValidationError]]
     """
 
     kwargs = _get_kwargs(
````

The same two changes repeat verbatim for the other three generated entry points: the widened return annotation and the expanded docstring are applied to `sync` (hunks `@@ -145,7 +202,7 @@`, `@@ -167,10 +224,46 @@`, `@@ -183,7 +276,7 @@`), `asyncio_detailed` (`@@ -202,7 +295,7 @@`, `@@ -224,10 +317,46 @@`, `@@ -240,7 +369,7 @@`), and `asyncio` (`@@ -262,7 +391,7 @@`, `@@ -284,10 +413,46 @@`, `@@ -300,7 +465,7 @@`). As before, `sync` and `asyncio` return `Optional[Union[Any, CopyResponse, HTTPValidationError]]` rather than the `Response[...]` wrapper.
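A practical consequence of the new annotations is that 202 and the error statuses parse to `None`, so callers that want the operation ID for SSE monitoring have to branch on the raw status code. A minimal sketch, assuming only the API surface shown in this diff; the graph ID, bucket path, and credentials are placeholders, and the 202 payload keys are assumed to mirror the `CopyResponse` field names:

```python
# Hedged sketch: handling the new 202 Accepted path from sync_detailed.
import json
from http import HTTPStatus

from robosystems_client.client import AuthenticatedClient
from robosystems_client.api.copy.copy_data_to_graph import sync_detailed
from robosystems_client.models.s3_copy_request import S3CopyRequest

client = AuthenticatedClient(base_url="https://api.robosystems.ai", token="your-token")
body = S3CopyRequest(
    table_name="companies",                              # placeholder
    s3_path="s3://my-bucket/data/companies.parquet",     # placeholder
    s3_access_key_id="AWS_ACCESS_KEY",
    s3_secret_access_key="AWS_SECRET_KEY",
)

response = sync_detailed(graph_id="your_graph_id", client=client, body=body)

if response.status_code == HTTPStatus.OK and response.parsed:
    print(f"Imported {response.parsed.rows_imported} rows")
elif response.status_code == HTTPStatus.ACCEPTED:
    # 202 bodies are not parsed into a model by the generated code
    # (cast(Any, None)), so read the raw payload for the operation id.
    payload = json.loads(response.content)
    print(f"Monitor op {payload.get('operation_id')} at {payload.get('sse_url')}")
else:
    print(f"Copy rejected with HTTP {response.status_code}")
```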
robosystems_client/extensions/README.md (+211 -0)

````diff
@@ -11,6 +11,7 @@ The RoboSystems Python Client Extensions provide enhanced functionality for the
 
 - **Server-Sent Events (SSE)** streaming with automatic reconnection
 - **Smart Query Execution** with automatic strategy selection
+- **Data Copy Operations** with S3 import and real-time progress tracking
 - **Operation Monitoring** for long-running operations
 - **Connection Pooling** and intelligent resource management
 - **Result Processing** and format conversion utilities
@@ -84,6 +85,62 @@ async def main():
 asyncio.run(main())
 ```
 
+### Data Copy Operations
+
+```python
+from robosystems_client.extensions import CopyClient, CopyOptions
+from robosystems_client.models.s3_copy_request import S3CopyRequest
+from robosystems_client.models.s3_copy_request_file_format import S3CopyRequestFileFormat
+
+# Initialize copy client
+copy_client = CopyClient({
+    "base_url": "https://api.robosystems.ai",
+    "api_key": "your-api-key",
+})
+
+# Create S3 copy request
+request = S3CopyRequest(
+    table_name="companies",
+    s3_path="s3://my-bucket/data/companies.csv",
+    s3_access_key_id="AWS_ACCESS_KEY",
+    s3_secret_access_key="AWS_SECRET_KEY",
+    s3_region="us-east-1",
+    file_format=S3CopyRequestFileFormat.CSV,
+    ignore_errors=False,  # Stop on first error
+)
+
+# Set up progress callbacks
+def on_progress(message, percent):
+    if percent:
+        print(f"Progress: {message} ({percent}%)")
+    else:
+        print(f"Progress: {message}")
+
+def on_warning(warning):
+    print(f"Warning: {warning}")
+
+options = CopyOptions(
+    on_progress=on_progress,
+    on_warning=on_warning,
+)
+
+# Execute copy with progress monitoring
+result = copy_client.copy_from_s3("your_graph_id", request, options)
+
+# Check results
+if result.status == "completed":
+    print(f"✅ Successfully imported {result.rows_imported:,} rows")
+    stats = copy_client.calculate_statistics(result)
+    if stats:
+        print(f"Throughput: {stats.throughput:.2f} rows/second")
+elif result.status == "partial":
+    print(f"⚠️ Imported {result.rows_imported:,} rows, skipped {result.rows_skipped:,}")
+else:
+    print(f"❌ Copy failed: {result.error}")
+
+copy_client.close()
+```
+
 ## 🔐 Authentication
 
 ### API Key Authentication (Recommended)
@@ -141,6 +198,79 @@ dev_ext = create_extensions(
 
 ## 🛠 Advanced Features
 
+### Copy Operations with Advanced Features
+
+```python
+from robosystems_client.extensions import CopyClient, CopySourceType
+
+# Batch copy multiple tables
+copy_client = CopyClient({
+    "base_url": "https://api.robosystems.ai",
+    "api_key": "your-api-key",
+})
+
+copies = [
+    {
+        "request": S3CopyRequest(
+            table_name="companies",
+            s3_path="s3://bucket/companies.csv",
+            s3_access_key_id="KEY",
+            s3_secret_access_key="SECRET",
+            file_format=S3CopyRequestFileFormat.CSV,
+        ),
+    },
+    {
+        "request": S3CopyRequest(
+            table_name="transactions",
+            s3_path="s3://bucket/transactions.parquet",
+            s3_access_key_id="KEY",
+            s3_secret_access_key="SECRET",
+            file_format=S3CopyRequestFileFormat.PARQUET,
+            ignore_errors=True,  # Continue on errors
+        ),
+    },
+]
+
+# Execute batch copy
+results = copy_client.batch_copy_from_s3("graph_id", copies)
+
+for i, result in enumerate(results):
+    table_name = copies[i]["request"].table_name
+    print(f"{table_name}: {result.status}")
+    if result.rows_imported:
+        print(f"  Imported: {result.rows_imported:,} rows")
+
+# Copy with retry logic for resilient operations
+result = copy_client.copy_with_retry(
+    graph_id="graph_id",
+    request=S3CopyRequest(
+        table_name="large_dataset",
+        s3_path="s3://bucket/large-dataset.csv",
+        s3_access_key_id="KEY",
+        s3_secret_access_key="SECRET",
+        max_file_size_gb=50,
+        extended_timeout=True,
+    ),
+    source_type=CopySourceType.S3,
+    max_retries=3,
+    options=CopyOptions(
+        on_progress=lambda msg, _: print(msg)
+    ),
+)
+
+# Monitor multiple concurrent copy operations
+operation_ids = ["op-123", "op-456", "op-789"]
+results = copy_client.monitor_multiple_copies(operation_ids, options)
+
+for op_id, result in results.items():
+    print(f"Operation {op_id}: {result.status}")
+    if result.status == "completed":
+        stats = copy_client.calculate_statistics(result)
+        print(f"  Throughput: {stats.throughput:.2f} rows/sec")
+
+copy_client.close()
+```
+
 ### Query Builder
 
 Build complex Cypher queries programmatically:
@@ -273,6 +403,87 @@ client.close()
 
 ## 📊 Examples
 
+### Data Import with Real-Time Monitoring
+
+```python
+from robosystems_client.extensions import CopyClient, CopyOptions
+import time
+
+def import_financial_data():
+    """Import financial data with comprehensive monitoring"""
+
+    copy_client = CopyClient({
+        "base_url": "https://api.robosystems.ai",
+        "api_key": "your-api-key",
+    })
+
+    # Track progress history
+    progress_history = []
+    warnings_count = 0
+
+    def on_progress(message, percent):
+        timestamp = time.strftime("%H:%M:%S")
+        progress_history.append({
+            "time": timestamp,
+            "message": message,
+            "percent": percent,
+        })
+        print(f"[{timestamp}] {message}" + (f" ({percent}%)" if percent else ""))
+
+    def on_warning(warning):
+        nonlocal warnings_count
+        warnings_count += 1
+        print(f"⚠️ Warning #{warnings_count}: {warning}")
+
+    def on_queue_update(position, wait_time):
+        print(f"📊 Queue position: {position} (ETA: {wait_time}s)")
+
+    # Configure copy with all callbacks
+    options = CopyOptions(
+        on_progress=on_progress,
+        on_warning=on_warning,
+        on_queue_update=on_queue_update,
+        timeout=1800000,  # 30 minutes
+    )
+
+    # Execute copy operation
+    start_time = time.time()
+
+    result = copy_client.copy_s3(
+        graph_id="financial_graph",
+        table_name="quarterly_reports",
+        s3_path="s3://financial-data/reports-2024-q1.parquet",
+        access_key_id="AWS_KEY",
+        secret_access_key="AWS_SECRET",
+        file_format="parquet",
+        ignore_errors=True,  # Continue on validation errors
+    )
+
+    # Print summary
+    elapsed = time.time() - start_time
+
+    print("\n" + "="*50)
+    print("📈 IMPORT SUMMARY")
+    print("="*50)
+    print(f"Status: {result.status.upper()}")
+    print(f"Rows Imported: {result.rows_imported or 0:,}")
+    print(f"Rows Skipped: {result.rows_skipped or 0:,}")
+    print(f"Warnings: {warnings_count}")
+    print(f"Execution Time: {elapsed:.2f} seconds")
+
+    if result.status == "completed":
+        stats = copy_client.calculate_statistics(result)
+        if stats:
+            print(f"Throughput: {stats.throughput:.2f} rows/second")
+            print(f"Data Processed: {stats.bytes_processed / (1024*1024):.2f} MB")
+
+    copy_client.close()
+    return result
+
+# Run the import
+result = import_financial_data()
+```
+
 ### Financial Data Analysis
 
 ```python
````
robosystems_client/extensions/__init__.py (+29 -0)

```diff
@@ -20,6 +20,14 @@ from .operation_client import (
     OperationProgress,
     OperationResult,
 )
+from .copy_client import (
+    CopyClient,
+    AsyncCopyClient,
+    CopySourceType,
+    CopyOptions,
+    CopyResult,
+    CopyStatistics,
+)
 from .extensions import (
     RoboSystemsExtensions,
     RoboSystemsExtensionConfig,
@@ -68,6 +76,13 @@ __all__ = [
     "OperationStatus",
     "OperationProgress",
     "OperationResult",
+    # Copy Client
+    "CopyClient",
+    "AsyncCopyClient",
+    "CopySourceType",
+    "CopyOptions",
+    "CopyResult",
+    "CopyStatistics",
     # Utilities
     "QueryBuilder",
     "ResultProcessor",
@@ -106,3 +121,17 @@ def execute_query(graph_id: str, query: str, parameters=None):
 def stream_query(graph_id: str, query: str, parameters=None, chunk_size=None):
     """Stream a query using the default extensions instance"""
     return extensions.query.stream_query(graph_id, query, parameters, chunk_size)
+
+
+def copy_from_s3(
+    graph_id: str,
+    table_name: str,
+    s3_path: str,
+    access_key_id: str,
+    secret_access_key: str,
+    **kwargs,
+):
+    """Copy data from S3 using the default extensions instance"""
+    return extensions.copy_from_s3(
+        graph_id, table_name, s3_path, access_key_id, secret_access_key, **kwargs
+    )
```
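For quick scripts, the new module-level wrapper delegates to the package's default extensions instance. A hedged usage sketch, assuming that default instance is already configured with your base URL and API key; the identifiers below are placeholders:

```python
# Sketch of the module-level helper added above; all names exist in this diff.
from robosystems_client.extensions import copy_from_s3

result = copy_from_s3(
    graph_id="your_graph_id",          # placeholder
    table_name="companies",            # placeholder
    s3_path="s3://my-bucket/data/companies.parquet",
    access_key_id="AWS_ACCESS_KEY",
    secret_access_key="AWS_SECRET_KEY",
    file_format="parquet",             # forwarded via **kwargs to CopyClient.copy_s3
)
print(result.status, result.rows_imported)
```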
robosystems_client/extensions/copy_client.py (new file, +469 -0); full contents:

```python
"""Enhanced Copy Client with SSE support

Provides intelligent data copy operations with progress monitoring.
"""

from dataclasses import dataclass
from typing import Dict, Any, Optional, Callable, Union, List
from enum import Enum
import time
import logging

from ..api.copy.copy_data_to_graph import sync_detailed as copy_data_to_graph
from ..models.s3_copy_request import S3CopyRequest
from ..models.url_copy_request import URLCopyRequest
from ..models.data_frame_copy_request import DataFrameCopyRequest
from ..models.copy_response import CopyResponse
from ..models.copy_response_status import CopyResponseStatus
from ..models.s3_copy_request_file_format import S3CopyRequestFileFormat
from .sse_client import SSEClient, AsyncSSEClient, SSEConfig, EventType

logger = logging.getLogger(__name__)


class CopySourceType(Enum):
    """Types of copy sources"""

    S3 = "s3"
    URL = "url"
    DATAFRAME = "dataframe"


@dataclass
class CopyOptions:
    """Options for copy operations"""

    on_progress: Optional[Callable[[str, Optional[float]], None]] = None
    on_queue_update: Optional[Callable[[int, int], None]] = None
    on_warning: Optional[Callable[[str], None]] = None
    timeout: Optional[int] = None
    test_mode: Optional[bool] = None


@dataclass
class CopyResult:
    """Result from copy operation"""

    status: str  # 'completed', 'failed', 'partial', 'accepted'
    rows_imported: Optional[int] = None
    rows_skipped: Optional[int] = None
    bytes_processed: Optional[int] = None
    execution_time_ms: Optional[float] = None
    warnings: Optional[List[str]] = None
    error: Optional[str] = None
    operation_id: Optional[str] = None
    sse_url: Optional[str] = None
    message: Optional[str] = None


@dataclass
class CopyStatistics:
    """Statistics from copy operation"""

    total_rows: int
    imported_rows: int
    skipped_rows: int
    bytes_processed: int
    duration: float  # seconds
    throughput: float  # rows per second


class CopyClient:
    """Enhanced copy client with SSE streaming support"""

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.base_url = config["base_url"]
        self.sse_client: Optional[SSEClient] = None

        # Get client authentication if provided
        self.auth_token = config.get("auth_token")
        self.api_key = config.get("api_key")

    def copy_from_s3(
        self, graph_id: str, request: S3CopyRequest, options: Optional[CopyOptions] = None
    ) -> CopyResult:
        """Copy data from S3 to graph database"""
        return self._execute_copy(graph_id, request, CopySourceType.S3, options)

    def copy_from_url(
        self, graph_id: str, request: URLCopyRequest, options: Optional[CopyOptions] = None
    ) -> CopyResult:
        """Copy data from URL to graph database (when available)"""
        return self._execute_copy(graph_id, request, CopySourceType.URL, options)

    def copy_from_dataframe(
        self,
        graph_id: str,
        request: DataFrameCopyRequest,
        options: Optional[CopyOptions] = None,
    ) -> CopyResult:
        """Copy data from DataFrame to graph database (when available)"""
        return self._execute_copy(graph_id, request, CopySourceType.DATAFRAME, options)

    def _execute_copy(
        self,
        graph_id: str,
        request: Union[S3CopyRequest, URLCopyRequest, DataFrameCopyRequest],
        source_type: CopySourceType,
        options: Optional[CopyOptions] = None,
    ) -> CopyResult:
        """Execute copy operation with automatic SSE monitoring for long-running operations"""
        if options is None:
            options = CopyOptions()

        start_time = time.time()

        # Import client here to avoid circular imports
        from ..client import AuthenticatedClient

        # Create authenticated client
        client = AuthenticatedClient(
            base_url=self.base_url,
            token=self.auth_token,
            headers={"X-API-Key": self.api_key} if self.api_key else None,
        )

        try:
            # Execute the copy request
            response = copy_data_to_graph(graph_id=graph_id, client=client, body=request)

            if response.parsed:
                response_data: CopyResponse = response.parsed

                # Check if this is an accepted (async) operation
                if (
                    response_data.status == CopyResponseStatus.ACCEPTED
                    and response_data.operation_id
                ):
                    # This is a long-running operation with SSE monitoring
                    if options.on_progress:
                        options.on_progress("Copy operation started. Monitoring progress...", None)

                    # If SSE URL is provided, use it for monitoring
                    if response_data.sse_url:
                        return self._monitor_copy_operation(
                            response_data.operation_id, options, start_time
                        )

                    # Otherwise return the accepted response
                    return CopyResult(
                        status="accepted",
                        operation_id=response_data.operation_id,
                        sse_url=response_data.sse_url,
                        message=response_data.message,
                    )

                # This is a synchronous response - operation completed immediately
                return self._build_copy_result(response_data, time.time() - start_time)
            else:
                return CopyResult(
                    status="failed",
                    error="No response data received",
                    execution_time_ms=(time.time() - start_time) * 1000,
                )

        except Exception as e:
            return CopyResult(
                status="failed",
                error=str(e),
                execution_time_ms=(time.time() - start_time) * 1000,
            )

    def _monitor_copy_operation(
        self, operation_id: str, options: CopyOptions, start_time: float
    ) -> CopyResult:
        """Monitor a copy operation using SSE"""
        timeout_ms = options.timeout or 3600000  # Default 1 hour for copy operations
        timeout_time = time.time() + (timeout_ms / 1000)

        result = CopyResult(status="failed")
        warnings: List[str] = []

        # Set up SSE connection
        sse_config = SSEConfig(base_url=self.base_url, timeout=timeout_ms // 1000)
        sse_client = SSEClient(sse_config)

        try:
            sse_client.connect(operation_id)

            # Set up event handlers
            def on_queue_update(data):
                if options.on_queue_update:
                    position = data.get("position", data.get("queue_position", 0))
                    wait_time = data.get("estimated_wait_seconds", 0)
                    options.on_queue_update(position, wait_time)

            def on_progress(data):
                if options.on_progress:
                    message = data.get("message", data.get("status", "Processing..."))
                    progress_percent = data.get("progress_percent", data.get("progress"))
                    options.on_progress(message, progress_percent)

                # Check for warnings in progress updates
                if "warnings" in data and data["warnings"]:
                    warnings.extend(data["warnings"])
                    if options.on_warning:
                        for warning in data["warnings"]:
                            options.on_warning(warning)

            def on_completed(data):
                nonlocal result
                completion_data = data.get("result", data)
                result = CopyResult(
                    status=completion_data.get("status", "completed"),
                    rows_imported=completion_data.get("rows_imported"),
                    rows_skipped=completion_data.get("rows_skipped"),
                    bytes_processed=completion_data.get("bytes_processed"),
                    execution_time_ms=(time.time() - start_time) * 1000,
                    warnings=warnings if warnings else completion_data.get("warnings"),
                    message=completion_data.get("message"),
                )

            def on_error(data):
                nonlocal result
                result = CopyResult(
                    status="failed",
                    error=data.get("message", data.get("error", "Copy operation failed")),
                    execution_time_ms=(time.time() - start_time) * 1000,
                    warnings=warnings if warnings else None,
                )

            def on_cancelled(data):
                nonlocal result
                result = CopyResult(
                    status="failed",
                    error="Copy operation cancelled",
                    execution_time_ms=(time.time() - start_time) * 1000,
                    warnings=warnings if warnings else None,
                )

            # Register event handlers
            sse_client.on(EventType.QUEUE_UPDATE.value, on_queue_update)
            sse_client.on(EventType.OPERATION_PROGRESS.value, on_progress)
            sse_client.on(EventType.OPERATION_COMPLETED.value, on_completed)
            sse_client.on(EventType.OPERATION_ERROR.value, on_error)
            sse_client.on(EventType.OPERATION_CANCELLED.value, on_cancelled)

            # Listen for events until completion or timeout
            while time.time() < timeout_time:
                sse_client.listen(timeout=1)  # Process events for 1 second

                # Check if operation is complete
                if result.status in ["completed", "failed", "partial"]:
                    break

            if time.time() >= timeout_time:
                result = CopyResult(
                    status="failed",
                    error=f"Copy operation timeout after {timeout_ms}ms",
                    execution_time_ms=(time.time() - start_time) * 1000,
                )

        finally:
            sse_client.close()

        return result

    def _build_copy_result(
        self, response_data: CopyResponse, execution_time: float
    ) -> CopyResult:
        """Build copy result from response data"""
        return CopyResult(
            status=response_data.status.value,
            rows_imported=response_data.rows_imported,
            rows_skipped=response_data.rows_skipped,
            bytes_processed=response_data.bytes_processed,
            execution_time_ms=response_data.execution_time_ms or (execution_time * 1000),
            warnings=response_data.warnings,
            message=response_data.message,
            error=str(response_data.error_details) if response_data.error_details else None,
        )

    def calculate_statistics(self, result: CopyResult) -> Optional[CopyStatistics]:
        """Calculate copy statistics from result"""
        if result.status == "failed" or not result.rows_imported:
            return None

        total_rows = (result.rows_imported or 0) + (result.rows_skipped or 0)
        duration = (result.execution_time_ms or 0) / 1000  # Convert to seconds
        throughput = (result.rows_imported or 0) / duration if duration > 0 else 0

        return CopyStatistics(
            total_rows=total_rows,
            imported_rows=result.rows_imported or 0,
            skipped_rows=result.rows_skipped or 0,
            bytes_processed=result.bytes_processed or 0,
            duration=duration,
            throughput=throughput,
        )

    def copy_s3(
        self,
        graph_id: str,
        table_name: str,
        s3_path: str,
        access_key_id: str,
        secret_access_key: str,
        region: str = "us-east-1",
        file_format: Optional[str] = None,
        ignore_errors: bool = False,
    ) -> CopyResult:
        """Convenience method for simple S3 copy with default options"""

        # Map string format to enum
        format_enum = S3CopyRequestFileFormat.PARQUET
        if file_format:
            format_map = {
                "csv": S3CopyRequestFileFormat.CSV,
                "parquet": S3CopyRequestFileFormat.PARQUET,
                "json": S3CopyRequestFileFormat.JSON,
                "delta": S3CopyRequestFileFormat.DELTA,
                "iceberg": S3CopyRequestFileFormat.ICEBERG,
            }
            format_enum = format_map.get(file_format.lower(), S3CopyRequestFileFormat.PARQUET)

        request = S3CopyRequest(
            table_name=table_name,
            s3_path=s3_path,
            s3_access_key_id=access_key_id,
            s3_secret_access_key=secret_access_key,
            s3_region=region,
            file_format=format_enum,
            ignore_errors=ignore_errors,
        )

        return self.copy_from_s3(graph_id, request)

    def monitor_multiple_copies(
        self, operation_ids: List[str], options: Optional[CopyOptions] = None
    ) -> Dict[str, CopyResult]:
        """Monitor multiple copy operations concurrently"""
        results = {}
        for operation_id in operation_ids:
            result = self._monitor_copy_operation(
                operation_id, options or CopyOptions(), time.time()
            )
            results[operation_id] = result
        return results

    def batch_copy_from_s3(
        self, graph_id: str, copies: List[Dict[str, Any]]
    ) -> List[CopyResult]:
        """Batch copy multiple tables from S3"""
        results = []
        for copy_config in copies:
            request = copy_config.get("request")
            options = copy_config.get("options")
            if request:
                result = self.copy_from_s3(graph_id, request, options)
                results.append(result)
        return results

    def copy_with_retry(
        self,
        graph_id: str,
        request: Union[S3CopyRequest, URLCopyRequest, DataFrameCopyRequest],
        source_type: CopySourceType,
        max_retries: int = 3,
        options: Optional[CopyOptions] = None,
    ) -> CopyResult:
        """Copy with retry logic for transient failures"""
        if options is None:
            options = CopyOptions()

        last_error: Optional[Exception] = None
        attempt = 0

        while attempt < max_retries:
            attempt += 1

            try:
                result = self._execute_copy(graph_id, request, source_type, options)

                # If successful or partially successful, return
                if result.status in ["completed", "partial"]:
                    return result

                # If failed, check if it's retryable
                if result.status == "failed":
                    is_retryable = self._is_retryable_error(result.error)
                    if not is_retryable or attempt == max_retries:
                        return result

                # Wait before retry with exponential backoff
                wait_time = min(1000 * (2 ** (attempt - 1)), 30000) / 1000
                if options.on_progress:
                    options.on_progress(
                        f"Retrying copy operation (attempt {attempt}/{max_retries}) in {wait_time}s...",
                        None,
                    )
                time.sleep(wait_time)

            except Exception as e:
                last_error = e

                if attempt == max_retries:
                    raise last_error

                # Wait before retry
                wait_time = min(1000 * (2 ** (attempt - 1)), 30000) / 1000
                if options.on_progress:
                    options.on_progress(
                        f"Retrying after error (attempt {attempt}/{max_retries}) in {wait_time}s...",
                        None,
                    )
                time.sleep(wait_time)

        raise last_error or Exception("Copy operation failed after all retries")

    def _is_retryable_error(self, error: Optional[str]) -> bool:
        """Check if an error is retryable"""
        if not error:
            return False

        retryable_patterns = [
            "timeout",
            "network",
            "connection",
            "temporary",
            "unavailable",
            "rate limit",
            "throttl",
        ]

        lower_error = error.lower()
        return any(pattern in lower_error for pattern in retryable_patterns)

    def close(self):
        """Cancel any active SSE connections"""
        if self.sse_client:
            self.sse_client.close()
            self.sse_client = None


class AsyncCopyClient:
    """Async version of CopyClient for async/await usage"""

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.base_url = config["base_url"]
        self.sse_client: Optional[AsyncSSEClient] = None

        # Get client authentication if provided
        self.auth_token = config.get("auth_token")
        self.api_key = config.get("api_key")

    async def copy_from_s3(
        self, graph_id: str, request: S3CopyRequest, options: Optional[CopyOptions] = None
    ) -> CopyResult:
        """Copy data from S3 to graph database asynchronously"""
        # Async implementation would go here
        # For now, this is a placeholder
        raise NotImplementedError("Async copy client not yet implemented")

    async def close(self):
        """Close any active connections"""
        if self.sse_client:
            await self.sse_client.close()
            self.sse_client = None
```
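One detail worth calling out in `copy_with_retry`: the delay doubles per attempt and is capped at 30 s. A quick check of the schedule the formula produces (pure arithmetic, nothing assumed beyond the line in the code above):

```python
# Backoff schedule from copy_with_retry: min(1000 * 2**(attempt - 1), 30000) / 1000
for attempt in range(1, 8):
    wait_s = min(1000 * (2 ** (attempt - 1)), 30000) / 1000
    print(f"attempt {attempt}: wait {wait_s:.0f}s")
# attempt 1: 1s, 2: 2s, 3: 4s, 4: 8s, 5: 16s, 6: 30s (capped), 7: 30s
```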
robosystems_client/extensions/extensions.py (+17 -0)

```diff
@@ -8,6 +8,7 @@ from typing import Dict, Any, Optional, Callable
 
 from .query_client import QueryClient
 from .operation_client import OperationClient
+from .copy_client import CopyClient
 from .sse_client import SSEClient
 
 
@@ -39,6 +40,7 @@ class RoboSystemsExtensions:
         }
 
         # Initialize clients
+        self.copy = CopyClient(self.config)
         self.query = QueryClient(self.config)
         self.operations = OperationClient(self.config)
 
@@ -67,6 +69,7 @@ class RoboSystemsExtensions:
 
     def close(self):
         """Clean up all active connections"""
+        self.copy.close()
         self.query.close()
         self.operations.close_all()
 
@@ -93,6 +96,20 @@ class RoboSystemsExtensions:
         """Cancel an operation using the operation client"""
         return self.operations.cancel_operation(operation_id)
 
+    def copy_from_s3(
+        self,
+        graph_id: str,
+        table_name: str,
+        s3_path: str,
+        access_key_id: str,
+        secret_access_key: str,
+        **kwargs,
+    ):
+        """Copy data from S3 using the copy client"""
+        return self.copy.copy_s3(
+            graph_id, table_name, s3_path, access_key_id, secret_access_key, **kwargs
+        )
+
 
 class AsyncRoboSystemsExtensions:
     """Async version of the extensions class"""
```
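With the facade wired up, a copy can be issued straight from a `RoboSystemsExtensions` instance. A hedged sketch: the constructor is assumed to accept the same `base_url`/`api_key` mapping the sub-clients receive (the diff shows `self.config` being passed to `CopyClient`, but not the constructor signature), and all values below are placeholders:

```python
# Hedged sketch of the new facade method; constructor signature is an assumption.
from robosystems_client.extensions import RoboSystemsExtensions

ext = RoboSystemsExtensions({
    "base_url": "https://api.robosystems.ai",
    "api_key": "your-api-key",
})
result = ext.copy_from_s3(
    "your_graph_id", "companies",
    "s3://my-bucket/data/companies.parquet",
    "AWS_ACCESS_KEY", "AWS_SECRET_KEY",
)
print(result.status)
ext.close()  # now also closes the embedded CopyClient
```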
robosystems_client/models/copy_response.py (+60 -8)

```diff
@@ -21,8 +21,10 @@ class CopyResponse:
     Attributes:
         status (CopyResponseStatus): Operation status
         source_type (str): Type of source that was copied from
-        execution_time_ms (float): Total execution time in milliseconds
         message (str): Human-readable status message
+        operation_id (Union[None, Unset, str]): Operation ID for SSE monitoring (for long-running operations)
+        sse_url (Union[None, Unset, str]): SSE endpoint URL for monitoring operation progress
+        execution_time_ms (Union[None, Unset, float]): Total execution time in milliseconds (for synchronous operations)
         rows_imported (Union[None, Unset, int]): Number of rows successfully imported
         rows_skipped (Union[None, Unset, int]): Number of rows skipped due to errors (when ignore_errors=true)
         warnings (Union[None, Unset, list[str]]): List of warnings encountered during import
@@ -33,8 +35,10 @@ class CopyResponse:
 
     status: CopyResponseStatus
     source_type: str
-    execution_time_ms: float
     message: str
+    operation_id: Union[None, Unset, str] = UNSET
+    sse_url: Union[None, Unset, str] = UNSET
+    execution_time_ms: Union[None, Unset, float] = UNSET
     rows_imported: Union[None, Unset, int] = UNSET
     rows_skipped: Union[None, Unset, int] = UNSET
     warnings: Union[None, Unset, list[str]] = UNSET
@@ -51,10 +55,26 @@ class CopyResponse:
 
         source_type = self.source_type
 
-        execution_time_ms = self.execution_time_ms
-
         message = self.message
 
+        operation_id: Union[None, Unset, str]
+        if isinstance(self.operation_id, Unset):
+            operation_id = UNSET
+        else:
+            operation_id = self.operation_id
+
+        sse_url: Union[None, Unset, str]
+        if isinstance(self.sse_url, Unset):
+            sse_url = UNSET
+        else:
+            sse_url = self.sse_url
+
+        execution_time_ms: Union[None, Unset, float]
+        if isinstance(self.execution_time_ms, Unset):
+            execution_time_ms = UNSET
+        else:
+            execution_time_ms = self.execution_time_ms
+
         rows_imported: Union[None, Unset, int]
         if isinstance(self.rows_imported, Unset):
             rows_imported = UNSET
@@ -96,10 +116,15 @@ class CopyResponse:
             {
                 "status": status,
                 "source_type": source_type,
-                "execution_time_ms": execution_time_ms,
                 "message": message,
             }
         )
+        if operation_id is not UNSET:
+            field_dict["operation_id"] = operation_id
+        if sse_url is not UNSET:
+            field_dict["sse_url"] = sse_url
+        if execution_time_ms is not UNSET:
+            field_dict["execution_time_ms"] = execution_time_ms
         if rows_imported is not UNSET:
             field_dict["rows_imported"] = rows_imported
         if rows_skipped is not UNSET:
@@ -124,10 +149,35 @@ class CopyResponse:
 
         source_type = d.pop("source_type")
 
-        execution_time_ms = d.pop("execution_time_ms")
-
         message = d.pop("message")
 
+        def _parse_operation_id(data: object) -> Union[None, Unset, str]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, str], data)
+
+        operation_id = _parse_operation_id(d.pop("operation_id", UNSET))
+
+        def _parse_sse_url(data: object) -> Union[None, Unset, str]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, str], data)
+
+        sse_url = _parse_sse_url(d.pop("sse_url", UNSET))
+
+        def _parse_execution_time_ms(data: object) -> Union[None, Unset, float]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, float], data)
+
+        execution_time_ms = _parse_execution_time_ms(d.pop("execution_time_ms", UNSET))
+
         def _parse_rows_imported(data: object) -> Union[None, Unset, int]:
             if data is None:
                 return data
@@ -194,8 +244,10 @@ class CopyResponse:
         copy_response = cls(
             status=status,
             source_type=source_type,
-            execution_time_ms=execution_time_ms,
             message=message,
+            operation_id=operation_id,
+            sse_url=sse_url,
+            execution_time_ms=execution_time_ms,
             rows_imported=rows_imported,
             rows_skipped=rows_skipped,
             warnings=warnings,
```
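Because `operation_id`, `sse_url`, and `execution_time_ms` are all optional now, both response shapes round-trip through the model. A sketch with invented payloads; the field names come from this diff, and the status strings are assumed to match the `CopyResponseStatus` values:

```python
# Hedged sketch: parsing the synchronous and accepted shapes of CopyResponse.
from robosystems_client.models.copy_response import CopyResponse

sync_payload = {
    "status": "completed",       # assumed CopyResponseStatus value
    "source_type": "s3",
    "message": "Copy finished",
    "execution_time_ms": 1234.5,
    "rows_imported": 10000,
}
accepted_payload = {
    "status": "accepted",        # assumed CopyResponseStatus value
    "source_type": "s3",
    "message": "Copy queued",
    "operation_id": "op-123",    # invented id
    "sse_url": "/v1/operations/op-123/stream",
}

for payload in (sync_payload, accepted_payload):
    resp = CopyResponse.from_dict(payload)
    print(resp.status, resp.operation_id, resp.execution_time_ms)
```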
robosystems_client/models/s3_copy_request.py (+25 -22)

```diff
@@ -15,28 +15,31 @@ T = TypeVar("T", bound="S3CopyRequest")
 class S3CopyRequest:
     r"""Request model for S3 copy operations.
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    Copies data from S3 buckets into graph database tables using user-provided
+    AWS credentials. Supports various file formats and bulk loading options.
+
+    Attributes:
+        table_name (str): Target Kuzu table name
+        s3_path (str): Full S3 path (s3://bucket/key or s3://bucket/prefix/*.parquet)
+        s3_access_key_id (str): AWS access key ID for S3 access
+        s3_secret_access_key (str): AWS secret access key for S3 access
+        ignore_errors (Union[Unset, bool]): Skip duplicate/invalid rows (enables upsert-like behavior) Default: True.
+        extended_timeout (Union[Unset, bool]): Use extended timeout for large datasets Default: False.
+        validate_schema (Union[Unset, bool]): Validate source schema against target table Default: True.
+        source_type (Union[Literal['s3'], Unset]): Source type identifier Default: 's3'.
+        s3_session_token (Union[None, Unset, str]): AWS session token (for temporary credentials)
+        s3_region (Union[None, Unset, str]): S3 region Default: 'us-east-1'.
+        s3_endpoint (Union[None, Unset, str]): Custom S3 endpoint (for S3-compatible storage)
+        s3_url_style (Union[None, S3CopyRequestS3UrlStyleType0, Unset]): S3 URL style (vhost or path)
+        file_format (Union[Unset, S3CopyRequestFileFormat]): File format of the S3 data Default:
+            S3CopyRequestFileFormat.PARQUET.
+        csv_delimiter (Union[None, Unset, str]): CSV delimiter Default: ','.
+        csv_header (Union[None, Unset, bool]): CSV has header row Default: True.
+        csv_quote (Union[None, Unset, str]): CSV quote character Default: '\\"'.
+        csv_escape (Union[None, Unset, str]): CSV escape character Default: '\\'.
+        csv_skip (Union[None, Unset, int]): Number of rows to skip Default: 0.
+        allow_moved_paths (Union[None, Unset, bool]): Allow moved paths for Iceberg tables Default: False.
+        max_file_size_gb (Union[None, Unset, int]): Maximum total file size limit in GB Default: 10.
     """
 
     table_name: str
```
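The defaults documented above (`ignore_errors=True`, comma delimiter, header row assumed) are easy to trip over with CSV sources. A hedged sketch overriding them for a pipe-delimited file without a header; the bucket and credentials are placeholders:

```python
# Sketch using the CSV options documented in the docstring above.
from robosystems_client.models.s3_copy_request import S3CopyRequest
from robosystems_client.models.s3_copy_request_file_format import S3CopyRequestFileFormat

request = S3CopyRequest(
    table_name="companies",
    s3_path="s3://my-bucket/exports/companies.psv",  # placeholder path
    s3_access_key_id="AWS_ACCESS_KEY",
    s3_secret_access_key="AWS_SECRET_KEY",
    file_format=S3CopyRequestFileFormat.CSV,
    csv_delimiter="|",     # default is ','
    csv_header=False,      # default assumes a header row
    csv_skip=1,            # e.g. skip one leading metadata row
    ignore_errors=False,   # the documented default (True) silently skips bad rows
)
```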
{robosystems_client-0.1.13.dist-info → robosystems_client-0.1.15.dist-info}/RECORD (+11 -10)

```diff
@@ -39,7 +39,7 @@ robosystems_client/api/connections/list_connections.py,sha256=hLDqUAeeaMrCym0Onh
 robosystems_client/api/connections/oauth_callback.py,sha256=9zX9fA7V_nJTBAi6crXQweS2VJSYvYq0Y7pMzefID0E,9103
 robosystems_client/api/connections/sync_connection.py,sha256=9RvBM9zCqb4nGQCmVpK9JgzOoVbn83ojRnUzKaF8-AM,9839
 robosystems_client/api/copy/__init__.py,sha256=5vd9uJWAjRqa9xzxzYkLD1yoZ12Ld_bAaNB5WX4fbE8,56
-robosystems_client/api/copy/copy_data_to_graph.py,sha256=
+robosystems_client/api/copy/copy_data_to_graph.py,sha256=d4hhuZVtNxddzoSS1tc6B4WzwbSfbK3tVBNjM3wQAwg,17172
 robosystems_client/api/create/__init__.py,sha256=5vd9uJWAjRqa9xzxzYkLD1yoZ12Ld_bAaNB5WX4fbE8,56
 robosystems_client/api/create/create_graph.py,sha256=dwzKgdBkLd9QithUt0ERPeh3shOdsIQ8b6Xvgc_hAQo,13152
 robosystems_client/api/create/get_available_extensions.py,sha256=sFf-YdbDy9VGOK8WZ4JBoxrcVe2PtYW6XLp-EkyMt7E,3502
@@ -113,10 +113,11 @@ robosystems_client/api/user_subscriptions/get_shared_repository_credits.py,sha25
 robosystems_client/api/user_subscriptions/get_user_shared_subscriptions.py,sha256=i-XEy5qLdQB1TSYYO7pM0e4OlhPnEpyV8p9Je18cj1c,6539
 robosystems_client/api/user_subscriptions/subscribe_to_shared_repository.py,sha256=bc7hUPHm_FQ_us30SLAtKSZzz6hOAysSET-DlnylsoE,6373
 robosystems_client/api/user_subscriptions/upgrade_shared_repository_subscription.py,sha256=ihMRdD_fbKoBuS4YAiFXAbN4UUM0bod9eS2Zpb5UtJw,6508
-robosystems_client/extensions/README.md,sha256=
-robosystems_client/extensions/__init__.py,sha256=
+robosystems_client/extensions/README.md,sha256=YmiT_7yNt-7ca8kWF2nTBh7OzQXR0Ws2NugkhN_StMc,22381
+robosystems_client/extensions/__init__.py,sha256=FwFIMcDad4jAHHyBxWee7HMd_fMPSL0FVgJM8GSl5V8,3185
 robosystems_client/extensions/auth_integration.py,sha256=Nlc40PaTGsPcDzLUVDU9IhSAQWYj3Bi-bsbxMAfNo3E,6251
-robosystems_client/extensions/
+robosystems_client/extensions/copy_client.py,sha256=7ikmMkWhymmzuA2vSBuiEX4quIYCdeK48gNfoM-7ODM,15377
+robosystems_client/extensions/extensions.py,sha256=6ZkNacR2k4V2scTg2MjDy50W85DDO7BjQ8o6nLsPO8E,5888
 robosystems_client/extensions/operation_client.py,sha256=INCGhAciIBeLK4sPrG5j2qCNbyITk-Q8DhVqJdr5DxA,11983
 robosystems_client/extensions/query_client.py,sha256=Zo408qxHV7dhwjf_d_bqnGKL5O1k9WPFANTQLk2Nlyk,11471
 robosystems_client/extensions/sse_client.py,sha256=LcEtbzbedPIfXvqfPOpJiQGm0Px65DeobbcKyi6VsEM,14457
@@ -154,9 +155,9 @@ robosystems_client/models/connection_provider_info_provider.py,sha256=-IsFtqa7td
 robosystems_client/models/connection_response.py,sha256=VCOxkvvpwBwZz-TU4FHsmcuvwn3me-2AQyAB_hknCio,4174
 robosystems_client/models/connection_response_metadata.py,sha256=xp9LNJHpuXrfLKSQFLVHwhAF4Ieu26rbSe-odVFWBcM,1233
 robosystems_client/models/connection_response_provider.py,sha256=th7b2inab-PZWaQ9kjsYoM81Xo4coJGJ1kqYh7txnYE,185
-robosystems_client/models/copy_response.py,sha256=
+robosystems_client/models/copy_response.py,sha256=fqmLbYl12xK_feDRJs1lM4_2ZGRvKPs0NIf3aDHfG-Y,8804
 robosystems_client/models/copy_response_error_details_type_0.py,sha256=_Y_o8lWlPlQtQJxIX33URTGkzC4CDslBWloxpUgw4u4,1232
-robosystems_client/models/copy_response_status.py,sha256=
+robosystems_client/models/copy_response_status.py,sha256=Q9U7d7AOb9Ff0G4k4ynqoMgC5MdKXmTKxfIZv4o-7mA,209
 robosystems_client/models/create_api_key_request.py,sha256=yPeQwvtQtcVET6dxfbLjkCDv8UL4Ox_L3D6gXAVhnjM,2134
 robosystems_client/models/create_api_key_response.py,sha256=9cqlZDogqxdSXxxHT6PnfClTP-Q35CvfQjNIvPEe1Pw,1797
 robosystems_client/models/create_connection_request.py,sha256=B9riNF1QK1P3RB680lFAJGsZtYbPHVc14u1TBnIv0QQ,5948
@@ -246,7 +247,7 @@ robosystems_client/models/repository_credits_response.py,sha256=AMvzo-MOZoBFVVFq
 robosystems_client/models/repository_plan.py,sha256=BEdbh0FPIrsomZU_Aq27EAk-ppOqlJxuwNRVCZlNLKo,185
 robosystems_client/models/repository_type.py,sha256=Mw4q6l82iVgalXxOiWCpmtGZAf4MawNxCsIW2QoPd0I,175
 robosystems_client/models/response_mode.py,sha256=0tm3YUxAKHNYS1jn-zdR_88xn6E_bsQl85DwHZM_kkM,181
-robosystems_client/models/s3_copy_request.py,sha256=
+robosystems_client/models/s3_copy_request.py,sha256=0XqKZ6vL46aJozlvIHwGRGqCL2OPBGfOBFZ-AMmg2xo,13171
 robosystems_client/models/s3_copy_request_file_format.py,sha256=VfyvnQl7Mcl3mytceCFHUsYuwZqdD_RTFTi47TpW6l8,214
 robosystems_client/models/s3_copy_request_s3_url_style_type_0.py,sha256=Rwj6q4HT0178ogoLSSAme0QwoFBpWYU9bDoPWqJnMaQ,161
 robosystems_client/models/schema_export_response.py,sha256=Ilk2TwD_m3g_8OwlHHOokMZdjkSmktf1xj-eggGutTQ,5175
@@ -302,6 +303,6 @@ robosystems_client/models/user_usage_response_graphs.py,sha256=xAH-ZnhaUfWQ_2EpZ
 robosystems_client/models/user_usage_summary_response.py,sha256=4hthwTH7bXyzdYlHoekDYOgDLI-stGRH507Bl2rUjYA,3655
 robosystems_client/models/user_usage_summary_response_usage_vs_limits.py,sha256=XrZnRcy1nD3xtKX4svbww7QfEHrN7_XIfeL9j5ZMbyQ,1298
 robosystems_client/models/validation_error.py,sha256=R77OuQG2nJ3WDFfY--xbEhg6x1D7gAAp_1UdnG8Ka2A,1949
-robosystems_client-0.1.
-robosystems_client-0.1.
-robosystems_client-0.1.
+robosystems_client-0.1.15.dist-info/METADATA,sha256=ETZWIpN1ZZXM_oUp7zRdnpKdWGnWtmeaH0v5Q01SxAk,9351
+robosystems_client-0.1.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+robosystems_client-0.1.15.dist-info/RECORD,,
```

{robosystems_client-0.1.13.dist-info → robosystems_client-0.1.15.dist-info}/WHEEL: file without changes.