robosystems-client 0.1.12__py3-none-any.whl → 0.1.13__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of robosystems-client has been flagged as potentially problematic.
- robosystems_client/api/backup/create_backup.py +1 -1
- robosystems_client/api/backup/export_backup.py +14 -19
- robosystems_client/api/backup/get_backup_download_url.py +1 -1
- robosystems_client/api/backup/get_backup_stats.py +19 -1
- robosystems_client/api/backup/list_backups.py +19 -1
- robosystems_client/api/backup/restore_backup.py +1 -1
- robosystems_client/api/copy/copy_data_to_graph.py +314 -0
- robosystems_client/api/{credits_ → graph_credits}/check_credit_balance.py +42 -20
- robosystems_client/api/graph_health/__init__.py +1 -0
- robosystems_client/api/{graph_status → graph_health}/get_database_health.py +1 -1
- robosystems_client/api/graph_info/__init__.py +1 -0
- robosystems_client/api/{graph_status → graph_info}/get_database_info.py +1 -1
- robosystems_client/api/graph_limits/__init__.py +1 -0
- robosystems_client/api/graph_limits/get_graph_limits.py +259 -0
- robosystems_client/api/subgraphs/create_subgraph.py +63 -173
- robosystems_client/api/subgraphs/delete_subgraph.py +14 -14
- robosystems_client/api/subgraphs/get_subgraph_info.py +14 -14
- robosystems_client/api/subgraphs/get_subgraph_quota.py +8 -4
- robosystems_client/api/subgraphs/list_subgraphs.py +39 -75
- robosystems_client/api/user/get_all_credit_summaries.py +1 -1
- robosystems_client/models/__init__.py +26 -10
- robosystems_client/models/copy_response.py +223 -0
- robosystems_client/models/{kuzu_backup_health_response_kuzubackuphealth.py → copy_response_error_details_type_0.py} +5 -5
- robosystems_client/models/copy_response_status.py +10 -0
- robosystems_client/models/custom_schema_definition.py +2 -2
- robosystems_client/models/data_frame_copy_request.py +125 -0
- robosystems_client/models/data_frame_copy_request_format.py +10 -0
- robosystems_client/models/get_graph_limits_response_getgraphlimits.py +44 -0
- robosystems_client/models/s3_copy_request.py +375 -0
- robosystems_client/models/s3_copy_request_file_format.py +12 -0
- robosystems_client/models/s3_copy_request_s3_url_style_type_0.py +9 -0
- robosystems_client/models/url_copy_request.py +157 -0
- robosystems_client/models/url_copy_request_file_format.py +10 -0
- robosystems_client/models/url_copy_request_headers_type_0.py +44 -0
- {robosystems_client-0.1.12.dist-info → robosystems_client-0.1.13.dist-info}/METADATA +1 -1
- {robosystems_client-0.1.12.dist-info → robosystems_client-0.1.13.dist-info}/RECORD +48 -41
- robosystems_client/api/backup/kuzu_backup_health.py +0 -202
- robosystems_client/api/billing/get_available_subscription_plans_v1_graph_id_billing_available_plans_get.py +0 -198
- robosystems_client/api/billing/get_credit_billing_info_v1_graph_id_billing_credits_get.py +0 -210
- robosystems_client/api/billing/get_graph_pricing_info_v1_graph_id_billing_pricing_get.py +0 -198
- robosystems_client/api/billing/get_graph_subscription_v1_graph_id_billing_subscription_get.py +0 -198
- robosystems_client/api/billing/upgrade_graph_subscription_v1_graph_id_billing_subscription_upgrade_post.py +0 -216
- robosystems_client/models/backup_export_request.py +0 -72
- robosystems_client/models/credit_check_request.py +0 -82
- robosystems_client/models/upgrade_subscription_request.py +0 -82
- /robosystems_client/api/{billing → copy}/__init__.py +0 -0
- /robosystems_client/api/{credits_ → graph_billing}/__init__.py +0 -0
- /robosystems_client/api/{billing → graph_billing}/get_current_graph_bill.py +0 -0
- /robosystems_client/api/{billing → graph_billing}/get_graph_billing_history.py +0 -0
- /robosystems_client/api/{billing → graph_billing}/get_graph_monthly_bill.py +0 -0
- /robosystems_client/api/{billing → graph_billing}/get_graph_usage_details.py +0 -0
- /robosystems_client/api/{graph_status → graph_credits}/__init__.py +0 -0
- /robosystems_client/api/{credits_ → graph_credits}/check_storage_limits.py +0 -0
- /robosystems_client/api/{credits_ → graph_credits}/get_credit_summary.py +0 -0
- /robosystems_client/api/{credits_ → graph_credits}/get_storage_usage.py +0 -0
- /robosystems_client/api/{credits_ → graph_credits}/list_credit_transactions.py +0 -0
- {robosystems_client-0.1.12.dist-info → robosystems_client-0.1.13.dist-info}/WHEEL +0 -0
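Most of the churn above is module renames rather than rewrites: the credit endpoints moved from robosystems_client.api.credits_ to robosystems_client.api.graph_credits, the billing endpoints from api.billing to api.graph_billing, and api.graph_status was split into api.graph_health and api.graph_info. A minimal sketch of the import change implied by the renames listed above (module paths only; the symbols inside each module are not shown in this diff):

# 0.1.12 (old module paths):
# from robosystems_client.api.credits_ import check_credit_balance
# from robosystems_client.api.billing import get_current_graph_bill

# 0.1.13 (new module paths, per the renames listed above):
from robosystems_client.api.graph_credits import check_credit_balance
from robosystems_client.api.graph_billing import get_current_graph_bill

Callers pinned to the old module paths will hit ImportError on upgrade.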
robosystems_client/models/s3_copy_request.py (new file)
@@ -0,0 +1,375 @@
+from collections.abc import Mapping
+from typing import Any, Literal, TypeVar, Union, cast
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+
+from ..models.s3_copy_request_file_format import S3CopyRequestFileFormat
+from ..models.s3_copy_request_s3_url_style_type_0 import S3CopyRequestS3UrlStyleType0
+from ..types import UNSET, Unset
+
+T = TypeVar("T", bound="S3CopyRequest")
+
+
+@_attrs_define
+class S3CopyRequest:
+    r"""Request model for S3 copy operations.
+
+    Attributes:
+        table_name (str): Target Kuzu table name
+        s3_path (str): Full S3 path (s3://bucket/key or s3://bucket/prefix/*.parquet)
+        s3_access_key_id (str): AWS access key ID for S3 access
+        s3_secret_access_key (str): AWS secret access key for S3 access
+        ignore_errors (Union[Unset, bool]): Skip duplicate/invalid rows (enables upsert-like behavior) Default: True.
+        extended_timeout (Union[Unset, bool]): Use extended timeout for large datasets Default: False.
+        validate_schema (Union[Unset, bool]): Validate source schema against target table Default: True.
+        source_type (Union[Literal['s3'], Unset]): Source type identifier Default: 's3'.
+        s3_session_token (Union[None, Unset, str]): AWS session token (for temporary credentials)
+        s3_region (Union[None, Unset, str]): S3 region Default: 'us-east-1'.
+        s3_endpoint (Union[None, Unset, str]): Custom S3 endpoint (for S3-compatible storage)
+        s3_url_style (Union[None, S3CopyRequestS3UrlStyleType0, Unset]): S3 URL style (vhost or path)
+        file_format (Union[Unset, S3CopyRequestFileFormat]): File format of the S3 data Default:
+            S3CopyRequestFileFormat.PARQUET.
+        csv_delimiter (Union[None, Unset, str]): CSV delimiter Default: ','.
+        csv_header (Union[None, Unset, bool]): CSV has header row Default: True.
+        csv_quote (Union[None, Unset, str]): CSV quote character Default: '\\"'.
+        csv_escape (Union[None, Unset, str]): CSV escape character Default: '\\'.
+        csv_skip (Union[None, Unset, int]): Number of rows to skip Default: 0.
+        allow_moved_paths (Union[None, Unset, bool]): Allow moved paths for Iceberg tables Default: False.
+        max_file_size_gb (Union[None, Unset, int]): Maximum total file size limit in GB Default: 10.
+    """
+
+    table_name: str
+    s3_path: str
+    s3_access_key_id: str
+    s3_secret_access_key: str
+    ignore_errors: Union[Unset, bool] = True
+    extended_timeout: Union[Unset, bool] = False
+    validate_schema: Union[Unset, bool] = True
+    source_type: Union[Literal["s3"], Unset] = "s3"
+    s3_session_token: Union[None, Unset, str] = UNSET
+    s3_region: Union[None, Unset, str] = "us-east-1"
+    s3_endpoint: Union[None, Unset, str] = UNSET
+    s3_url_style: Union[None, S3CopyRequestS3UrlStyleType0, Unset] = UNSET
+    file_format: Union[Unset, S3CopyRequestFileFormat] = S3CopyRequestFileFormat.PARQUET
+    csv_delimiter: Union[None, Unset, str] = ","
+    csv_header: Union[None, Unset, bool] = True
+    csv_quote: Union[None, Unset, str] = '\\"'
+    csv_escape: Union[None, Unset, str] = "\\"
+    csv_skip: Union[None, Unset, int] = 0
+    allow_moved_paths: Union[None, Unset, bool] = False
+    max_file_size_gb: Union[None, Unset, int] = 10
+    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        table_name = self.table_name
+
+        s3_path = self.s3_path
+
+        s3_access_key_id = self.s3_access_key_id
+
+        s3_secret_access_key = self.s3_secret_access_key
+
+        ignore_errors = self.ignore_errors
+
+        extended_timeout = self.extended_timeout
+
+        validate_schema = self.validate_schema
+
+        source_type = self.source_type
+
+        s3_session_token: Union[None, Unset, str]
+        if isinstance(self.s3_session_token, Unset):
+            s3_session_token = UNSET
+        else:
+            s3_session_token = self.s3_session_token
+
+        s3_region: Union[None, Unset, str]
+        if isinstance(self.s3_region, Unset):
+            s3_region = UNSET
+        else:
+            s3_region = self.s3_region
+
+        s3_endpoint: Union[None, Unset, str]
+        if isinstance(self.s3_endpoint, Unset):
+            s3_endpoint = UNSET
+        else:
+            s3_endpoint = self.s3_endpoint
+
+        s3_url_style: Union[None, Unset, str]
+        if isinstance(self.s3_url_style, Unset):
+            s3_url_style = UNSET
+        elif isinstance(self.s3_url_style, S3CopyRequestS3UrlStyleType0):
+            s3_url_style = self.s3_url_style.value
+        else:
+            s3_url_style = self.s3_url_style
+
+        file_format: Union[Unset, str] = UNSET
+        if not isinstance(self.file_format, Unset):
+            file_format = self.file_format.value
+
+        csv_delimiter: Union[None, Unset, str]
+        if isinstance(self.csv_delimiter, Unset):
+            csv_delimiter = UNSET
+        else:
+            csv_delimiter = self.csv_delimiter
+
+        csv_header: Union[None, Unset, bool]
+        if isinstance(self.csv_header, Unset):
+            csv_header = UNSET
+        else:
+            csv_header = self.csv_header
+
+        csv_quote: Union[None, Unset, str]
+        if isinstance(self.csv_quote, Unset):
+            csv_quote = UNSET
+        else:
+            csv_quote = self.csv_quote
+
+        csv_escape: Union[None, Unset, str]
+        if isinstance(self.csv_escape, Unset):
+            csv_escape = UNSET
+        else:
+            csv_escape = self.csv_escape
+
+        csv_skip: Union[None, Unset, int]
+        if isinstance(self.csv_skip, Unset):
+            csv_skip = UNSET
+        else:
+            csv_skip = self.csv_skip
+
+        allow_moved_paths: Union[None, Unset, bool]
+        if isinstance(self.allow_moved_paths, Unset):
+            allow_moved_paths = UNSET
+        else:
+            allow_moved_paths = self.allow_moved_paths
+
+        max_file_size_gb: Union[None, Unset, int]
+        if isinstance(self.max_file_size_gb, Unset):
+            max_file_size_gb = UNSET
+        else:
+            max_file_size_gb = self.max_file_size_gb
+
+        field_dict: dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update(
+            {
+                "table_name": table_name,
+                "s3_path": s3_path,
+                "s3_access_key_id": s3_access_key_id,
+                "s3_secret_access_key": s3_secret_access_key,
+            }
+        )
+        if ignore_errors is not UNSET:
+            field_dict["ignore_errors"] = ignore_errors
+        if extended_timeout is not UNSET:
+            field_dict["extended_timeout"] = extended_timeout
+        if validate_schema is not UNSET:
+            field_dict["validate_schema"] = validate_schema
+        if source_type is not UNSET:
+            field_dict["source_type"] = source_type
+        if s3_session_token is not UNSET:
+            field_dict["s3_session_token"] = s3_session_token
+        if s3_region is not UNSET:
+            field_dict["s3_region"] = s3_region
+        if s3_endpoint is not UNSET:
+            field_dict["s3_endpoint"] = s3_endpoint
+        if s3_url_style is not UNSET:
+            field_dict["s3_url_style"] = s3_url_style
+        if file_format is not UNSET:
+            field_dict["file_format"] = file_format
+        if csv_delimiter is not UNSET:
+            field_dict["csv_delimiter"] = csv_delimiter
+        if csv_header is not UNSET:
+            field_dict["csv_header"] = csv_header
+        if csv_quote is not UNSET:
+            field_dict["csv_quote"] = csv_quote
+        if csv_escape is not UNSET:
+            field_dict["csv_escape"] = csv_escape
+        if csv_skip is not UNSET:
+            field_dict["csv_skip"] = csv_skip
+        if allow_moved_paths is not UNSET:
+            field_dict["allow_moved_paths"] = allow_moved_paths
+        if max_file_size_gb is not UNSET:
+            field_dict["max_file_size_gb"] = max_file_size_gb
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+        d = dict(src_dict)
+        table_name = d.pop("table_name")
+
+        s3_path = d.pop("s3_path")
+
+        s3_access_key_id = d.pop("s3_access_key_id")
+
+        s3_secret_access_key = d.pop("s3_secret_access_key")
+
+        ignore_errors = d.pop("ignore_errors", UNSET)
+
+        extended_timeout = d.pop("extended_timeout", UNSET)
+
+        validate_schema = d.pop("validate_schema", UNSET)
+
+        source_type = cast(Union[Literal["s3"], Unset], d.pop("source_type", UNSET))
+        if source_type != "s3" and not isinstance(source_type, Unset):
+            raise ValueError(f"source_type must match const 's3', got '{source_type}'")
+
+        def _parse_s3_session_token(data: object) -> Union[None, Unset, str]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, str], data)
+
+        s3_session_token = _parse_s3_session_token(d.pop("s3_session_token", UNSET))
+
+        def _parse_s3_region(data: object) -> Union[None, Unset, str]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, str], data)
+
+        s3_region = _parse_s3_region(d.pop("s3_region", UNSET))
+
+        def _parse_s3_endpoint(data: object) -> Union[None, Unset, str]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, str], data)
+
+        s3_endpoint = _parse_s3_endpoint(d.pop("s3_endpoint", UNSET))
+
+        def _parse_s3_url_style(
+            data: object,
+        ) -> Union[None, S3CopyRequestS3UrlStyleType0, Unset]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            try:
+                if not isinstance(data, str):
+                    raise TypeError()
+                s3_url_style_type_0 = S3CopyRequestS3UrlStyleType0(data)
+
+                return s3_url_style_type_0
+            except:  # noqa: E722
+                pass
+            return cast(Union[None, S3CopyRequestS3UrlStyleType0, Unset], data)
+
+        s3_url_style = _parse_s3_url_style(d.pop("s3_url_style", UNSET))
+
+        _file_format = d.pop("file_format", UNSET)
+        file_format: Union[Unset, S3CopyRequestFileFormat]
+        if isinstance(_file_format, Unset):
+            file_format = UNSET
+        else:
+            file_format = S3CopyRequestFileFormat(_file_format)
+
+        def _parse_csv_delimiter(data: object) -> Union[None, Unset, str]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, str], data)
+
+        csv_delimiter = _parse_csv_delimiter(d.pop("csv_delimiter", UNSET))
+
+        def _parse_csv_header(data: object) -> Union[None, Unset, bool]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, bool], data)
+
+        csv_header = _parse_csv_header(d.pop("csv_header", UNSET))
+
+        def _parse_csv_quote(data: object) -> Union[None, Unset, str]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, str], data)
+
+        csv_quote = _parse_csv_quote(d.pop("csv_quote", UNSET))
+
+        def _parse_csv_escape(data: object) -> Union[None, Unset, str]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, str], data)
+
+        csv_escape = _parse_csv_escape(d.pop("csv_escape", UNSET))
+
+        def _parse_csv_skip(data: object) -> Union[None, Unset, int]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, int], data)
+
+        csv_skip = _parse_csv_skip(d.pop("csv_skip", UNSET))
+
+        def _parse_allow_moved_paths(data: object) -> Union[None, Unset, bool]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, bool], data)
+
+        allow_moved_paths = _parse_allow_moved_paths(d.pop("allow_moved_paths", UNSET))
+
+        def _parse_max_file_size_gb(data: object) -> Union[None, Unset, int]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(Union[None, Unset, int], data)
+
+        max_file_size_gb = _parse_max_file_size_gb(d.pop("max_file_size_gb", UNSET))
+
+        s3_copy_request = cls(
+            table_name=table_name,
+            s3_path=s3_path,
+            s3_access_key_id=s3_access_key_id,
+            s3_secret_access_key=s3_secret_access_key,
+            ignore_errors=ignore_errors,
+            extended_timeout=extended_timeout,
+            validate_schema=validate_schema,
+            source_type=source_type,
+            s3_session_token=s3_session_token,
+            s3_region=s3_region,
+            s3_endpoint=s3_endpoint,
+            s3_url_style=s3_url_style,
+            file_format=file_format,
+            csv_delimiter=csv_delimiter,
+            csv_header=csv_header,
+            csv_quote=csv_quote,
+            csv_escape=csv_escape,
+            csv_skip=csv_skip,
+            allow_moved_paths=allow_moved_paths,
+            max_file_size_gb=max_file_size_gb,
+        )
+
+        s3_copy_request.additional_properties = d
+        return s3_copy_request
+
+    @property
+    def additional_keys(self) -> list[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> Any:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
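For orientation, a minimal usage sketch of the generated model above; the table name, bucket path, and credentials are placeholders, not values from this package:

from robosystems_client.models.s3_copy_request import S3CopyRequest

req = S3CopyRequest(
    table_name="Entity",  # hypothetical Kuzu table name
    s3_path="s3://example-bucket/data/*.parquet",  # placeholder path
    s3_access_key_id="AKIA...",  # placeholder credentials
    s3_secret_access_key="...",
)
body = req.to_dict()
# Optional fields are serialized whenever they are not UNSET. Because the
# class-level defaults are concrete values (True, "us-east-1", PARQUET, ...),
# they appear in the payload even when the caller never set them explicitly.
assert body["ignore_errors"] is True
assert body["s3_region"] == "us-east-1"
round_tripped = S3CopyRequest.from_dict(body)  # unknown keys land in additional_properties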
robosystems_client/models/url_copy_request.py (new file)
@@ -0,0 +1,157 @@
+from collections.abc import Mapping
+from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union, cast
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+
+from ..models.url_copy_request_file_format import URLCopyRequestFileFormat
+from ..types import UNSET, Unset
+
+if TYPE_CHECKING:
+    from ..models.url_copy_request_headers_type_0 import URLCopyRequestHeadersType0
+
+
+T = TypeVar("T", bound="URLCopyRequest")
+
+
+@_attrs_define
+class URLCopyRequest:
+    """Request model for URL copy operations (future).
+
+    Attributes:
+        table_name (str): Target Kuzu table name
+        url (str): HTTP(S) URL to the data file
+        file_format (URLCopyRequestFileFormat): File format of the URL data
+        ignore_errors (Union[Unset, bool]): Skip duplicate/invalid rows (enables upsert-like behavior) Default: True.
+        extended_timeout (Union[Unset, bool]): Use extended timeout for large datasets Default: False.
+        validate_schema (Union[Unset, bool]): Validate source schema against target table Default: True.
+        source_type (Union[Literal['url'], Unset]): Source type identifier Default: 'url'.
+        headers (Union['URLCopyRequestHeadersType0', None, Unset]): Optional HTTP headers for authentication
+    """
+
+    table_name: str
+    url: str
+    file_format: URLCopyRequestFileFormat
+    ignore_errors: Union[Unset, bool] = True
+    extended_timeout: Union[Unset, bool] = False
+    validate_schema: Union[Unset, bool] = True
+    source_type: Union[Literal["url"], Unset] = "url"
+    headers: Union["URLCopyRequestHeadersType0", None, Unset] = UNSET
+    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        from ..models.url_copy_request_headers_type_0 import URLCopyRequestHeadersType0
+
+        table_name = self.table_name
+
+        url = self.url
+
+        file_format = self.file_format.value
+
+        ignore_errors = self.ignore_errors
+
+        extended_timeout = self.extended_timeout
+
+        validate_schema = self.validate_schema
+
+        source_type = self.source_type
+
+        headers: Union[None, Unset, dict[str, Any]]
+        if isinstance(self.headers, Unset):
+            headers = UNSET
+        elif isinstance(self.headers, URLCopyRequestHeadersType0):
+            headers = self.headers.to_dict()
+        else:
+            headers = self.headers
+
+        field_dict: dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update(
+            {
+                "table_name": table_name,
+                "url": url,
+                "file_format": file_format,
+            }
+        )
+        if ignore_errors is not UNSET:
+            field_dict["ignore_errors"] = ignore_errors
+        if extended_timeout is not UNSET:
+            field_dict["extended_timeout"] = extended_timeout
+        if validate_schema is not UNSET:
+            field_dict["validate_schema"] = validate_schema
+        if source_type is not UNSET:
+            field_dict["source_type"] = source_type
+        if headers is not UNSET:
+            field_dict["headers"] = headers
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+        from ..models.url_copy_request_headers_type_0 import URLCopyRequestHeadersType0
+
+        d = dict(src_dict)
+        table_name = d.pop("table_name")
+
+        url = d.pop("url")
+
+        file_format = URLCopyRequestFileFormat(d.pop("file_format"))
+
+        ignore_errors = d.pop("ignore_errors", UNSET)
+
+        extended_timeout = d.pop("extended_timeout", UNSET)
+
+        validate_schema = d.pop("validate_schema", UNSET)
+
+        source_type = cast(Union[Literal["url"], Unset], d.pop("source_type", UNSET))
+        if source_type != "url" and not isinstance(source_type, Unset):
+            raise ValueError(f"source_type must match const 'url', got '{source_type}'")
+
+        def _parse_headers(
+            data: object,
+        ) -> Union["URLCopyRequestHeadersType0", None, Unset]:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            try:
+                if not isinstance(data, dict):
+                    raise TypeError()
+                headers_type_0 = URLCopyRequestHeadersType0.from_dict(data)
+
+                return headers_type_0
+            except:  # noqa: E722
+                pass
+            return cast(Union["URLCopyRequestHeadersType0", None, Unset], data)
+
+        headers = _parse_headers(d.pop("headers", UNSET))
+
+        url_copy_request = cls(
+            table_name=table_name,
+            url=url,
+            file_format=file_format,
+            ignore_errors=ignore_errors,
+            extended_timeout=extended_timeout,
+            validate_schema=validate_schema,
+            source_type=source_type,
+            headers=headers,
+        )
+
+        url_copy_request.additional_properties = d
+        return url_copy_request
+
+    @property
+    def additional_keys(self) -> list[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> Any:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
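A corresponding sketch for URLCopyRequest. The URL and header values are placeholders, and "csv" is assumed to be a valid URLCopyRequestFileFormat value; the enum's members are not shown in this diff:

from robosystems_client.models.url_copy_request import URLCopyRequest

payload = {
    "table_name": "Entity",  # hypothetical Kuzu table name
    "url": "https://example.com/data.csv",  # placeholder URL
    "file_format": "csv",  # assumed enum value, not confirmed by this diff
    "headers": {"Authorization": "Bearer ..."},  # placeholder header
}
req = URLCopyRequest.from_dict(payload)
# The headers dict is parsed into a URLCopyRequestHeadersType0 instance.
# from_dict also enforces the const: any source_type other than "url" raises,
# e.g. URLCopyRequest.from_dict({**payload, "source_type": "s3"}) -> ValueError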
robosystems_client/models/url_copy_request_headers_type_0.py (new file)
@@ -0,0 +1,44 @@
+from collections.abc import Mapping
+from typing import Any, TypeVar
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+
+T = TypeVar("T", bound="URLCopyRequestHeadersType0")
+
+
+@_attrs_define
+class URLCopyRequestHeadersType0:
+    """ """
+
+    additional_properties: dict[str, str] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        field_dict: dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+        d = dict(src_dict)
+        url_copy_request_headers_type_0 = cls()
+
+        url_copy_request_headers_type_0.additional_properties = d
+        return url_copy_request_headers_type_0
+
+    @property
+    def additional_keys(self) -> list[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> str:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: str) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
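The headers model above is a pure additional-properties container: it declares no fields of its own and simply round-trips whatever string mapping it is given. A short sketch (the token is a placeholder):

from robosystems_client.models.url_copy_request_headers_type_0 import (
    URLCopyRequestHeadersType0,
)

headers = URLCopyRequestHeadersType0()
headers["Authorization"] = "Bearer <token>"  # placeholder value
assert "Authorization" in headers
assert headers.to_dict() == {"Authorization": "Bearer <token>"}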