llama-cloud 0.1.38__py3-none-any.whl → 0.1.40__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of llama-cloud might be problematic.
- llama_cloud/__init__.py +12 -0
- llama_cloud/resources/admin/client.py +5 -5
- llama_cloud/resources/alpha/client.py +2 -8
- llama_cloud/resources/beta/client.py +30 -126
- llama_cloud/resources/chat_apps/client.py +8 -32
- llama_cloud/resources/classifier/client.py +8 -32
- llama_cloud/resources/data_sinks/client.py +8 -32
- llama_cloud/resources/data_sources/client.py +8 -32
- llama_cloud/resources/embedding_model_configs/client.py +12 -48
- llama_cloud/resources/files/client.py +42 -176
- llama_cloud/resources/jobs/client.py +2 -8
- llama_cloud/resources/llama_extract/client.py +40 -138
- llama_cloud/resources/organizations/client.py +4 -18
- llama_cloud/resources/parsing/client.py +12 -16
- llama_cloud/resources/pipelines/client.py +45 -32
- llama_cloud/resources/projects/client.py +18 -78
- llama_cloud/resources/reports/client.py +30 -126
- llama_cloud/resources/retrievers/client.py +12 -48
- llama_cloud/types/__init__.py +12 -0
- llama_cloud/types/extract_job_create.py +2 -0
- llama_cloud/types/extract_job_create_priority.py +29 -0
- llama_cloud/types/file.py +1 -1
- llama_cloud/types/job_names.py +0 -4
- llama_cloud/types/llama_extract_feature_availability.py +34 -0
- llama_cloud/types/llama_parse_parameters.py +1 -0
- llama_cloud/types/parse_job_config.py +1 -0
- llama_cloud/types/pipeline.py +4 -0
- llama_cloud/types/pipeline_create.py +2 -0
- llama_cloud/types/pipeline_file.py +4 -4
- llama_cloud/types/schema_generation_availability.py +33 -0
- llama_cloud/types/schema_generation_availability_status.py +17 -0
- llama_cloud/types/sparse_model_config.py +42 -0
- llama_cloud/types/sparse_model_type.py +33 -0
- llama_cloud/types/webhook_configuration.py +1 -0
- llama_cloud-0.1.40.dist-info/METADATA +106 -0
- {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/RECORD +38 -32
- {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/WHEEL +1 -1
- llama_cloud-0.1.38.dist-info/METADATA +0 -32
- {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/LICENSE +0 -0
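The dominant change in the resource-client diffs below is the removal of a duplicated `project_id` keyword parameter from dozens of method signatures (plus the matching duplicate docstring entries), together with a repaired `headers=` argument that now passes `self._client_wrapper.get_headers()`. As context for why the duplication mattered: Python refuses to compile a function definition that repeats a parameter name, which is exactly the shape being deleted in the hunks below. A minimal standalone sketch (not code from the package) reproducing the error:

    # Python rejects a repeated parameter name at compile time.
    try:
        compile(
            "def f(*, project_id=None, organization_id=None, project_id=None): pass",
            "<demo>",
            "exec",
        )
    except SyntaxError as exc:
        print(exc.msg)  # duplicate argument 'project_id' in function definition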
llama_cloud/resources/chat_apps/client.py

@@ -33,19 +33,13 @@ class ChatAppsClient:
         self._client_wrapper = client_wrapper

     def get_chat_apps_api_v_1_apps_get(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
     ) -> typing.List[ChatAppResponse]:
         """
         Parameters:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -58,7 +52,7 @@ class ChatAppsClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -80,7 +74,6 @@ class ChatAppsClient:
         retriever_id: str,
         llm_config: LlmParameters,
         retrieval_config: PresetCompositeRetrievalParams,
-        project_id: typing.Optional[str] = None,
     ) -> ChatApp:
         """
         Create a new chat app.
@@ -97,8 +90,6 @@ class ChatAppsClient:
             - llm_config: LlmParameters. Configuration for the LLM model to use for the chat app

             - retrieval_config: PresetCompositeRetrievalParams. Configuration for the retrieval model to use for the chat app
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import (
             CompositeRetrievalMode,
@@ -139,7 +130,7 @@ class ChatAppsClient:
                     "retrieval_config": retrieval_config,
                 }
             ),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -193,7 +184,6 @@ class ChatAppsClient:
         name: typing.Optional[str] = OMIT,
         llm_config: typing.Optional[LlmParameters] = OMIT,
         retrieval_config: typing.Optional[PresetCompositeRetrievalParams] = OMIT,
-        project_id: typing.Optional[str] = None,
     ) -> ChatApp:
         """
         Update a chat app.
@@ -210,8 +200,6 @@ class ChatAppsClient:
             - llm_config: typing.Optional[LlmParameters].

             - retrieval_config: typing.Optional[PresetCompositeRetrievalParams].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import (
             CompositeRetrievalMode,
@@ -251,7 +239,7 @@ class ChatAppsClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -338,19 +326,13 @@ class AsyncChatAppsClient:
         self._client_wrapper = client_wrapper

     async def get_chat_apps_api_v_1_apps_get(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
     ) -> typing.List[ChatAppResponse]:
         """
         Parameters:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -363,7 +345,7 @@ class AsyncChatAppsClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -385,7 +367,6 @@ class AsyncChatAppsClient:
         retriever_id: str,
         llm_config: LlmParameters,
         retrieval_config: PresetCompositeRetrievalParams,
-        project_id: typing.Optional[str] = None,
     ) -> ChatApp:
         """
         Create a new chat app.
@@ -402,8 +383,6 @@ class AsyncChatAppsClient:
             - llm_config: LlmParameters. Configuration for the LLM model to use for the chat app

             - retrieval_config: PresetCompositeRetrievalParams. Configuration for the retrieval model to use for the chat app
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import (
             CompositeRetrievalMode,
@@ -444,7 +423,7 @@ class AsyncChatAppsClient:
                     "retrieval_config": retrieval_config,
                 }
             ),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -498,7 +477,6 @@ class AsyncChatAppsClient:
         name: typing.Optional[str] = OMIT,
         llm_config: typing.Optional[LlmParameters] = OMIT,
         retrieval_config: typing.Optional[PresetCompositeRetrievalParams] = OMIT,
-        project_id: typing.Optional[str] = None,
     ) -> ChatApp:
         """
         Update a chat app.
@@ -515,8 +493,6 @@ class AsyncChatAppsClient:
             - llm_config: typing.Optional[LlmParameters].

             - retrieval_config: typing.Optional[PresetCompositeRetrievalParams].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import (
             CompositeRetrievalMode,
@@ -556,7 +532,7 @@ class AsyncChatAppsClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
llama_cloud/resources/classifier/client.py

@@ -39,7 +39,6 @@ class ClassifierClient:
         organization_id: typing.Optional[str] = None,
         page_size: typing.Optional[int] = None,
         page_token: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
     ) -> PaginatedResponseClassifyJob:
         """
         List classify jobs.
@@ -53,8 +52,6 @@ class ClassifierClient:
             - page_size: typing.Optional[int].

             - page_token: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -74,7 +71,7 @@ class ClassifierClient:
                     "page_token": page_token,
                 }
             ),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -95,7 +92,6 @@ class ClassifierClient:
         rules: typing.List[ClassifierRule],
         file_ids: typing.List[str],
         parsing_configuration: typing.Optional[ClassifyParsingConfiguration] = OMIT,
-        project_id: typing.Optional[str] = None,
     ) -> ClassifyJob:
         """
         Create a classify job.
@@ -111,8 +107,6 @@ class ClassifierClient:
             - file_ids: typing.List[str]. The IDs of the files to classify

             - parsing_configuration: typing.Optional[ClassifyParsingConfiguration]. The configuration for the parsing job
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ClassifyParsingConfiguration, ParserLanguages
         from llama_cloud.client import LlamaCloud
@@ -136,7 +130,7 @@ class ClassifierClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/jobs"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -155,7 +149,6 @@ class ClassifierClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
     ) -> ClassifyJob:
         """
         Get a classify job.
@@ -167,8 +160,6 @@ class ClassifierClient:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -185,7 +176,7 @@ class ClassifierClient:
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/classifier/jobs/{classify_job_id}"
             ),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -204,7 +195,6 @@ class ClassifierClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
     ) -> ClassifyJobResults:
         """
         Get the results of a classify job.
@@ -216,8 +206,6 @@ class ClassifierClient:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -234,7 +222,7 @@ class ClassifierClient:
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/classifier/jobs/{classify_job_id}/results"
             ),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -259,7 +247,6 @@ class AsyncClassifierClient:
         organization_id: typing.Optional[str] = None,
         page_size: typing.Optional[int] = None,
         page_token: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
     ) -> PaginatedResponseClassifyJob:
         """
         List classify jobs.
@@ -273,8 +260,6 @@ class AsyncClassifierClient:
             - page_size: typing.Optional[int].

             - page_token: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -294,7 +279,7 @@ class AsyncClassifierClient:
                     "page_token": page_token,
                 }
             ),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -315,7 +300,6 @@ class AsyncClassifierClient:
         rules: typing.List[ClassifierRule],
         file_ids: typing.List[str],
         parsing_configuration: typing.Optional[ClassifyParsingConfiguration] = OMIT,
-        project_id: typing.Optional[str] = None,
     ) -> ClassifyJob:
         """
         Create a classify job.
@@ -331,8 +315,6 @@ class AsyncClassifierClient:
             - file_ids: typing.List[str]. The IDs of the files to classify

             - parsing_configuration: typing.Optional[ClassifyParsingConfiguration]. The configuration for the parsing job
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ClassifyParsingConfiguration, ParserLanguages
         from llama_cloud.client import AsyncLlamaCloud
@@ -356,7 +338,7 @@ class AsyncClassifierClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/jobs"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -375,7 +357,6 @@ class AsyncClassifierClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
     ) -> ClassifyJob:
         """
         Get a classify job.
@@ -387,8 +368,6 @@ class AsyncClassifierClient:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -405,7 +384,7 @@ class AsyncClassifierClient:
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/classifier/jobs/{classify_job_id}"
             ),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -424,7 +403,6 @@ class AsyncClassifierClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
     ) -> ClassifyJobResults:
         """
         Get the results of a classify job.
@@ -436,8 +414,6 @@ class AsyncClassifierClient:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -454,7 +430,7 @@ class AsyncClassifierClient:
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/classifier/jobs/{classify_job_id}/results"
             ),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
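The classifier client gets the same treatment: one optional `project_id` and one `organization_id` keyword each on the list, create, get, and results methods. A sketch of creating a classify job; the exact method names are not visible in these hunks, so `create_classify_job` and `get_classify_job_results` are assumptions, while the parameter names (`rules`, `file_ids`, `parsing_configuration`) come straight from the signatures above:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_API_KEY")

    # Method names below are assumed; the parameters match this diff.
    job = client.classifier.create_classify_job(
        rules=[],               # typing.List[ClassifierRule]
        file_ids=["file_abc"],
        project_id="my-project-id",
    )
    results = client.classifier.get_classify_job_results(job.id)  # name assumed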
llama_cloud/resources/data_sinks/client.py

@@ -32,11 +32,7 @@ class DataSinksClient:
         self._client_wrapper = client_wrapper

     def list_data_sinks(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
     ) -> typing.List[DataSink]:
         """
         List data sinks for a given project.
@@ -45,8 +41,6 @@ class DataSinksClient:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -59,7 +53,7 @@ class DataSinksClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -78,7 +72,6 @@ class DataSinksClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSinkCreate,
-        project_id: typing.Optional[str] = None,
     ) -> DataSink:
         """
         Create a new data sink.
@@ -89,8 +82,6 @@ class DataSinksClient:
             - organization_id: typing.Optional[str].

             - request: DataSinkCreate.
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
         from llama_cloud.client import LlamaCloud
@@ -110,7 +101,7 @@ class DataSinksClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -129,7 +120,6 @@ class DataSinksClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSinkCreate,
-        project_id: typing.Optional[str] = None,
     ) -> DataSink:
         """
         Upserts a data sink.
@@ -141,8 +131,6 @@ class DataSinksClient:
             - organization_id: typing.Optional[str].

             - request: DataSinkCreate.
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
         from llama_cloud.client import LlamaCloud
@@ -162,7 +150,7 @@ class DataSinksClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -298,11 +286,7 @@ class AsyncDataSinksClient:
         self._client_wrapper = client_wrapper

     async def list_data_sinks(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
     ) -> typing.List[DataSink]:
         """
         List data sinks for a given project.
@@ -311,8 +295,6 @@ class AsyncDataSinksClient:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -325,7 +307,7 @@ class AsyncDataSinksClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -344,7 +326,6 @@ class AsyncDataSinksClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSinkCreate,
-        project_id: typing.Optional[str] = None,
     ) -> DataSink:
         """
         Create a new data sink.
@@ -355,8 +336,6 @@ class AsyncDataSinksClient:
             - organization_id: typing.Optional[str].

             - request: DataSinkCreate.
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
         from llama_cloud.client import AsyncLlamaCloud
@@ -376,7 +355,7 @@ class AsyncDataSinksClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -395,7 +374,6 @@ class AsyncDataSinksClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSinkCreate,
-        project_id: typing.Optional[str] = None,
     ) -> DataSink:
         """
         Upserts a data sink.
@@ -407,8 +385,6 @@ class AsyncDataSinksClient:
             - organization_id: typing.Optional[str].

             - request: DataSinkCreate.
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
         from llama_cloud.client import AsyncLlamaCloud
@@ -428,7 +404,7 @@ class AsyncDataSinksClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
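For data sinks the method names are explicit: `list_data_sinks` (shown above), plus create and upsert methods taking a `request: DataSinkCreate`. A sketch of the list call in 0.1.40, with only the resource accessor name (`data_sinks`) assumed:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_API_KEY")

    # One project_id and one organization_id filter remain after the dedupe.
    sinks = client.data_sinks.list_data_sinks(
        project_id="my-project-id",
        organization_id="my-org-id",
    )
    print(sinks)  # typing.List[DataSink]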
llama_cloud/resources/data_sources/client.py

@@ -33,11 +33,7 @@ class DataSourcesClient:
         self._client_wrapper = client_wrapper

     def list_data_sources(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
     ) -> typing.List[DataSource]:
         """
         List data sources for a given project.
@@ -47,8 +43,6 @@ class DataSourcesClient:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -61,7 +55,7 @@ class DataSourcesClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sources"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -80,7 +74,6 @@ class DataSourcesClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSourceCreate,
-        project_id: typing.Optional[str] = None,
     ) -> DataSource:
         """
         Create a new data source.
@@ -91,8 +84,6 @@ class DataSourcesClient:
             - organization_id: typing.Optional[str].

             - request: DataSourceCreate.
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSourceNames, DataSourceCreate
         from llama_cloud.client import LlamaCloud
@@ -112,7 +103,7 @@ class DataSourcesClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sources"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -131,7 +122,6 @@ class DataSourcesClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSourceCreate,
-        project_id: typing.Optional[str] = None,
     ) -> DataSource:
         """
         Upserts a data source.
@@ -143,8 +133,6 @@ class DataSourcesClient:
             - organization_id: typing.Optional[str].

             - request: DataSourceCreate.
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSourceNames, DataSourceCreate
         from llama_cloud.client import LlamaCloud
@@ -164,7 +152,7 @@ class DataSourcesClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sources"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -305,11 +293,7 @@ class AsyncDataSourcesClient:
         self._client_wrapper = client_wrapper

     async def list_data_sources(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
     ) -> typing.List[DataSource]:
         """
         List data sources for a given project.
@@ -319,8 +303,6 @@ class AsyncDataSourcesClient:
             - project_id: typing.Optional[str].

             - organization_id: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -333,7 +315,7 @@ class AsyncDataSourcesClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sources"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -352,7 +334,6 @@ class AsyncDataSourcesClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSourceCreate,
-        project_id: typing.Optional[str] = None,
     ) -> DataSource:
         """
         Create a new data source.
@@ -363,8 +344,6 @@ class AsyncDataSourcesClient:
             - organization_id: typing.Optional[str].

             - request: DataSourceCreate.
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSourceNames, DataSourceCreate
         from llama_cloud.client import AsyncLlamaCloud
@@ -384,7 +363,7 @@ class AsyncDataSourcesClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sources"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -403,7 +382,6 @@ class AsyncDataSourcesClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSourceCreate,
-        project_id: typing.Optional[str] = None,
     ) -> DataSource:
         """
         Upserts a data source.
@@ -415,8 +393,6 @@ class AsyncDataSourcesClient:
             - organization_id: typing.Optional[str].

             - request: DataSourceCreate.
-
-            - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSourceNames, DataSourceCreate
         from llama_cloud.client import AsyncLlamaCloud
@@ -436,7 +412,7 @@ class AsyncDataSourcesClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sources"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
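The data-sources client mirrors data sinks, including an async variant whose docstrings import `AsyncLlamaCloud`. A sketch of the async list call under the same naming assumptions (`token` kwarg and `data_sources` accessor):

    import asyncio

    from llama_cloud.client import AsyncLlamaCloud


    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_API_KEY")  # constructor kwarg assumed
        # Same consolidated keyword-only signature as the sync client.
        sources = await client.data_sources.list_data_sources(
            project_id="my-project-id",
        )
        print(sources)  # typing.List[DataSource]


    asyncio.run(main())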