llama-cloud 0.1.37__py3-none-any.whl → 0.1.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (37)
  1. llama_cloud/__init__.py +8 -2
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/resources/__init__.py +2 -0
  4. llama_cloud/resources/alpha/__init__.py +2 -0
  5. llama_cloud/resources/alpha/client.py +118 -0
  6. llama_cloud/resources/beta/client.py +126 -30
  7. llama_cloud/resources/chat_apps/client.py +32 -8
  8. llama_cloud/resources/classifier/client.py +139 -11
  9. llama_cloud/resources/data_sinks/client.py +32 -8
  10. llama_cloud/resources/data_sources/client.py +32 -8
  11. llama_cloud/resources/data_sources/types/data_source_update_component.py +2 -0
  12. llama_cloud/resources/embedding_model_configs/client.py +48 -12
  13. llama_cloud/resources/files/client.py +176 -42
  14. llama_cloud/resources/jobs/client.py +12 -6
  15. llama_cloud/resources/llama_extract/client.py +138 -32
  16. llama_cloud/resources/organizations/client.py +18 -4
  17. llama_cloud/resources/parsing/client.py +16 -4
  18. llama_cloud/resources/pipelines/client.py +32 -8
  19. llama_cloud/resources/projects/client.py +78 -18
  20. llama_cloud/resources/reports/client.py +126 -30
  21. llama_cloud/resources/retrievers/client.py +48 -12
  22. llama_cloud/types/__init__.py +6 -2
  23. llama_cloud/types/agent_deployment_summary.py +1 -0
  24. llama_cloud/types/classify_job.py +2 -0
  25. llama_cloud/types/cloud_jira_data_source_v_2.py +52 -0
  26. llama_cloud/types/cloud_jira_data_source_v_2_api_version.py +21 -0
  27. llama_cloud/types/configurable_data_source_names.py +4 -0
  28. llama_cloud/types/data_source_component.py +2 -0
  29. llama_cloud/types/data_source_create_component.py +2 -0
  30. llama_cloud/types/data_source_reader_version_metadata_reader_version.py +9 -1
  31. llama_cloud/types/{classify_job_with_status.py → paginated_response_classify_job.py} +5 -18
  32. llama_cloud/types/pipeline_data_source_component.py +2 -0
  33. llama_cloud/types/usage_response_active_alerts_item.py +4 -0
  34. {llama_cloud-0.1.37.dist-info → llama_cloud-0.1.38.dist-info}/METADATA +2 -3
  35. {llama_cloud-0.1.37.dist-info → llama_cloud-0.1.38.dist-info}/RECORD +37 -33
  36. {llama_cloud-0.1.37.dist-info → llama_cloud-0.1.38.dist-info}/WHEEL +1 -1
  37. {llama_cloud-0.1.37.dist-info → llama_cloud-0.1.38.dist-info}/LICENSE +0 -0
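
The recurring change across the three client modules shown below is in how request headers are built: every call site replaces headers=self._client_wrapper.get_headers() with a merged dict that adds a Project-Id header and is filtered through remove_none_from_dict, so the header is only sent when a project_id is actually supplied. A minimal sketch of that pattern; the helper re-implementation is illustrative (the SDK ships its own under llama_cloud/core), and build_headers is a hypothetical name, not a function in the package:

from typing import Any, Dict, Optional

def remove_none_from_dict(original: Dict[str, Any]) -> Dict[str, Any]:
    # Drop keys whose value is None so optional headers are omitted entirely.
    return {key: value for key, value in original.items() if value is not None}

def build_headers(base: Dict[str, str], project_id: Optional[str]) -> Dict[str, str]:
    # Mirrors the generated call sites: merge the wrapper's base headers,
    # overlay Project-Id, then strip it again if project_id was never set.
    return remove_none_from_dict({**base, "Project-Id": project_id})

assert "Project-Id" not in build_headers({"Authorization": "Bearer tok"}, None)
assert build_headers({}, "proj_123") == {"Project-Id": "proj_123"}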
llama_cloud/resources/chat_apps/client.py

@@ -33,13 +33,19 @@ class ChatAppsClient:
         self._client_wrapper = client_wrapper
 
     def get_chat_apps_api_v_1_apps_get(
-        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
     ) -> typing.List[ChatAppResponse]:
         """
         Parameters:
         - project_id: typing.Optional[str].
 
         - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -52,7 +58,7 @@ class ChatAppsClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -74,6 +80,7 @@ class ChatAppsClient:
         retriever_id: str,
         llm_config: LlmParameters,
         retrieval_config: PresetCompositeRetrievalParams,
+        project_id: typing.Optional[str] = None,
     ) -> ChatApp:
         """
         Create a new chat app.
@@ -90,6 +97,8 @@ class ChatAppsClient:
         - llm_config: LlmParameters. Configuration for the LLM model to use for the chat app
 
         - retrieval_config: PresetCompositeRetrievalParams. Configuration for the retrieval model to use for the chat app
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import (
             CompositeRetrievalMode,
@@ -130,7 +139,7 @@ class ChatAppsClient:
                     "retrieval_config": retrieval_config,
                 }
             ),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -184,6 +193,7 @@ class ChatAppsClient:
         name: typing.Optional[str] = OMIT,
         llm_config: typing.Optional[LlmParameters] = OMIT,
         retrieval_config: typing.Optional[PresetCompositeRetrievalParams] = OMIT,
+        project_id: typing.Optional[str] = None,
     ) -> ChatApp:
         """
         Update a chat app.
@@ -200,6 +210,8 @@ class ChatAppsClient:
         - llm_config: typing.Optional[LlmParameters].
 
         - retrieval_config: typing.Optional[PresetCompositeRetrievalParams].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import (
             CompositeRetrievalMode,
@@ -239,7 +251,7 @@ class ChatAppsClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -326,13 +338,19 @@ class AsyncChatAppsClient:
         self._client_wrapper = client_wrapper
 
     async def get_chat_apps_api_v_1_apps_get(
-        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
     ) -> typing.List[ChatAppResponse]:
         """
         Parameters:
         - project_id: typing.Optional[str].
 
         - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -345,7 +363,7 @@ class AsyncChatAppsClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/apps"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -367,6 +385,7 @@ class AsyncChatAppsClient:
         retriever_id: str,
         llm_config: LlmParameters,
         retrieval_config: PresetCompositeRetrievalParams,
+        project_id: typing.Optional[str] = None,
     ) -> ChatApp:
         """
         Create a new chat app.
@@ -383,6 +402,8 @@ class AsyncChatAppsClient:
         - llm_config: LlmParameters. Configuration for the LLM model to use for the chat app
 
         - retrieval_config: PresetCompositeRetrievalParams. Configuration for the retrieval model to use for the chat app
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import (
             CompositeRetrievalMode,
@@ -423,7 +444,7 @@ class AsyncChatAppsClient:
                     "retrieval_config": retrieval_config,
                 }
             ),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -477,6 +498,7 @@ class AsyncChatAppsClient:
         name: typing.Optional[str] = OMIT,
         llm_config: typing.Optional[LlmParameters] = OMIT,
         retrieval_config: typing.Optional[PresetCompositeRetrievalParams] = OMIT,
+        project_id: typing.Optional[str] = None,
     ) -> ChatApp:
         """
         Update a chat app.
@@ -493,6 +515,8 @@ class AsyncChatAppsClient:
         - llm_config: typing.Optional[LlmParameters].
 
         - retrieval_config: typing.Optional[PresetCompositeRetrievalParams].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import (
             CompositeRetrievalMode,
@@ -532,7 +556,7 @@ class AsyncChatAppsClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/apps/{id}"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
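
Note that the rewritten get_chat_apps_api_v_1_apps_get signatures above list project_id twice among the keyword-only parameters, and the docstrings document it twice as well. Python rejects duplicate argument names at compile time, so a module containing such a definition cannot even be imported; this is presumably what the "potentially problematic" flag on this release refers to. A quick demonstration (the function name is illustrative):

src = "def get_chat_apps(*, project_id=None, organization_id=None, project_id=None): ..."
try:
    compile(src, "<diff>", "exec")
except SyntaxError as exc:
    print(exc.msg)  # duplicate argument 'project_id' in function definition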
llama_cloud/resources/classifier/client.py

@@ -12,9 +12,9 @@ from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.classifier_rule import ClassifierRule
 from ...types.classify_job import ClassifyJob
 from ...types.classify_job_results import ClassifyJobResults
-from ...types.classify_job_with_status import ClassifyJobWithStatus
 from ...types.classify_parsing_configuration import ClassifyParsingConfiguration
 from ...types.http_validation_error import HttpValidationError
+from ...types.paginated_response_classify_job import PaginatedResponseClassifyJob
 
 try:
     import pydantic
@@ -32,6 +32,61 @@ class ClassifierClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
+    def list_classify_jobs(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        page_size: typing.Optional[int] = None,
+        page_token: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
+    ) -> PaginatedResponseClassifyJob:
+        """
+        List classify jobs.
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - page_size: typing.Optional[int].
+
+        - page_token: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.classifier.list_classify_jobs()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/jobs"),
+            params=remove_none_from_dict(
+                {
+                    "project_id": project_id,
+                    "organization_id": organization_id,
+                    "page_size": page_size,
+                    "page_token": page_token,
+                }
+            ),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedResponseClassifyJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def create_classify_job(
         self,
         *,
@@ -40,6 +95,7 @@ class ClassifierClient:
         rules: typing.List[ClassifierRule],
         file_ids: typing.List[str],
         parsing_configuration: typing.Optional[ClassifyParsingConfiguration] = OMIT,
+        project_id: typing.Optional[str] = None,
     ) -> ClassifyJob:
         """
         Create a classify job.
@@ -55,6 +111,8 @@ class ClassifierClient:
         - file_ids: typing.List[str]. The IDs of the files to classify
 
         - parsing_configuration: typing.Optional[ClassifyParsingConfiguration]. The configuration for the parsing job
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import ClassifyParsingConfiguration, ParserLanguages
         from llama_cloud.client import LlamaCloud
@@ -78,7 +136,7 @@ class ClassifierClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/jobs"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -97,7 +155,8 @@ class ClassifierClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
-    ) -> ClassifyJobWithStatus:
+        project_id: typing.Optional[str] = None,
+    ) -> ClassifyJob:
         """
         Get a classify job.
         Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
@@ -108,6 +167,8 @@ class ClassifierClient:
         - project_id: typing.Optional[str].
 
         - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -124,11 +185,11 @@ class ClassifierClient:
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/classifier/jobs/{classify_job_id}"
             ),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ClassifyJobWithStatus, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(ClassifyJob, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -143,6 +204,7 @@ class ClassifierClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
     ) -> ClassifyJobResults:
         """
         Get the results of a classify job.
@@ -154,6 +216,8 @@ class ClassifierClient:
         - project_id: typing.Optional[str].
 
         - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -170,7 +234,7 @@ class ClassifierClient:
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/classifier/jobs/{classify_job_id}/results"
             ),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -188,6 +252,61 @@ class AsyncClassifierClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
+    async def list_classify_jobs(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        page_size: typing.Optional[int] = None,
+        page_token: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
+    ) -> PaginatedResponseClassifyJob:
+        """
+        List classify jobs.
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - page_size: typing.Optional[int].
+
+        - page_token: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.classifier.list_classify_jobs()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/jobs"),
+            params=remove_none_from_dict(
+                {
+                    "project_id": project_id,
+                    "organization_id": organization_id,
+                    "page_size": page_size,
+                    "page_token": page_token,
+                }
+            ),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedResponseClassifyJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def create_classify_job(
         self,
         *,
@@ -196,6 +315,7 @@ class AsyncClassifierClient:
         rules: typing.List[ClassifierRule],
         file_ids: typing.List[str],
         parsing_configuration: typing.Optional[ClassifyParsingConfiguration] = OMIT,
+        project_id: typing.Optional[str] = None,
     ) -> ClassifyJob:
         """
         Create a classify job.
@@ -211,6 +331,8 @@ class AsyncClassifierClient:
         - file_ids: typing.List[str]. The IDs of the files to classify
 
         - parsing_configuration: typing.Optional[ClassifyParsingConfiguration]. The configuration for the parsing job
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import ClassifyParsingConfiguration, ParserLanguages
         from llama_cloud.client import AsyncLlamaCloud
@@ -234,7 +356,7 @@ class AsyncClassifierClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/jobs"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -253,7 +375,8 @@ class AsyncClassifierClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
-    ) -> ClassifyJobWithStatus:
+        project_id: typing.Optional[str] = None,
+    ) -> ClassifyJob:
         """
         Get a classify job.
         Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
@@ -264,6 +387,8 @@ class AsyncClassifierClient:
         - project_id: typing.Optional[str].
 
         - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -280,11 +405,11 @@ class AsyncClassifierClient:
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/classifier/jobs/{classify_job_id}"
             ),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ClassifyJobWithStatus, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(ClassifyJob, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -299,6 +424,7 @@ class AsyncClassifierClient:
         *,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
     ) -> ClassifyJobResults:
         """
         Get the results of a classify job.
@@ -310,6 +436,8 @@ class AsyncClassifierClient:
         - project_id: typing.Optional[str].
 
         - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -326,7 +454,7 @@ class AsyncClassifierClient:
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/classifier/jobs/{classify_job_id}/results"
            ),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
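
Beyond the same Project-Id header change (and the same duplicated project_id parameter), the classifier client gains a page-token-paginated list_classify_jobs endpoint, and get_classify_job now returns ClassifyJob instead of the removed ClassifyJobWithStatus. A hedged usage sketch of the pagination loop; the attribute names on PaginatedResponseClassifyJob and ClassifyJob used here (items, next_page_token, id, status) are assumptions, so check the generated models before relying on them:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

page_token = None
while True:
    page = client.classifier.list_classify_jobs(page_size=50, page_token=page_token)
    for job in page.items:  # assumed attribute name
        print(job.id, job.status)  # assumed attribute names
    page_token = page.next_page_token  # assumed attribute name
    if not page_token:
        break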
llama_cloud/resources/data_sinks/client.py

@@ -32,7 +32,11 @@ class DataSinksClient:
         self._client_wrapper = client_wrapper
 
     def list_data_sinks(
-        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
     ) -> typing.List[DataSink]:
         """
         List data sinks for a given project.
@@ -41,6 +45,8 @@ class DataSinksClient:
         - project_id: typing.Optional[str].
 
         - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -53,7 +59,7 @@ class DataSinksClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -72,6 +78,7 @@ class DataSinksClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSinkCreate,
+        project_id: typing.Optional[str] = None,
     ) -> DataSink:
         """
         Create a new data sink.
@@ -82,6 +89,8 @@ class DataSinksClient:
         - organization_id: typing.Optional[str].
 
         - request: DataSinkCreate.
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
         from llama_cloud.client import LlamaCloud
@@ -101,7 +110,7 @@ class DataSinksClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -120,6 +129,7 @@ class DataSinksClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSinkCreate,
+        project_id: typing.Optional[str] = None,
     ) -> DataSink:
         """
         Upserts a data sink.
@@ -131,6 +141,8 @@ class DataSinksClient:
         - organization_id: typing.Optional[str].
 
         - request: DataSinkCreate.
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
         from llama_cloud.client import LlamaCloud
@@ -150,7 +162,7 @@ class DataSinksClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -286,7 +298,11 @@ class AsyncDataSinksClient:
         self._client_wrapper = client_wrapper
 
     async def list_data_sinks(
-        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
     ) -> typing.List[DataSink]:
         """
         List data sinks for a given project.
@@ -295,6 +311,8 @@ class AsyncDataSinksClient:
         - project_id: typing.Optional[str].
 
         - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -307,7 +325,7 @@ class AsyncDataSinksClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -326,6 +344,7 @@ class AsyncDataSinksClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSinkCreate,
+        project_id: typing.Optional[str] = None,
     ) -> DataSink:
         """
         Create a new data sink.
@@ -336,6 +355,8 @@ class AsyncDataSinksClient:
         - organization_id: typing.Optional[str].
 
         - request: DataSinkCreate.
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
         from llama_cloud.client import AsyncLlamaCloud
@@ -355,7 +376,7 @@ class AsyncDataSinksClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -374,6 +395,7 @@ class AsyncDataSinksClient:
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: DataSinkCreate,
+        project_id: typing.Optional[str] = None,
     ) -> DataSink:
         """
         Upserts a data sink.
@@ -385,6 +407,8 @@ class AsyncDataSinksClient:
         - organization_id: typing.Optional[str].
 
         - request: DataSinkCreate.
+
+        - project_id: typing.Optional[str].
         ---
         from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
         from llama_cloud.client import AsyncLlamaCloud
@@ -404,7 +428,7 @@ class AsyncDataSinksClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
+            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
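
The data-sink methods follow the same template: the new trailing project_id keyword is forwarded both as a project_id query parameter and as the Project-Id header. A sketch of a create call in the style of the SDK's own docstring examples; the DataSinkCreate field names, the sink_type member, and the component payload are placeholders rather than a verified configuration:

from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")
data_sink = client.data_sinks.create_data_sink(
    project_id="proj_123",  # sent as a query parameter and as the Project-Id header
    request=DataSinkCreate(
        name="my-sink",
        sink_type=ConfigurableDataSinkNames.PINECONE,  # placeholder enum member
        component={"api_key": "...", "index_name": "my-index"},  # placeholder sink config
    ),
)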