llama-cloud 0.1.38__py3-none-any.whl → 0.1.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: the registry flags this release of llama-cloud as potentially problematic.

Files changed (39)
  1. llama_cloud/__init__.py +12 -0
  2. llama_cloud/resources/admin/client.py +5 -5
  3. llama_cloud/resources/alpha/client.py +2 -8
  4. llama_cloud/resources/beta/client.py +30 -126
  5. llama_cloud/resources/chat_apps/client.py +8 -32
  6. llama_cloud/resources/classifier/client.py +8 -32
  7. llama_cloud/resources/data_sinks/client.py +8 -32
  8. llama_cloud/resources/data_sources/client.py +8 -32
  9. llama_cloud/resources/embedding_model_configs/client.py +12 -48
  10. llama_cloud/resources/files/client.py +42 -176
  11. llama_cloud/resources/jobs/client.py +2 -8
  12. llama_cloud/resources/llama_extract/client.py +40 -138
  13. llama_cloud/resources/organizations/client.py +4 -18
  14. llama_cloud/resources/parsing/client.py +12 -16
  15. llama_cloud/resources/pipelines/client.py +45 -32
  16. llama_cloud/resources/projects/client.py +18 -78
  17. llama_cloud/resources/reports/client.py +30 -126
  18. llama_cloud/resources/retrievers/client.py +12 -48
  19. llama_cloud/types/__init__.py +12 -0
  20. llama_cloud/types/extract_job_create.py +2 -0
  21. llama_cloud/types/extract_job_create_priority.py +29 -0
  22. llama_cloud/types/file.py +1 -1
  23. llama_cloud/types/job_names.py +0 -4
  24. llama_cloud/types/llama_extract_feature_availability.py +34 -0
  25. llama_cloud/types/llama_parse_parameters.py +1 -0
  26. llama_cloud/types/parse_job_config.py +1 -0
  27. llama_cloud/types/pipeline.py +4 -0
  28. llama_cloud/types/pipeline_create.py +2 -0
  29. llama_cloud/types/pipeline_file.py +4 -4
  30. llama_cloud/types/schema_generation_availability.py +33 -0
  31. llama_cloud/types/schema_generation_availability_status.py +17 -0
  32. llama_cloud/types/sparse_model_config.py +42 -0
  33. llama_cloud/types/sparse_model_type.py +33 -0
  34. llama_cloud/types/webhook_configuration.py +1 -0
  35. llama_cloud-0.1.40.dist-info/METADATA +106 -0
  36. {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/RECORD +38 -32
  37. {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/WHEEL +1 -1
  38. llama_cloud-0.1.38.dist-info/METADATA +0 -32
  39. {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/LICENSE +0 -0
llama_cloud/resources/organizations/client.py

@@ -657,12 +657,7 @@ class OrganizationsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     def add_user_to_project(
-        self,
-        organization_id: typing.Optional[str],
-        user_id: str,
-        *,
-        project_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
+        self, organization_id: typing.Optional[str], user_id: str, *, project_id: typing.Optional[str] = None
     ) -> typing.Any:
         """
         Add a user to a project.
@@ -673,8 +668,6 @@
         - user_id: str.
 
         - project_id: typing.Optional[str].
-
-        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -692,7 +685,7 @@
                 f"api/v1/organizations/{organization_id}/users/{user_id}/projects",
             ),
             params=remove_none_from_dict({"project_id": project_id}),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
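
The 0.1.38 signature declared project_id twice, which Python rejects outright (duplicate argument names in a def raise a SyntaxError at import time), and the same value was also mirrored into a redundant Project-Id header. 0.1.40 collapses it to a single keyword-only argument and passes project scope through the query string alone. A minimal usage sketch against the fixed signature (token and IDs are placeholders):

from llama_cloud.client import LlamaCloud

# Placeholder token and IDs, for illustration only.
client = LlamaCloud(token="llx-...")
client.organizations.add_user_to_project(
    organization_id="org_123",
    user_id="user_456",
    project_id="proj_789",  # single keyword-only argument in 0.1.40
)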
@@ -1372,12 +1365,7 @@ class AsyncOrganizationsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def add_user_to_project(
-        self,
-        organization_id: typing.Optional[str],
-        user_id: str,
-        *,
-        project_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
+        self, organization_id: typing.Optional[str], user_id: str, *, project_id: typing.Optional[str] = None
     ) -> typing.Any:
         """
         Add a user to a project.
@@ -1388,8 +1376,6 @@
         - user_id: str.
 
         - project_id: typing.Optional[str].
-
-        - project_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -1407,7 +1393,7 @@
                 f"api/v1/organizations/{organization_id}/users/{user_id}/projects",
             ),
             params=remove_none_from_dict({"project_id": project_id}),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
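
The async client receives the identical fix; the same call, sketched against AsyncLlamaCloud:

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="llx-...")  # placeholder token
    # Same consolidated signature as the sync client; IDs are placeholders.
    await client.organizations.add_user_to_project(
        organization_id="org_123", user_id="user_456", project_id="proj_789"
    )


asyncio.run(main())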
llama_cloud/resources/parsing/client.py

@@ -121,7 +121,6 @@ class ParsingClient:
         webhook_configurations: str,
         job_timeout_in_seconds: float,
         job_timeout_extra_time_per_page_in_seconds: float,
-        project_id: typing.Optional[str] = None,
     ) -> ParsingJob:
         """
         Parameters:
@@ -158,8 +157,6 @@
         - job_timeout_in_seconds: float.
 
         - job_timeout_extra_time_per_page_in_seconds: float.
-
-        - project_id: typing.Optional[str].
         """
         _request: typing.Dict[str, typing.Any] = {
             "do_not_cache": do_not_cache,
@@ -185,7 +182,7 @@
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/screenshot"),
             params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
             json=jsonable_encoder(_request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -230,6 +227,7 @@
         guess_xlsx_sheet_name: bool,
         high_res_ocr: bool,
         html_make_all_elements_visible: bool,
+        layout_aware: bool,
         html_remove_fixed_elements: bool,
         html_remove_navigation_elements: bool,
         http_proxy: str,
@@ -299,7 +297,6 @@
         page_header_suffix: str,
         page_footer_prefix: str,
         page_footer_suffix: str,
-        project_id: typing.Optional[str] = None,
     ) -> ParsingJob:
         """
         Parameters:
@@ -361,6 +358,8 @@
 
         - html_make_all_elements_visible: bool.
 
+        - layout_aware: bool.
+
         - html_remove_fixed_elements: bool.
 
         - html_remove_navigation_elements: bool.
@@ -498,8 +497,6 @@
         - page_footer_prefix: str.
 
         - page_footer_suffix: str.
-
-        - project_id: typing.Optional[str].
         """
         _request: typing.Dict[str, typing.Any] = {
             "adaptive_long_table": adaptive_long_table,
@@ -528,6 +525,7 @@
             "guess_xlsx_sheet_name": guess_xlsx_sheet_name,
             "high_res_ocr": high_res_ocr,
             "html_make_all_elements_visible": html_make_all_elements_visible,
+            "layout_aware": layout_aware,
             "html_remove_fixed_elements": html_remove_fixed_elements,
             "html_remove_navigation_elements": html_remove_navigation_elements,
             "http_proxy": http_proxy,
@@ -608,7 +606,7 @@
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/upload"),
             params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
             json=jsonable_encoder(_request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
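
Two parsing changes land here: the duplicated project_id parameter disappears from the screenshot and upload signatures (along with its stray docstring entry and the mirrored Project-Id header), and upload jobs gain a layout_aware flag that is forwarded in the JSON body. A hedged sketch of enabling the flag; upload_file is an assumed method name for the api/v1/parsing/upload endpoint, since these hunks show only the parameter list:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="llx-...")  # placeholder token
# `upload_file` is an assumption; the diff confirms only the new
# `layout_aware` field being sent to api/v1/parsing/upload.
with open("report.pdf", "rb") as f:
    job = client.parsing.upload_file(
        upload_file=f,
        layout_aware=True,  # new in 0.1.40
    )
print(job.id)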
@@ -1299,7 +1297,6 @@ class AsyncParsingClient:
         webhook_configurations: str,
         job_timeout_in_seconds: float,
         job_timeout_extra_time_per_page_in_seconds: float,
-        project_id: typing.Optional[str] = None,
     ) -> ParsingJob:
         """
         Parameters:
@@ -1336,8 +1333,6 @@
         - job_timeout_in_seconds: float.
 
         - job_timeout_extra_time_per_page_in_seconds: float.
-
-        - project_id: typing.Optional[str].
         """
         _request: typing.Dict[str, typing.Any] = {
             "do_not_cache": do_not_cache,
@@ -1363,7 +1358,7 @@
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/screenshot"),
             params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
             json=jsonable_encoder(_request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -1408,6 +1403,7 @@
         guess_xlsx_sheet_name: bool,
         high_res_ocr: bool,
         html_make_all_elements_visible: bool,
+        layout_aware: bool,
         html_remove_fixed_elements: bool,
         html_remove_navigation_elements: bool,
         http_proxy: str,
@@ -1477,7 +1473,6 @@
         page_header_suffix: str,
         page_footer_prefix: str,
         page_footer_suffix: str,
-        project_id: typing.Optional[str] = None,
     ) -> ParsingJob:
         """
         Parameters:
@@ -1539,6 +1534,8 @@
 
         - html_make_all_elements_visible: bool.
 
+        - layout_aware: bool.
+
         - html_remove_fixed_elements: bool.
 
         - html_remove_navigation_elements: bool.
@@ -1676,8 +1673,6 @@
         - page_footer_prefix: str.
 
         - page_footer_suffix: str.
-
-        - project_id: typing.Optional[str].
         """
         _request: typing.Dict[str, typing.Any] = {
             "adaptive_long_table": adaptive_long_table,
@@ -1706,6 +1701,7 @@
             "guess_xlsx_sheet_name": guess_xlsx_sheet_name,
             "high_res_ocr": high_res_ocr,
             "html_make_all_elements_visible": html_make_all_elements_visible,
+            "layout_aware": layout_aware,
             "html_remove_fixed_elements": html_remove_fixed_elements,
             "html_remove_navigation_elements": html_remove_navigation_elements,
             "http_proxy": http_proxy,
@@ -1786,7 +1782,7 @@
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/upload"),
             params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
             json=jsonable_encoder(_request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
llama_cloud/resources/pipelines/client.py

@@ -35,6 +35,7 @@ from ...types.playground_session import PlaygroundSession
 from ...types.preset_retrieval_params import PresetRetrievalParams
 from ...types.retrieval_mode import RetrievalMode
 from ...types.retrieve_results import RetrieveResults
+from ...types.sparse_model_config import SparseModelConfig
 from ...types.text_node import TextNode
 from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
 from .types.pipeline_update_embedding_config import PipelineUpdateEmbeddingConfig
@@ -67,7 +68,6 @@ class PipelinesClient:
         pipeline_name: typing.Optional[str] = None,
         pipeline_type: typing.Optional[PipelineType] = None,
         organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
     ) -> typing.List[Pipeline]:
         """
         Search for pipelines by various parameters.
@@ -82,8 +82,6 @@
         - pipeline_type: typing.Optional[PipelineType].
 
         - organization_id: typing.Optional[str].
-
-        - project_id: typing.Optional[str].
         ---
         from llama_cloud import PipelineType
         from llama_cloud.client import LlamaCloud
@@ -107,7 +105,7 @@
                     "organization_id": organization_id,
                 }
             ),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -126,7 +124,6 @@
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: PipelineCreate,
-        project_id: typing.Optional[str] = None,
     ) -> Pipeline:
         """
         Create a new pipeline for a project.
@@ -137,15 +134,13 @@
         - organization_id: typing.Optional[str].
 
         - request: PipelineCreate.
-
-        - project_id: typing.Optional[str].
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -164,7 +159,6 @@
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: PipelineCreate,
-        project_id: typing.Optional[str] = None,
     ) -> Pipeline:
         """
         Upsert a pipeline for a project.
@@ -176,15 +170,13 @@
         - organization_id: typing.Optional[str].
 
         - request: PipelineCreate.
-
-        - project_id: typing.Optional[str].
         """
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -226,6 +218,7 @@
         *,
         embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig] = OMIT,
         transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
+        sparse_model_config: typing.Optional[SparseModelConfig] = OMIT,
         data_sink_id: typing.Optional[str] = OMIT,
         embedding_model_config_id: typing.Optional[str] = OMIT,
         data_sink: typing.Optional[DataSinkCreate] = OMIT,
@@ -247,6 +240,8 @@
 
         - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
 
+        - sparse_model_config: typing.Optional[SparseModelConfig].
+
         - data_sink_id: typing.Optional[str].
 
         - embedding_model_config_id: typing.Optional[str].
@@ -272,6 +267,8 @@
             _request["embedding_config"] = embedding_config
         if transform_config is not OMIT:
             _request["transform_config"] = transform_config
+        if sparse_model_config is not OMIT:
+            _request["sparse_model_config"] = sparse_model_config
         if data_sink_id is not OMIT:
             _request["data_sink_id"] = data_sink_id
         if embedding_model_config_id is not OMIT:
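
With the new import in place, sparse_model_config flows through pipeline updates as an optional request key. A sketch of passing it; update_existing_pipeline is assumed to be the fern-generated method behind this hunk, and the SparseModelConfig constructor arguments are omitted because this diff does not show the schema (see llama_cloud/types/sparse_model_config.py in the file list):

from llama_cloud import SparseModelConfig
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="llx-...")  # placeholder token
# Method name and the bare SparseModelConfig() are assumptions; the diff
# shows only that the key is forwarded when it is not OMIT.
pipeline = client.pipelines.update_existing_pipeline(
    "pipeline_123",  # placeholder pipeline ID
    sparse_model_config=SparseModelConfig(),
)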
@@ -563,6 +560,7 @@
         *,
         data_source_id: typing.Optional[str] = None,
         only_manually_uploaded: typing.Optional[bool] = None,
+        file_name_contains: typing.Optional[str] = None,
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
         order_by: typing.Optional[str] = None,
@@ -570,6 +568,15 @@
         """
         Get files for a pipeline.
 
+        Args:
+            pipeline_id: ID of the pipeline
+            data_source_id: Optional filter by data source ID
+            only_manually_uploaded: Filter for only manually uploaded files
+            file_name_contains: Optional filter by file name (substring match)
+            limit: Limit number of results
+            offset: Offset for pagination
+            order_by: Field to order by
+
         Parameters:
         - pipeline_id: str.
 
@@ -577,6 +584,8 @@
 
         - only_manually_uploaded: typing.Optional[bool].
 
+        - file_name_contains: typing.Optional[str].
+
         - limit: typing.Optional[int].
 
         - offset: typing.Optional[int].
@@ -599,6 +608,7 @@
                 {
                     "data_source_id": data_source_id,
                     "only_manually_uploaded": only_manually_uploaded,
+                    "file_name_contains": file_name_contains,
                     "limit": limit,
                     "offset": offset,
                     "order_by": order_by,
@@ -1095,7 +1105,6 @@
         retrieve_page_figure_nodes: typing.Optional[bool] = OMIT,
         query: str,
         class_name: typing.Optional[str] = OMIT,
-        project_id: typing.Optional[str] = None,
     ) -> RetrieveResults:
         """
         Get retrieval results for a managed pipeline and a query
@@ -1136,8 +1145,6 @@
         - query: str. The query to retrieve against.
 
         - class_name: typing.Optional[str].
-
-        - project_id: typing.Optional[str].
         ---
         from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
         from llama_cloud.client import LlamaCloud
@@ -1189,7 +1196,7 @@
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
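
Retrieval behavior is otherwise unchanged; only the duplicated project_id parameter and the mirrored header are gone. A compact sketch, assuming run_search is the generated method for the api/v1/pipelines/{pipeline_id}/retrieve endpoint:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="llx-...")  # placeholder token
# `run_search` is an assumed method name; query and ID are placeholders.
results = client.pipelines.run_search(
    "pipeline_123",
    query="quarterly revenue highlights",
)
for scored_node in results.retrieval_nodes:
    print(scored_node.score)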
@@ -1750,7 +1757,6 @@ class AsyncPipelinesClient:
         pipeline_name: typing.Optional[str] = None,
         pipeline_type: typing.Optional[PipelineType] = None,
         organization_id: typing.Optional[str] = None,
-        project_id: typing.Optional[str] = None,
     ) -> typing.List[Pipeline]:
         """
         Search for pipelines by various parameters.
@@ -1765,8 +1771,6 @@
         - pipeline_type: typing.Optional[PipelineType].
 
         - organization_id: typing.Optional[str].
-
-        - project_id: typing.Optional[str].
         ---
         from llama_cloud import PipelineType
         from llama_cloud.client import AsyncLlamaCloud
@@ -1790,7 +1794,7 @@
                     "organization_id": organization_id,
                 }
             ),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -1809,7 +1813,6 @@
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: PipelineCreate,
-        project_id: typing.Optional[str] = None,
     ) -> Pipeline:
         """
         Create a new pipeline for a project.
@@ -1820,15 +1823,13 @@
         - organization_id: typing.Optional[str].
 
         - request: PipelineCreate.
-
-        - project_id: typing.Optional[str].
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -1847,7 +1848,6 @@
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         request: PipelineCreate,
-        project_id: typing.Optional[str] = None,
     ) -> Pipeline:
         """
         Upsert a pipeline for a project.
@@ -1859,15 +1859,13 @@
         - organization_id: typing.Optional[str].
 
         - request: PipelineCreate.
-
-        - project_id: typing.Optional[str].
         """
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -1909,6 +1907,7 @@
         *,
         embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig] = OMIT,
         transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
+        sparse_model_config: typing.Optional[SparseModelConfig] = OMIT,
         data_sink_id: typing.Optional[str] = OMIT,
         embedding_model_config_id: typing.Optional[str] = OMIT,
         data_sink: typing.Optional[DataSinkCreate] = OMIT,
@@ -1930,6 +1929,8 @@
 
         - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
 
+        - sparse_model_config: typing.Optional[SparseModelConfig].
+
         - data_sink_id: typing.Optional[str].
 
         - embedding_model_config_id: typing.Optional[str].
@@ -1955,6 +1956,8 @@
             _request["embedding_config"] = embedding_config
         if transform_config is not OMIT:
             _request["transform_config"] = transform_config
+        if sparse_model_config is not OMIT:
+            _request["sparse_model_config"] = sparse_model_config
         if data_sink_id is not OMIT:
             _request["data_sink_id"] = data_sink_id
         if embedding_model_config_id is not OMIT:
@@ -2246,6 +2249,7 @@
         *,
         data_source_id: typing.Optional[str] = None,
         only_manually_uploaded: typing.Optional[bool] = None,
+        file_name_contains: typing.Optional[str] = None,
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
         order_by: typing.Optional[str] = None,
@@ -2253,6 +2257,15 @@
         """
         Get files for a pipeline.
 
+        Args:
+            pipeline_id: ID of the pipeline
+            data_source_id: Optional filter by data source ID
+            only_manually_uploaded: Filter for only manually uploaded files
+            file_name_contains: Optional filter by file name (substring match)
+            limit: Limit number of results
+            offset: Offset for pagination
+            order_by: Field to order by
+
         Parameters:
         - pipeline_id: str.
 
@@ -2260,6 +2273,8 @@
 
         - only_manually_uploaded: typing.Optional[bool].
 
+        - file_name_contains: typing.Optional[str].
+
         - limit: typing.Optional[int].
 
         - offset: typing.Optional[int].
@@ -2282,6 +2297,7 @@
                 {
                     "data_source_id": data_source_id,
                     "only_manually_uploaded": only_manually_uploaded,
+                    "file_name_contains": file_name_contains,
                     "limit": limit,
                     "offset": offset,
                     "order_by": order_by,
@@ -2780,7 +2796,6 @@
         retrieve_page_figure_nodes: typing.Optional[bool] = OMIT,
         query: str,
         class_name: typing.Optional[str] = OMIT,
-        project_id: typing.Optional[str] = None,
     ) -> RetrieveResults:
         """
         Get retrieval results for a managed pipeline and a query
@@ -2821,8 +2836,6 @@
         - query: str. The query to retrieve against.
 
         - class_name: typing.Optional[str].
-
-        - project_id: typing.Optional[str].
         ---
         from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
         from llama_cloud.client import AsyncLlamaCloud
@@ -2874,7 +2887,7 @@
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300: