llama-cloud 0.1.23__py3-none-any.whl → 0.1.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (88)
  1. llama_cloud/__init__.py +6 -70
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +1 -20
  4. llama_cloud/resources/data_sources/__init__.py +2 -2
  5. llama_cloud/resources/data_sources/client.py +5 -5
  6. llama_cloud/resources/data_sources/types/__init__.py +1 -2
  7. llama_cloud/resources/files/__init__.py +0 -3
  8. llama_cloud/resources/files/client.py +18 -19
  9. llama_cloud/resources/jobs/client.py +8 -0
  10. llama_cloud/resources/llama_extract/__init__.py +0 -8
  11. llama_cloud/resources/llama_extract/client.py +92 -24
  12. llama_cloud/resources/llama_extract/types/__init__.py +0 -8
  13. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +2 -4
  14. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +2 -4
  15. llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override.py +2 -4
  16. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +2 -4
  17. llama_cloud/resources/organizations/client.py +14 -4
  18. llama_cloud/resources/parsing/client.py +8 -0
  19. llama_cloud/resources/pipelines/__init__.py +0 -4
  20. llama_cloud/resources/pipelines/client.py +29 -20
  21. llama_cloud/resources/pipelines/types/__init__.py +0 -4
  22. llama_cloud/types/__init__.py +6 -50
  23. llama_cloud/types/composite_retrieval_result.py +5 -1
  24. llama_cloud/types/data_source.py +2 -2
  25. llama_cloud/types/data_source_create.py +2 -2
  26. llama_cloud/types/extract_agent.py +2 -4
  27. llama_cloud/types/extract_job_create_data_schema_override.py +2 -4
  28. llama_cloud/types/extract_models.py +20 -8
  29. llama_cloud/types/extract_resultset.py +2 -2
  30. llama_cloud/types/extract_resultset_data.py +2 -4
  31. llama_cloud/types/extract_run.py +3 -4
  32. llama_cloud/types/extract_run_data.py +2 -4
  33. llama_cloud/types/extract_schema_validate_response.py +2 -2
  34. llama_cloud/types/file.py +3 -4
  35. llama_cloud/types/{llm_config_result.py → file_id_presigned_url.py} +9 -5
  36. llama_cloud/types/json_type.py +9 -0
  37. llama_cloud/types/legacy_parse_job_config.py +1 -0
  38. llama_cloud/types/llama_extract_settings.py +3 -1
  39. llama_cloud/types/llama_parse_parameters.py +1 -0
  40. llama_cloud/types/page_figure_metadata.py +1 -0
  41. llama_cloud/types/{llm_configs_response.py → page_figure_node_with_score.py} +9 -4
  42. llama_cloud/types/parse_job_config.py +1 -0
  43. llama_cloud/types/pipeline_data_source.py +2 -2
  44. llama_cloud/types/pipeline_file.py +5 -8
  45. llama_cloud/types/pipeline_file_create.py +2 -2
  46. llama_cloud/types/preset_retrieval_params.py +8 -6
  47. llama_cloud/types/retrieve_results.py +5 -1
  48. llama_cloud/types/supported_llm_model_names.py +12 -4
  49. llama_cloud/types/user_organization_delete.py +1 -0
  50. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.24.dist-info}/METADATA +1 -1
  51. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.24.dist-info}/RECORD +53 -87
  52. llama_cloud/resources/admin/__init__.py +0 -2
  53. llama_cloud/resources/admin/client.py +0 -78
  54. llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +0 -7
  55. llama_cloud/resources/files/types/__init__.py +0 -7
  56. llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +0 -7
  57. llama_cloud/resources/files/types/file_create_permission_info_value.py +0 -7
  58. llama_cloud/resources/files/types/file_create_resource_info_value.py +0 -5
  59. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py +0 -7
  60. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_zero_value.py +0 -7
  61. llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override_zero_value.py +0 -7
  62. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +0 -7
  63. llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +0 -7
  64. llama_cloud/resources/pipelines/types/retrieval_params_search_filters_inference_schema_value.py +0 -7
  65. llama_cloud/types/data_source_create_custom_metadata_value.py +0 -7
  66. llama_cloud/types/data_source_custom_metadata_value.py +0 -7
  67. llama_cloud/types/extract_agent_data_schema_value.py +0 -5
  68. llama_cloud/types/extract_job_create_data_schema_override_zero_value.py +0 -7
  69. llama_cloud/types/extract_resultset_data_item_value.py +0 -7
  70. llama_cloud/types/extract_resultset_data_zero_value.py +0 -7
  71. llama_cloud/types/extract_resultset_extraction_metadata_value.py +0 -7
  72. llama_cloud/types/extract_run_data_item_value.py +0 -5
  73. llama_cloud/types/extract_run_data_schema_value.py +0 -5
  74. llama_cloud/types/extract_run_data_zero_value.py +0 -5
  75. llama_cloud/types/extract_run_extraction_metadata_value.py +0 -7
  76. llama_cloud/types/extract_schema_validate_response_data_schema_value.py +0 -7
  77. llama_cloud/types/file_permission_info_value.py +0 -5
  78. llama_cloud/types/file_resource_info_value.py +0 -5
  79. llama_cloud/types/llm_config_result_llm_type.py +0 -33
  80. llama_cloud/types/pipeline_data_source_custom_metadata_value.py +0 -7
  81. llama_cloud/types/pipeline_file_config_hash_value.py +0 -5
  82. llama_cloud/types/pipeline_file_create_custom_metadata_value.py +0 -7
  83. llama_cloud/types/pipeline_file_custom_metadata_value.py +0 -7
  84. llama_cloud/types/pipeline_file_permission_info_value.py +0 -7
  85. llama_cloud/types/pipeline_file_resource_info_value.py +0 -7
  86. llama_cloud/types/preset_retrieval_params_search_filters_inference_schema_value.py +0 -7
  87. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.24.dist-info}/LICENSE +0 -0
  88. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.24.dist-info}/WHEEL +0 -0
llama_cloud/resources/llama_extract/client.py

@@ -40,24 +40,26 @@ class LlamaExtractClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper

-    def list_extraction_agents(self, *, project_id: str) -> typing.List[ExtractAgent]:
+    def list_extraction_agents(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[ExtractAgent]:
         """
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.llama_extract.list_extraction_agents(
-            project_id="string",
-        )
+        client.llama_extract.list_extraction_agents()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
-            params=remove_none_from_dict({"project_id": project_id}),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -166,12 +168,16 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_extraction_agent_by_name(self, name: str, *, project_id: typing.Optional[str] = None) -> ExtractAgent:
+    def get_extraction_agent_by_name(
+        self, name: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> ExtractAgent:
         """
         Parameters:
             - name: str.

             - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -187,7 +193,7 @@ class LlamaExtractClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/by-name/{name}"
             ),
-            params=remove_none_from_dict({"project_id": project_id}),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
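The hunks above make `project_id` optional and add an `organization_id` query filter to agent listing and lookup. A minimal usage sketch against 0.1.24 (token, IDs, and the agent name are placeholders, not real values):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Both filters are optional now; pass either, both, or neither.
    agents = client.llama_extract.list_extraction_agents(organization_id="org_123")
    agent = client.llama_extract.get_extraction_agent_by_name(
        "invoice-parser", organization_id="org_123"
    )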
@@ -608,10 +614,16 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_job_result(self, job_id: str) -> ExtractResultset:
+    def get_job_result(
+        self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> ExtractResultset:
         """
         Parameters:
             - job_id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -625,6 +637,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
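`get_job_result` forwards the same optional scoping as query parameters through `remove_none_from_dict`, so omitted filters never reach the wire. A sketch reusing the client above (the job ID is a placeholder):

    result = client.llama_extract.get_job_result(
        "job_789", project_id="proj_123", organization_id="org_123"
    )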
@@ -706,10 +719,16 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_run_by_job_id(self, job_id: str) -> ExtractRun:
+    def get_run_by_job_id(
+        self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> ExtractRun:
         """
         Parameters:
             - job_id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -723,6 +742,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/by-job/{job_id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -736,10 +756,16 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_run(self, run_id: str) -> ExtractRun:
+    def get_run(
+        self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> ExtractRun:
         """
         Parameters:
             - run_id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -753,6 +779,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
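Run lookups gain identical parameters, whether addressed by job ID or by run ID. A sketch with placeholder IDs (`run.id` assumes the `ExtractRun` model exposes its own ID, which these hunks do not show):

    run = client.llama_extract.get_run_by_job_id("job_789", organization_id="org_123")
    same_run = client.llama_extract.get_run(run.id)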
@@ -766,10 +793,16 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def delete_extraction_run(self, run_id: str) -> typing.Any:
+    def delete_extraction_run(
+        self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Parameters:
             - run_id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -783,6 +816,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "DELETE",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
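Deletion follows the same pattern; the new query parameters only narrow which run the backend resolves before deleting it (placeholder IDs again):

    client.llama_extract.delete_extraction_run("run_abc", project_id="proj_123")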
@@ -801,24 +835,26 @@ class AsyncLlamaExtractClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper

-    async def list_extraction_agents(self, *, project_id: str) -> typing.List[ExtractAgent]:
+    async def list_extraction_agents(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[ExtractAgent]:
         """
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.llama_extract.list_extraction_agents(
-            project_id="string",
-        )
+        await client.llama_extract.list_extraction_agents()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
-            params=remove_none_from_dict({"project_id": project_id}),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -927,12 +963,16 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_extraction_agent_by_name(self, name: str, *, project_id: typing.Optional[str] = None) -> ExtractAgent:
+    async def get_extraction_agent_by_name(
+        self, name: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> ExtractAgent:
         """
         Parameters:
             - name: str.

             - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -948,7 +988,7 @@ class AsyncLlamaExtractClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/by-name/{name}"
             ),
-            params=remove_none_from_dict({"project_id": project_id}),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1369,10 +1409,16 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_job_result(self, job_id: str) -> ExtractResultset:
+    async def get_job_result(
+        self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> ExtractResultset:
         """
         Parameters:
             - job_id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -1386,6 +1432,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1467,10 +1514,16 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_run_by_job_id(self, job_id: str) -> ExtractRun:
+    async def get_run_by_job_id(
+        self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> ExtractRun:
         """
         Parameters:
             - job_id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -1484,6 +1537,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/by-job/{job_id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1497,10 +1551,16 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_run(self, run_id: str) -> ExtractRun:
+    async def get_run(
+        self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> ExtractRun:
         """
         Parameters:
             - run_id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -1514,6 +1574,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1527,10 +1588,16 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def delete_extraction_run(self, run_id: str) -> typing.Any:
+    async def delete_extraction_run(
+        self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Parameters:
             - run_id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -1544,6 +1611,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "DELETE",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
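The async client mirrors every one of these changes one-to-one. A sketch of the listing call with `AsyncLlamaCloud` (placeholder token and organization ID):

    import asyncio

    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        agents = await client.llama_extract.list_extraction_agents(organization_id="org_123")
        print(len(agents))

    asyncio.run(main())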
llama_cloud/resources/llama_extract/types/__init__.py

@@ -1,21 +1,13 @@
 # This file was auto-generated by Fern from our API Definition.

 from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
-from .extract_agent_create_data_schema_zero_value import ExtractAgentCreateDataSchemaZeroValue
 from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
-from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
 from .extract_job_create_batch_data_schema_override import ExtractJobCreateBatchDataSchemaOverride
-from .extract_job_create_batch_data_schema_override_zero_value import ExtractJobCreateBatchDataSchemaOverrideZeroValue
 from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
-from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue

 __all__ = [
     "ExtractAgentCreateDataSchema",
-    "ExtractAgentCreateDataSchemaZeroValue",
     "ExtractAgentUpdateDataSchema",
-    "ExtractAgentUpdateDataSchemaZeroValue",
     "ExtractJobCreateBatchDataSchemaOverride",
-    "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
     "ExtractSchemaValidateRequestDataSchema",
-    "ExtractSchemaValidateRequestDataSchemaZeroValue",
 ]
llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py

@@ -2,8 +2,6 @@

 import typing

-from .extract_agent_create_data_schema_zero_value import ExtractAgentCreateDataSchemaZeroValue
+from ....types.json_type import JsonType

-ExtractAgentCreateDataSchema = typing.Union[
-    typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaZeroValue]], str
-]
+ExtractAgentCreateDataSchema = typing.Union[typing.Dict[str, typing.Optional[JsonType]], str]

llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py

@@ -2,8 +2,6 @@

 import typing

-from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
+from ....types.json_type import JsonType

-ExtractAgentUpdateDataSchema = typing.Union[
-    typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaZeroValue]], str
-]
+ExtractAgentUpdateDataSchema = typing.Union[typing.Dict[str, typing.Optional[JsonType]], str]

llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override.py

@@ -2,8 +2,6 @@

 import typing

-from .extract_job_create_batch_data_schema_override_zero_value import ExtractJobCreateBatchDataSchemaOverrideZeroValue
+from ....types.json_type import JsonType

-ExtractJobCreateBatchDataSchemaOverride = typing.Union[
-    typing.Dict[str, typing.Optional[ExtractJobCreateBatchDataSchemaOverrideZeroValue]], str
-]
+ExtractJobCreateBatchDataSchemaOverride = typing.Union[typing.Dict[str, typing.Optional[JsonType]], str]

llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py

@@ -2,8 +2,6 @@

 import typing

-from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue
+from ....types.json_type import JsonType

-ExtractSchemaValidateRequestDataSchema = typing.Union[
-    typing.Dict[str, typing.Optional[ExtractSchemaValidateRequestDataSchemaZeroValue]], str
-]
+ExtractSchemaValidateRequestDataSchema = typing.Union[typing.Dict[str, typing.Optional[JsonType]], str]
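These four hunks collapse the per-endpoint `*ZeroValue` aliases into the shared `JsonType` introduced in `llama_cloud/types/json_type.py`, so every data-schema field is now a JSON-ish dict or a raw string. A sketch of what the union accepts (the schema content is illustrative, not a required shape):

    import typing

    # Both forms satisfy ExtractAgentCreateDataSchema after this change:
    as_dict: typing.Dict[str, typing.Any] = {
        "type": "object",
        "properties": {"total": {"type": "number"}},
    }
    as_string = '{"type": "object", "properties": {"total": {"type": "number"}}}'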
llama_cloud/resources/organizations/client.py

@@ -408,14 +408,18 @@ class OrganizationsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def remove_users_from_organization(self, organization_id: str, member_user_id: str) -> None:
+    def remove_users_from_organization(
+        self, organization_id: str, member_user_id: str, *, request: typing.Optional[typing.List[str]] = None
+    ) -> None:
         """
-        Remove users from an organization by email.
+        Remove users from an organization.

         Parameters:
             - organization_id: str.

             - member_user_id: str.
+
+            - request: typing.Optional[typing.List[str]].
         ---
         from llama_cloud.client import LlamaCloud

@@ -433,6 +437,7 @@ class OrganizationsClient:
                 f"{self._client_wrapper.get_base_url()}/",
                 f"api/v1/organizations/{organization_id}/users/{member_user_id}",
             ),
+            json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1109,14 +1114,18 @@ class AsyncOrganizationsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def remove_users_from_organization(self, organization_id: str, member_user_id: str) -> None:
+    async def remove_users_from_organization(
+        self, organization_id: str, member_user_id: str, *, request: typing.Optional[typing.List[str]] = None
+    ) -> None:
         """
-        Remove users from an organization by email.
+        Remove users from an organization.

         Parameters:
             - organization_id: str.

             - member_user_id: str.
+
+            - request: typing.Optional[typing.List[str]].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -1134,6 +1143,7 @@ class AsyncOrganizationsClient:
                 f"{self._client_wrapper.get_base_url()}/",
                 f"api/v1/organizations/{organization_id}/users/{member_user_id}",
             ),
+            json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
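`remove_users_from_organization` keeps its path parameters but can now also send an optional JSON list body; the docstring drops "by email", and the diff pins the body type only to `typing.List[str]`, not its semantics. A sketch with placeholder IDs:

    client.organizations.remove_users_from_organization(
        "org_123",
        "user_456",
        request=["user_456"],  # optional body; omit to send no JSON payload
    )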
llama_cloud/resources/parsing/client.py

@@ -232,6 +232,7 @@ class ParsingClient:
         language: typing.List[ParserLanguages],
         extract_layout: bool,
         max_pages: typing.Optional[int] = OMIT,
+        outlined_table_extraction: bool,
         output_pdf_of_document: bool,
         output_s_3_path_prefix: str,
         output_s_3_region: str,
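`outlined_table_extraction` joins the signature above as a required keyword field and, as the following hunks show, is documented and forwarded into the request body. The full method takes many more required fields than these hunks display, so this is only a hypothetical fragment of the keyword set, with the new flag called out:

    # Partial kwargs for the parsing upload call; every other required
    # field from the full signature must still be supplied.
    parse_kwargs = dict(
        extract_layout=True,
        outlined_table_extraction=True,  # new in 0.1.24
        output_pdf_of_document=False,
    )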
@@ -360,6 +361,8 @@ class ParsingClient:

             - max_pages: typing.Optional[int].

+            - outlined_table_extraction: bool.
+
             - output_pdf_of_document: bool.

             - output_s_3_path_prefix: str.
@@ -493,6 +496,7 @@ class ParsingClient:
                 "invalidate_cache": invalidate_cache,
                 "language": language,
                 "extract_layout": extract_layout,
+                "outlined_table_extraction": outlined_table_extraction,
                 "output_pdf_of_document": output_pdf_of_document,
                 "output_s3_path_prefix": output_s_3_path_prefix,
                 "output_s3_region": output_s_3_region,
@@ -1258,6 +1262,7 @@ class AsyncParsingClient:
         language: typing.List[ParserLanguages],
         extract_layout: bool,
         max_pages: typing.Optional[int] = OMIT,
+        outlined_table_extraction: bool,
         output_pdf_of_document: bool,
         output_s_3_path_prefix: str,
         output_s_3_region: str,
@@ -1386,6 +1391,8 @@ class AsyncParsingClient:

             - max_pages: typing.Optional[int].

+            - outlined_table_extraction: bool.
+
             - output_pdf_of_document: bool.

             - output_s_3_path_prefix: str.
@@ -1519,6 +1526,7 @@ class AsyncParsingClient:
                 "invalidate_cache": invalidate_cache,
                 "language": language,
                 "extract_layout": extract_layout,
+                "outlined_table_extraction": outlined_table_extraction,
                 "output_pdf_of_document": output_pdf_of_document,
                 "output_s3_path_prefix": output_s_3_path_prefix,
                 "output_s3_region": output_s_3_region,
llama_cloud/resources/pipelines/__init__.py

@@ -1,7 +1,6 @@
 # This file was auto-generated by Fern from our API Definition.

 from .types import (
-    PipelineFileUpdateCustomMetadataValue,
     PipelineUpdateEmbeddingConfig,
     PipelineUpdateEmbeddingConfig_AzureEmbedding,
     PipelineUpdateEmbeddingConfig_BedrockEmbedding,
@@ -11,11 +10,9 @@ from .types import (
     PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
     PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
     PipelineUpdateTransformConfig,
-    RetrievalParamsSearchFiltersInferenceSchemaValue,
 )

 __all__ = [
-    "PipelineFileUpdateCustomMetadataValue",
     "PipelineUpdateEmbeddingConfig",
     "PipelineUpdateEmbeddingConfig_AzureEmbedding",
     "PipelineUpdateEmbeddingConfig_BedrockEmbedding",
@@ -25,5 +22,4 @@ __all__ = [
     "PipelineUpdateEmbeddingConfig_OpenaiEmbedding",
     "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
     "PipelineUpdateTransformConfig",
-    "RetrievalParamsSearchFiltersInferenceSchemaValue",
 ]
llama_cloud/resources/pipelines/client.py

@@ -17,6 +17,7 @@ from ...types.eval_execution_params import EvalExecutionParams
 from ...types.file_count_by_status_response import FileCountByStatusResponse
 from ...types.http_validation_error import HttpValidationError
 from ...types.input_message import InputMessage
+from ...types.json_type import JsonType
 from ...types.llama_parse_parameters import LlamaParseParameters
 from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
 from ...types.metadata_filters import MetadataFilters
@@ -36,12 +37,8 @@ from ...types.preset_retrieval_params import PresetRetrievalParams
 from ...types.retrieval_mode import RetrievalMode
 from ...types.retrieve_results import RetrieveResults
 from ...types.text_node import TextNode
-from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
 from .types.pipeline_update_embedding_config import PipelineUpdateEmbeddingConfig
 from .types.pipeline_update_transform_config import PipelineUpdateTransformConfig
-from .types.retrieval_params_search_filters_inference_schema_value import (
-    RetrievalParamsSearchFiltersInferenceSchemaValue,
-)

 try:
     import pydantic
@@ -665,9 +662,7 @@ class PipelinesClient:
         file_id: str,
         pipeline_id: str,
         *,
-        custom_metadata: typing.Optional[
-            typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
-        ] = OMIT,
+        custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]] = OMIT,
     ) -> PipelineFile:
         """
         Update a file for a pipeline.
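With `JsonType`, per-file metadata values can be any JSON value rather than a generated wrapper type (the docstring hunk below picks up the same change). A sketch with placeholder IDs, assuming the surrounding method is the SDK's `update_pipeline_file` (its name is not visible in these hunks):

    pipeline_file = client.pipelines.update_pipeline_file(
        "file_123",
        "pipe_456",
        custom_metadata={"source": "crm", "page_count": 12, "tags": ["q3", "invoice"]},
    )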
@@ -677,7 +672,7 @@ class PipelinesClient:

             - pipeline_id: str.

-            - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
+            - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]].
         ---
         from llama_cloud.client import LlamaCloud

@@ -1044,12 +1039,12 @@ class PipelinesClient:
         rerank_top_n: typing.Optional[int] = OMIT,
         alpha: typing.Optional[float] = OMIT,
         search_filters: typing.Optional[MetadataFilters] = OMIT,
-        search_filters_inference_schema: typing.Optional[
-            typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]
-        ] = OMIT,
+        search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]] = OMIT,
         files_top_k: typing.Optional[int] = OMIT,
         retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         retrieve_image_nodes: typing.Optional[bool] = OMIT,
+        retrieve_page_screenshot_nodes: typing.Optional[bool] = OMIT,
+        retrieve_page_figure_nodes: typing.Optional[bool] = OMIT,
         query: str,
         class_name: typing.Optional[str] = OMIT,
     ) -> RetrieveResults:
@@ -1077,7 +1072,7 @@ class PipelinesClient:

             - search_filters: typing.Optional[MetadataFilters].

-            - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]].
+            - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]].

             - files_top_k: typing.Optional[int].

@@ -1085,6 +1080,10 @@ class PipelinesClient:

             - retrieve_image_nodes: typing.Optional[bool]. Whether to retrieve image nodes.

+            - retrieve_page_screenshot_nodes: typing.Optional[bool]. Whether to retrieve page screenshot nodes.
+
+            - retrieve_page_figure_nodes: typing.Optional[bool]. Whether to retrieve page figure nodes.
+
             - query: str. The query to retrieve against.

             - class_name: typing.Optional[str].
@@ -1128,6 +1127,10 @@ class PipelinesClient:
             _request["retrieval_mode"] = retrieval_mode
         if retrieve_image_nodes is not OMIT:
             _request["retrieve_image_nodes"] = retrieve_image_nodes
+        if retrieve_page_screenshot_nodes is not OMIT:
+            _request["retrieve_page_screenshot_nodes"] = retrieve_page_screenshot_nodes
+        if retrieve_page_figure_nodes is not OMIT:
+            _request["retrieve_page_figure_nodes"] = retrieve_page_figure_nodes
         if class_name is not OMIT:
             _request["class_name"] = class_name
         _response = self._client_wrapper.httpx_client.request(
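The two retrieval toggles are plain booleans that stay out of the request body unless set. A sketch, assuming this method is the SDK's `run_search` (the pipeline ID is a placeholder):

    results = client.pipelines.run_search(
        "pipe_456",
        query="net revenue by quarter",
        retrieve_page_screenshot_nodes=True,
        retrieve_page_figure_nodes=True,  # pairs with the new page_figure_node_with_score type
    )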
@@ -2294,9 +2297,7 @@ class AsyncPipelinesClient:
         file_id: str,
         pipeline_id: str,
         *,
-        custom_metadata: typing.Optional[
-            typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
-        ] = OMIT,
+        custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]] = OMIT,
     ) -> PipelineFile:
         """
         Update a file for a pipeline.
@@ -2306,7 +2307,7 @@ class AsyncPipelinesClient:

             - pipeline_id: str.

-            - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
+            - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -2675,12 +2676,12 @@ class AsyncPipelinesClient:
         rerank_top_n: typing.Optional[int] = OMIT,
         alpha: typing.Optional[float] = OMIT,
         search_filters: typing.Optional[MetadataFilters] = OMIT,
-        search_filters_inference_schema: typing.Optional[
-            typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]
-        ] = OMIT,
+        search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]] = OMIT,
         files_top_k: typing.Optional[int] = OMIT,
         retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         retrieve_image_nodes: typing.Optional[bool] = OMIT,
+        retrieve_page_screenshot_nodes: typing.Optional[bool] = OMIT,
+        retrieve_page_figure_nodes: typing.Optional[bool] = OMIT,
         query: str,
         class_name: typing.Optional[str] = OMIT,
     ) -> RetrieveResults:
@@ -2708,7 +2709,7 @@ class AsyncPipelinesClient:

             - search_filters: typing.Optional[MetadataFilters].

-            - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]].
+            - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]].

             - files_top_k: typing.Optional[int].

@@ -2716,6 +2717,10 @@ class AsyncPipelinesClient:

             - retrieve_image_nodes: typing.Optional[bool]. Whether to retrieve image nodes.

+            - retrieve_page_screenshot_nodes: typing.Optional[bool]. Whether to retrieve page screenshot nodes.
+
+            - retrieve_page_figure_nodes: typing.Optional[bool]. Whether to retrieve page figure nodes.
+
             - query: str. The query to retrieve against.

             - class_name: typing.Optional[str].
@@ -2759,6 +2764,10 @@ class AsyncPipelinesClient:
             _request["retrieval_mode"] = retrieval_mode
         if retrieve_image_nodes is not OMIT:
             _request["retrieve_image_nodes"] = retrieve_image_nodes
+        if retrieve_page_screenshot_nodes is not OMIT:
+            _request["retrieve_page_screenshot_nodes"] = retrieve_page_screenshot_nodes
+        if retrieve_page_figure_nodes is not OMIT:
+            _request["retrieve_page_figure_nodes"] = retrieve_page_figure_nodes
         if class_name is not OMIT:
             _request["class_name"] = class_name
         _response = await self._client_wrapper.httpx_client.request(
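And the async mirror, keeping both clients in lockstep (same assumptions and placeholders as the sync sketch above):

    async def search() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        results = await client.pipelines.run_search(
            "pipe_456",
            query="net revenue by quarter",
            retrieve_page_figure_nodes=True,
        )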