llama-cloud 0.1.38__py3-none-any.whl → 0.1.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic; see the registry's advisory page for more details.

Files changed (39):
  1. llama_cloud/__init__.py +12 -0
  2. llama_cloud/resources/admin/client.py +5 -5
  3. llama_cloud/resources/alpha/client.py +2 -8
  4. llama_cloud/resources/beta/client.py +30 -126
  5. llama_cloud/resources/chat_apps/client.py +8 -32
  6. llama_cloud/resources/classifier/client.py +8 -32
  7. llama_cloud/resources/data_sinks/client.py +8 -32
  8. llama_cloud/resources/data_sources/client.py +8 -32
  9. llama_cloud/resources/embedding_model_configs/client.py +12 -48
  10. llama_cloud/resources/files/client.py +42 -176
  11. llama_cloud/resources/jobs/client.py +2 -8
  12. llama_cloud/resources/llama_extract/client.py +40 -138
  13. llama_cloud/resources/organizations/client.py +4 -18
  14. llama_cloud/resources/parsing/client.py +12 -16
  15. llama_cloud/resources/pipelines/client.py +45 -32
  16. llama_cloud/resources/projects/client.py +18 -78
  17. llama_cloud/resources/reports/client.py +30 -126
  18. llama_cloud/resources/retrievers/client.py +12 -48
  19. llama_cloud/types/__init__.py +12 -0
  20. llama_cloud/types/extract_job_create.py +2 -0
  21. llama_cloud/types/extract_job_create_priority.py +29 -0
  22. llama_cloud/types/file.py +1 -1
  23. llama_cloud/types/job_names.py +0 -4
  24. llama_cloud/types/llama_extract_feature_availability.py +34 -0
  25. llama_cloud/types/llama_parse_parameters.py +1 -0
  26. llama_cloud/types/parse_job_config.py +1 -0
  27. llama_cloud/types/pipeline.py +4 -0
  28. llama_cloud/types/pipeline_create.py +2 -0
  29. llama_cloud/types/pipeline_file.py +4 -4
  30. llama_cloud/types/schema_generation_availability.py +33 -0
  31. llama_cloud/types/schema_generation_availability_status.py +17 -0
  32. llama_cloud/types/sparse_model_config.py +42 -0
  33. llama_cloud/types/sparse_model_type.py +33 -0
  34. llama_cloud/types/webhook_configuration.py +1 -0
  35. llama_cloud-0.1.40.dist-info/METADATA +106 -0
  36. {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/RECORD +38 -32
  37. {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/WHEEL +1 -1
  38. llama_cloud-0.1.38.dist-info/METADATA +0 -32
  39. {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/LICENSE +0 -0
@@ -34,7 +34,6 @@ class JobsClient:
34
34
  include_usage_metrics: typing.Optional[bool] = None,
35
35
  project_id: typing.Optional[str] = None,
36
36
  organization_id: typing.Optional[str] = None,
37
- project_id: typing.Optional[str] = None,
38
37
  ) -> PaginatedJobsHistoryWithMetrics:
39
38
  """
40
39
  Get jobs for a project.
@@ -57,8 +56,6 @@ class JobsClient:
57
56
  - project_id: typing.Optional[str].
58
57
 
59
58
  - organization_id: typing.Optional[str].
60
-
61
- - project_id: typing.Optional[str].
62
59
  ---
63
60
  from llama_cloud.client import LlamaCloud
64
61
 
@@ -81,7 +78,7 @@ class JobsClient:
81
78
  "organization_id": organization_id,
82
79
  }
83
80
  ),
84
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
81
+ headers=self._client_wrapper.get_headers(),
85
82
  timeout=60,
86
83
  )
87
84
  if 200 <= _response.status_code < 300:
@@ -109,7 +106,6 @@ class AsyncJobsClient:
109
106
  include_usage_metrics: typing.Optional[bool] = None,
110
107
  project_id: typing.Optional[str] = None,
111
108
  organization_id: typing.Optional[str] = None,
112
- project_id: typing.Optional[str] = None,
113
109
  ) -> PaginatedJobsHistoryWithMetrics:
114
110
  """
115
111
  Get jobs for a project.
@@ -132,8 +128,6 @@ class AsyncJobsClient:
132
128
  - project_id: typing.Optional[str].
133
129
 
134
130
  - organization_id: typing.Optional[str].
135
-
136
- - project_id: typing.Optional[str].
137
131
  ---
138
132
  from llama_cloud.client import AsyncLlamaCloud
139
133
 
@@ -156,7 +150,7 @@ class AsyncJobsClient:
156
150
  "organization_id": organization_id,
157
151
  }
158
152
  ),
159
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
153
+ headers=self._client_wrapper.get_headers(),
160
154
  timeout=60,
161
155
  )
162
156
  if 200 <= _response.status_code < 300:
@@ -50,7 +50,6 @@ class LlamaExtractClient:
50
50
  include_default: typing.Optional[bool] = None,
51
51
  project_id: typing.Optional[str] = None,
52
52
  organization_id: typing.Optional[str] = None,
53
- project_id: typing.Optional[str] = None,
54
53
  ) -> typing.List[ExtractAgent]:
55
54
  """
56
55
  Parameters:
@@ -59,8 +58,6 @@ class LlamaExtractClient:
59
58
  - project_id: typing.Optional[str].
60
59
 
61
60
  - organization_id: typing.Optional[str].
62
-
63
- - project_id: typing.Optional[str].
64
61
  ---
65
62
  from llama_cloud.client import LlamaCloud
66
63
 
@@ -75,7 +72,7 @@ class LlamaExtractClient:
75
72
  params=remove_none_from_dict(
76
73
  {"include_default": include_default, "project_id": project_id, "organization_id": organization_id}
77
74
  ),
78
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
75
+ headers=self._client_wrapper.get_headers(),
79
76
  timeout=60,
80
77
  )
81
78
  if 200 <= _response.status_code < 300:
@@ -96,7 +93,6 @@ class LlamaExtractClient:
96
93
  name: str,
97
94
  data_schema: ExtractAgentCreateDataSchema,
98
95
  config: ExtractConfig,
99
- project_id: typing.Optional[str] = None,
100
96
  ) -> ExtractAgent:
101
97
  """
102
98
  Parameters:
@@ -109,8 +105,6 @@ class LlamaExtractClient:
109
105
  - data_schema: ExtractAgentCreateDataSchema. The schema of the data.
110
106
 
111
107
  - config: ExtractConfig. The configuration parameters for the extraction agent.
112
-
113
- - project_id: typing.Optional[str].
114
108
  ---
115
109
  from llama_cloud import (
116
110
  DocumentChunkMode,
@@ -139,7 +133,7 @@ class LlamaExtractClient:
139
133
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
140
134
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
141
135
  json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
142
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
136
+ headers=self._client_wrapper.get_headers(),
143
137
  timeout=60,
144
138
  )
145
139
  if 200 <= _response.status_code < 300:
@@ -195,7 +189,6 @@ class LlamaExtractClient:
195
189
  organization_id: typing.Optional[str] = None,
196
190
  prompt: typing.Optional[str] = OMIT,
197
191
  file_id: typing.Optional[str] = OMIT,
198
- project_id: typing.Optional[str] = None,
199
192
  ) -> ExtractSchemaGenerateResponse:
200
193
  """
201
194
  Generates an extraction agent's schema definition from a file and/or natural language prompt.
@@ -208,8 +201,6 @@ class LlamaExtractClient:
208
201
  - prompt: typing.Optional[str].
209
202
 
210
203
  - file_id: typing.Optional[str].
211
-
212
- - project_id: typing.Optional[str].
213
204
  ---
214
205
  from llama_cloud.client import LlamaCloud
215
206
 
@@ -230,7 +221,7 @@ class LlamaExtractClient:
230
221
  ),
231
222
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
232
223
  json=jsonable_encoder(_request),
233
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
224
+ headers=self._client_wrapper.get_headers(),
234
225
  timeout=60,
235
226
  )
236
227
  if 200 <= _response.status_code < 300:
@@ -244,12 +235,7 @@ class LlamaExtractClient:
244
235
  raise ApiError(status_code=_response.status_code, body=_response_json)
245
236
 
246
237
  def get_extraction_agent_by_name(
247
- self,
248
- name: str,
249
- *,
250
- project_id: typing.Optional[str] = None,
251
- organization_id: typing.Optional[str] = None,
252
- project_id: typing.Optional[str] = None,
238
+ self, name: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
253
239
  ) -> ExtractAgent:
254
240
  """
255
241
  Parameters:
@@ -258,8 +244,6 @@ class LlamaExtractClient:
258
244
  - project_id: typing.Optional[str].
259
245
 
260
246
  - organization_id: typing.Optional[str].
261
-
262
- - project_id: typing.Optional[str].
263
247
  ---
264
248
  from llama_cloud.client import LlamaCloud
265
249
 
@@ -276,7 +260,7 @@ class LlamaExtractClient:
276
260
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/by-name/{name}"
277
261
  ),
278
262
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
279
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
263
+ headers=self._client_wrapper.get_headers(),
280
264
  timeout=60,
281
265
  )
282
266
  if 200 <= _response.status_code < 300:
@@ -290,11 +274,7 @@ class LlamaExtractClient:
290
274
  raise ApiError(status_code=_response.status_code, body=_response_json)
291
275
 
292
276
  def get_or_create_default_extraction_agent(
293
- self,
294
- *,
295
- project_id: typing.Optional[str] = None,
296
- organization_id: typing.Optional[str] = None,
297
- project_id: typing.Optional[str] = None,
277
+ self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
298
278
  ) -> ExtractAgent:
299
279
  """
300
280
  Get or create a default extraction agent for the current project.
@@ -304,8 +284,6 @@ class LlamaExtractClient:
304
284
  - project_id: typing.Optional[str].
305
285
 
306
286
  - organization_id: typing.Optional[str].
307
-
308
- - project_id: typing.Optional[str].
309
287
  ---
310
288
  from llama_cloud.client import LlamaCloud
311
289
 
@@ -320,7 +298,7 @@ class LlamaExtractClient:
320
298
  f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents/default"
321
299
  ),
322
300
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
323
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
301
+ headers=self._client_wrapper.get_headers(),
324
302
  timeout=60,
325
303
  )
326
304
  if 200 <= _response.status_code < 300:
@@ -492,6 +470,7 @@ class LlamaExtractClient:
492
470
  ExtractConfig,
493
471
  ExtractConfigPriority,
494
472
  ExtractJobCreate,
473
+ ExtractJobCreatePriority,
495
474
  ExtractMode,
496
475
  ExtractTarget,
497
476
  )
@@ -502,6 +481,7 @@ class LlamaExtractClient:
502
481
  )
503
482
  client.llama_extract.run_job(
504
483
  request=ExtractJobCreate(
484
+ priority=ExtractJobCreatePriority.LOW,
505
485
  extraction_agent_id="string",
506
486
  file_id="string",
507
487
  config_override=ExtractConfig(
@@ -582,6 +562,7 @@ class LlamaExtractClient:
582
562
  ExtractConfig,
583
563
  ExtractConfigPriority,
584
564
  ExtractJobCreate,
565
+ ExtractJobCreatePriority,
585
566
  ExtractMode,
586
567
  ExtractTarget,
587
568
  FailPageMode,
@@ -598,6 +579,7 @@ class LlamaExtractClient:
598
579
  )
599
580
  client.llama_extract.run_job_test_user(
600
581
  job_create=ExtractJobCreate(
582
+ priority=ExtractJobCreatePriority.LOW,
601
583
  extraction_agent_id="string",
602
584
  file_id="string",
603
585
  config_override=ExtractConfig(
@@ -753,12 +735,7 @@ class LlamaExtractClient:
753
735
  raise ApiError(status_code=_response.status_code, body=_response_json)
754
736
 
755
737
  def get_job_result(
756
- self,
757
- job_id: str,
758
- *,
759
- project_id: typing.Optional[str] = None,
760
- organization_id: typing.Optional[str] = None,
761
- project_id: typing.Optional[str] = None,
738
+ self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
762
739
  ) -> ExtractResultset:
763
740
  """
764
741
  Parameters:
@@ -767,8 +744,6 @@ class LlamaExtractClient:
767
744
  - project_id: typing.Optional[str].
768
745
 
769
746
  - organization_id: typing.Optional[str].
770
-
771
- - project_id: typing.Optional[str].
772
747
  ---
773
748
  from llama_cloud.client import LlamaCloud
774
749
 
@@ -783,7 +758,7 @@ class LlamaExtractClient:
783
758
  "GET",
784
759
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
785
760
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
786
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
761
+ headers=self._client_wrapper.get_headers(),
787
762
  timeout=60,
788
763
  )
789
764
  if 200 <= _response.status_code < 300:
@@ -865,12 +840,7 @@ class LlamaExtractClient:
865
840
  raise ApiError(status_code=_response.status_code, body=_response_json)
866
841
 
867
842
  def get_run_by_job_id(
868
- self,
869
- job_id: str,
870
- *,
871
- project_id: typing.Optional[str] = None,
872
- organization_id: typing.Optional[str] = None,
873
- project_id: typing.Optional[str] = None,
843
+ self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
874
844
  ) -> ExtractRun:
875
845
  """
876
846
  Parameters:
@@ -879,8 +849,6 @@ class LlamaExtractClient:
879
849
  - project_id: typing.Optional[str].
880
850
 
881
851
  - organization_id: typing.Optional[str].
882
-
883
- - project_id: typing.Optional[str].
884
852
  ---
885
853
  from llama_cloud.client import LlamaCloud
886
854
 
@@ -895,7 +863,7 @@ class LlamaExtractClient:
895
863
  "GET",
896
864
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/by-job/{job_id}"),
897
865
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
898
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
866
+ headers=self._client_wrapper.get_headers(),
899
867
  timeout=60,
900
868
  )
901
869
  if 200 <= _response.status_code < 300:
@@ -909,12 +877,7 @@ class LlamaExtractClient:
909
877
  raise ApiError(status_code=_response.status_code, body=_response_json)
910
878
 
911
879
  def get_run(
912
- self,
913
- run_id: str,
914
- *,
915
- project_id: typing.Optional[str] = None,
916
- organization_id: typing.Optional[str] = None,
917
- project_id: typing.Optional[str] = None,
880
+ self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
918
881
  ) -> ExtractRun:
919
882
  """
920
883
  Parameters:
@@ -923,8 +886,6 @@ class LlamaExtractClient:
923
886
  - project_id: typing.Optional[str].
924
887
 
925
888
  - organization_id: typing.Optional[str].
926
-
927
- - project_id: typing.Optional[str].
928
889
  ---
929
890
  from llama_cloud.client import LlamaCloud
930
891
 
@@ -939,7 +900,7 @@ class LlamaExtractClient:
939
900
  "GET",
940
901
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
941
902
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
942
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
903
+ headers=self._client_wrapper.get_headers(),
943
904
  timeout=60,
944
905
  )
945
906
  if 200 <= _response.status_code < 300:
@@ -953,12 +914,7 @@ class LlamaExtractClient:
953
914
  raise ApiError(status_code=_response.status_code, body=_response_json)
954
915
 
955
916
  def delete_extraction_run(
956
- self,
957
- run_id: str,
958
- *,
959
- project_id: typing.Optional[str] = None,
960
- organization_id: typing.Optional[str] = None,
961
- project_id: typing.Optional[str] = None,
917
+ self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
962
918
  ) -> typing.Any:
963
919
  """
964
920
  Parameters:
@@ -967,8 +923,6 @@ class LlamaExtractClient:
967
923
  - project_id: typing.Optional[str].
968
924
 
969
925
  - organization_id: typing.Optional[str].
970
-
971
- - project_id: typing.Optional[str].
972
926
  ---
973
927
  from llama_cloud.client import LlamaCloud
974
928
 
@@ -983,7 +937,7 @@ class LlamaExtractClient:
983
937
  "DELETE",
984
938
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
985
939
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
986
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
940
+ headers=self._client_wrapper.get_headers(),
987
941
  timeout=60,
988
942
  )
989
943
  if 200 <= _response.status_code < 300:
@@ -1007,7 +961,6 @@ class LlamaExtractClient:
1007
961
  file_id: typing.Optional[str] = OMIT,
1008
962
  text: typing.Optional[str] = OMIT,
1009
963
  file: typing.Optional[FileData] = OMIT,
1010
- project_id: typing.Optional[str] = None,
1011
964
  ) -> ExtractJob:
1012
965
  """
1013
966
  Stateless extraction endpoint that uses a default extraction agent in the user's default project.
@@ -1029,8 +982,6 @@ class LlamaExtractClient:
1029
982
  - text: typing.Optional[str].
1030
983
 
1031
984
  - file: typing.Optional[FileData].
1032
-
1033
- - project_id: typing.Optional[str].
1034
985
  ---
1035
986
  from llama_cloud import (
1036
987
  DocumentChunkMode,
@@ -1072,7 +1023,7 @@ class LlamaExtractClient:
1072
1023
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/run"),
1073
1024
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1074
1025
  json=jsonable_encoder(_request),
1075
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1026
+ headers=self._client_wrapper.get_headers(),
1076
1027
  timeout=60,
1077
1028
  )
1078
1029
  if 200 <= _response.status_code < 300:
@@ -1096,7 +1047,6 @@ class AsyncLlamaExtractClient:
1096
1047
  include_default: typing.Optional[bool] = None,
1097
1048
  project_id: typing.Optional[str] = None,
1098
1049
  organization_id: typing.Optional[str] = None,
1099
- project_id: typing.Optional[str] = None,
1100
1050
  ) -> typing.List[ExtractAgent]:
1101
1051
  """
1102
1052
  Parameters:
@@ -1105,8 +1055,6 @@ class AsyncLlamaExtractClient:
1105
1055
  - project_id: typing.Optional[str].
1106
1056
 
1107
1057
  - organization_id: typing.Optional[str].
1108
-
1109
- - project_id: typing.Optional[str].
1110
1058
  ---
1111
1059
  from llama_cloud.client import AsyncLlamaCloud
1112
1060
 
@@ -1121,7 +1069,7 @@ class AsyncLlamaExtractClient:
1121
1069
  params=remove_none_from_dict(
1122
1070
  {"include_default": include_default, "project_id": project_id, "organization_id": organization_id}
1123
1071
  ),
1124
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1072
+ headers=self._client_wrapper.get_headers(),
1125
1073
  timeout=60,
1126
1074
  )
1127
1075
  if 200 <= _response.status_code < 300:
@@ -1142,7 +1090,6 @@ class AsyncLlamaExtractClient:
1142
1090
  name: str,
1143
1091
  data_schema: ExtractAgentCreateDataSchema,
1144
1092
  config: ExtractConfig,
1145
- project_id: typing.Optional[str] = None,
1146
1093
  ) -> ExtractAgent:
1147
1094
  """
1148
1095
  Parameters:
@@ -1155,8 +1102,6 @@ class AsyncLlamaExtractClient:
1155
1102
  - data_schema: ExtractAgentCreateDataSchema. The schema of the data.
1156
1103
 
1157
1104
  - config: ExtractConfig. The configuration parameters for the extraction agent.
1158
-
1159
- - project_id: typing.Optional[str].
1160
1105
  ---
1161
1106
  from llama_cloud import (
1162
1107
  DocumentChunkMode,
@@ -1185,7 +1130,7 @@ class AsyncLlamaExtractClient:
1185
1130
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents"),
1186
1131
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1187
1132
  json=jsonable_encoder({"name": name, "data_schema": data_schema, "config": config}),
1188
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1133
+ headers=self._client_wrapper.get_headers(),
1189
1134
  timeout=60,
1190
1135
  )
1191
1136
  if 200 <= _response.status_code < 300:
@@ -1241,7 +1186,6 @@ class AsyncLlamaExtractClient:
1241
1186
  organization_id: typing.Optional[str] = None,
1242
1187
  prompt: typing.Optional[str] = OMIT,
1243
1188
  file_id: typing.Optional[str] = OMIT,
1244
- project_id: typing.Optional[str] = None,
1245
1189
  ) -> ExtractSchemaGenerateResponse:
1246
1190
  """
1247
1191
  Generates an extraction agent's schema definition from a file and/or natural language prompt.
@@ -1254,8 +1198,6 @@ class AsyncLlamaExtractClient:
1254
1198
  - prompt: typing.Optional[str].
1255
1199
 
1256
1200
  - file_id: typing.Optional[str].
1257
-
1258
- - project_id: typing.Optional[str].
1259
1201
  ---
1260
1202
  from llama_cloud.client import AsyncLlamaCloud
1261
1203
 
@@ -1276,7 +1218,7 @@ class AsyncLlamaExtractClient:
1276
1218
  ),
1277
1219
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1278
1220
  json=jsonable_encoder(_request),
1279
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1221
+ headers=self._client_wrapper.get_headers(),
1280
1222
  timeout=60,
1281
1223
  )
1282
1224
  if 200 <= _response.status_code < 300:
@@ -1290,12 +1232,7 @@ class AsyncLlamaExtractClient:
1290
1232
  raise ApiError(status_code=_response.status_code, body=_response_json)
1291
1233
 
1292
1234
  async def get_extraction_agent_by_name(
1293
- self,
1294
- name: str,
1295
- *,
1296
- project_id: typing.Optional[str] = None,
1297
- organization_id: typing.Optional[str] = None,
1298
- project_id: typing.Optional[str] = None,
1235
+ self, name: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
1299
1236
  ) -> ExtractAgent:
1300
1237
  """
1301
1238
  Parameters:
@@ -1304,8 +1241,6 @@ class AsyncLlamaExtractClient:
1304
1241
  - project_id: typing.Optional[str].
1305
1242
 
1306
1243
  - organization_id: typing.Optional[str].
1307
-
1308
- - project_id: typing.Optional[str].
1309
1244
  ---
1310
1245
  from llama_cloud.client import AsyncLlamaCloud
1311
1246
 
@@ -1322,7 +1257,7 @@ class AsyncLlamaExtractClient:
1322
1257
  f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/extraction-agents/by-name/{name}"
1323
1258
  ),
1324
1259
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1325
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1260
+ headers=self._client_wrapper.get_headers(),
1326
1261
  timeout=60,
1327
1262
  )
1328
1263
  if 200 <= _response.status_code < 300:
@@ -1336,11 +1271,7 @@ class AsyncLlamaExtractClient:
1336
1271
  raise ApiError(status_code=_response.status_code, body=_response_json)
1337
1272
 
1338
1273
  async def get_or_create_default_extraction_agent(
1339
- self,
1340
- *,
1341
- project_id: typing.Optional[str] = None,
1342
- organization_id: typing.Optional[str] = None,
1343
- project_id: typing.Optional[str] = None,
1274
+ self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
1344
1275
  ) -> ExtractAgent:
1345
1276
  """
1346
1277
  Get or create a default extraction agent for the current project.
@@ -1350,8 +1281,6 @@ class AsyncLlamaExtractClient:
1350
1281
  - project_id: typing.Optional[str].
1351
1282
 
1352
1283
  - organization_id: typing.Optional[str].
1353
-
1354
- - project_id: typing.Optional[str].
1355
1284
  ---
1356
1285
  from llama_cloud.client import AsyncLlamaCloud
1357
1286
 
@@ -1366,7 +1295,7 @@ class AsyncLlamaExtractClient:
1366
1295
  f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/extraction-agents/default"
1367
1296
  ),
1368
1297
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1369
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1298
+ headers=self._client_wrapper.get_headers(),
1370
1299
  timeout=60,
1371
1300
  )
1372
1301
  if 200 <= _response.status_code < 300:
@@ -1538,6 +1467,7 @@ class AsyncLlamaExtractClient:
1538
1467
  ExtractConfig,
1539
1468
  ExtractConfigPriority,
1540
1469
  ExtractJobCreate,
1470
+ ExtractJobCreatePriority,
1541
1471
  ExtractMode,
1542
1472
  ExtractTarget,
1543
1473
  )
@@ -1548,6 +1478,7 @@ class AsyncLlamaExtractClient:
1548
1478
  )
1549
1479
  await client.llama_extract.run_job(
1550
1480
  request=ExtractJobCreate(
1481
+ priority=ExtractJobCreatePriority.LOW,
1551
1482
  extraction_agent_id="string",
1552
1483
  file_id="string",
1553
1484
  config_override=ExtractConfig(
@@ -1628,6 +1559,7 @@ class AsyncLlamaExtractClient:
1628
1559
  ExtractConfig,
1629
1560
  ExtractConfigPriority,
1630
1561
  ExtractJobCreate,
1562
+ ExtractJobCreatePriority,
1631
1563
  ExtractMode,
1632
1564
  ExtractTarget,
1633
1565
  FailPageMode,
@@ -1644,6 +1576,7 @@ class AsyncLlamaExtractClient:
1644
1576
  )
1645
1577
  await client.llama_extract.run_job_test_user(
1646
1578
  job_create=ExtractJobCreate(
1579
+ priority=ExtractJobCreatePriority.LOW,
1647
1580
  extraction_agent_id="string",
1648
1581
  file_id="string",
1649
1582
  config_override=ExtractConfig(
@@ -1799,12 +1732,7 @@ class AsyncLlamaExtractClient:
1799
1732
  raise ApiError(status_code=_response.status_code, body=_response_json)
1800
1733
 
1801
1734
  async def get_job_result(
1802
- self,
1803
- job_id: str,
1804
- *,
1805
- project_id: typing.Optional[str] = None,
1806
- organization_id: typing.Optional[str] = None,
1807
- project_id: typing.Optional[str] = None,
1735
+ self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
1808
1736
  ) -> ExtractResultset:
1809
1737
  """
1810
1738
  Parameters:
@@ -1813,8 +1741,6 @@ class AsyncLlamaExtractClient:
1813
1741
  - project_id: typing.Optional[str].
1814
1742
 
1815
1743
  - organization_id: typing.Optional[str].
1816
-
1817
- - project_id: typing.Optional[str].
1818
1744
  ---
1819
1745
  from llama_cloud.client import AsyncLlamaCloud
1820
1746
 
@@ -1829,7 +1755,7 @@ class AsyncLlamaExtractClient:
1829
1755
  "GET",
1830
1756
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/jobs/{job_id}/result"),
1831
1757
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1832
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1758
+ headers=self._client_wrapper.get_headers(),
1833
1759
  timeout=60,
1834
1760
  )
1835
1761
  if 200 <= _response.status_code < 300:
@@ -1911,12 +1837,7 @@ class AsyncLlamaExtractClient:
1911
1837
  raise ApiError(status_code=_response.status_code, body=_response_json)
1912
1838
 
1913
1839
  async def get_run_by_job_id(
1914
- self,
1915
- job_id: str,
1916
- *,
1917
- project_id: typing.Optional[str] = None,
1918
- organization_id: typing.Optional[str] = None,
1919
- project_id: typing.Optional[str] = None,
1840
+ self, job_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
1920
1841
  ) -> ExtractRun:
1921
1842
  """
1922
1843
  Parameters:
@@ -1925,8 +1846,6 @@ class AsyncLlamaExtractClient:
1925
1846
  - project_id: typing.Optional[str].
1926
1847
 
1927
1848
  - organization_id: typing.Optional[str].
1928
-
1929
- - project_id: typing.Optional[str].
1930
1849
  ---
1931
1850
  from llama_cloud.client import AsyncLlamaCloud
1932
1851
 
@@ -1941,7 +1860,7 @@ class AsyncLlamaExtractClient:
1941
1860
  "GET",
1942
1861
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/by-job/{job_id}"),
1943
1862
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1944
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1863
+ headers=self._client_wrapper.get_headers(),
1945
1864
  timeout=60,
1946
1865
  )
1947
1866
  if 200 <= _response.status_code < 300:
@@ -1955,12 +1874,7 @@ class AsyncLlamaExtractClient:
1955
1874
  raise ApiError(status_code=_response.status_code, body=_response_json)
1956
1875
 
1957
1876
  async def get_run(
1958
- self,
1959
- run_id: str,
1960
- *,
1961
- project_id: typing.Optional[str] = None,
1962
- organization_id: typing.Optional[str] = None,
1963
- project_id: typing.Optional[str] = None,
1877
+ self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
1964
1878
  ) -> ExtractRun:
1965
1879
  """
1966
1880
  Parameters:
@@ -1969,8 +1883,6 @@ class AsyncLlamaExtractClient:
1969
1883
  - project_id: typing.Optional[str].
1970
1884
 
1971
1885
  - organization_id: typing.Optional[str].
1972
-
1973
- - project_id: typing.Optional[str].
1974
1886
  ---
1975
1887
  from llama_cloud.client import AsyncLlamaCloud
1976
1888
 
@@ -1985,7 +1897,7 @@ class AsyncLlamaExtractClient:
1985
1897
  "GET",
1986
1898
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
1987
1899
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
1988
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1900
+ headers=self._client_wrapper.get_headers(),
1989
1901
  timeout=60,
1990
1902
  )
1991
1903
  if 200 <= _response.status_code < 300:
@@ -1999,12 +1911,7 @@ class AsyncLlamaExtractClient:
1999
1911
  raise ApiError(status_code=_response.status_code, body=_response_json)
2000
1912
 
2001
1913
  async def delete_extraction_run(
2002
- self,
2003
- run_id: str,
2004
- *,
2005
- project_id: typing.Optional[str] = None,
2006
- organization_id: typing.Optional[str] = None,
2007
- project_id: typing.Optional[str] = None,
1914
+ self, run_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
2008
1915
  ) -> typing.Any:
2009
1916
  """
2010
1917
  Parameters:
@@ -2013,8 +1920,6 @@ class AsyncLlamaExtractClient:
2013
1920
  - project_id: typing.Optional[str].
2014
1921
 
2015
1922
  - organization_id: typing.Optional[str].
2016
-
2017
- - project_id: typing.Optional[str].
2018
1923
  ---
2019
1924
  from llama_cloud.client import AsyncLlamaCloud
2020
1925
 
@@ -2029,7 +1934,7 @@ class AsyncLlamaExtractClient:
2029
1934
  "DELETE",
2030
1935
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
2031
1936
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
2032
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
1937
+ headers=self._client_wrapper.get_headers(),
2033
1938
  timeout=60,
2034
1939
  )
2035
1940
  if 200 <= _response.status_code < 300:
@@ -2053,7 +1958,6 @@ class AsyncLlamaExtractClient:
2053
1958
  file_id: typing.Optional[str] = OMIT,
2054
1959
  text: typing.Optional[str] = OMIT,
2055
1960
  file: typing.Optional[FileData] = OMIT,
2056
- project_id: typing.Optional[str] = None,
2057
1961
  ) -> ExtractJob:
2058
1962
  """
2059
1963
  Stateless extraction endpoint that uses a default extraction agent in the user's default project.
@@ -2075,8 +1979,6 @@ class AsyncLlamaExtractClient:
2075
1979
  - text: typing.Optional[str].
2076
1980
 
2077
1981
  - file: typing.Optional[FileData].
2078
-
2079
- - project_id: typing.Optional[str].
2080
1982
  ---
2081
1983
  from llama_cloud import (
2082
1984
  DocumentChunkMode,
@@ -2118,7 +2020,7 @@ class AsyncLlamaExtractClient:
2118
2020
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/run"),
2119
2021
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
2120
2022
  json=jsonable_encoder(_request),
2121
- headers=remove_none_from_dict({**self._client_wrapper.get_headers(), "Project-Id": project_id}),
2023
+ headers=self._client_wrapper.get_headers(),
2122
2024
  timeout=60,
2123
2025
  )
2124
2026
  if 200 <= _response.status_code < 300: