llama-cloud 0.1.10__py3-none-any.whl → 0.1.12__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud has been flagged as possibly problematic.

llama_cloud/__init__.py CHANGED
@@ -26,6 +26,7 @@ from .types import (
     ChatApp,
     ChatAppResponse,
     ChatData,
+    ChunkMode,
     CloudAzStorageBlobDataSource,
     CloudAzureAiSearchVectorStore,
     CloudBoxDataSource,
@@ -70,6 +71,7 @@ from .types import (
     DataSourceCustomMetadataValue,
     DataSourceDefinition,
     EditSuggestion,
+    EditSuggestionBlocksItem,
     ElementSegmentationConfig,
     EmbeddingModelConfig,
     EmbeddingModelConfigEmbeddingConfig,
@@ -120,6 +122,7 @@ from .types import (
     ExtractSchemaValidateResponse,
     ExtractSchemaValidateResponseDataSchemaValue,
     ExtractState,
+    ExtractTarget,
     File,
     FilePermissionInfoValue,
     FileResourceInfoValue,
@@ -237,6 +240,7 @@ from .types import (
     ProgressEventStatus,
     Project,
     ProjectCreate,
+    PromptConf,
     PromptMixinPrompts,
     PromptSpec,
     PydanticProgramMode,
@@ -252,7 +256,6 @@ from .types import (
     ReportEventItemEventData_ReportBlockUpdate,
     ReportEventItemEventData_ReportStateUpdate,
     ReportEventType,
-    ReportFileInfo,
     ReportMetadata,
     ReportPlan,
     ReportPlanBlock,
@@ -267,10 +270,13 @@ from .types import (
     RetrieverCreate,
     RetrieverPipeline,
     Role,
+    SchemaRelaxMode,
     SemanticChunkingConfig,
     SentenceChunkingConfig,
     SentenceSplitter,
     StatusEnum,
+    StructMode,
+    StructParseConf,
     SupportedLlmModel,
     SupportedLlmModelNames,
     TextBlock,
@@ -370,6 +376,7 @@ __all__ = [
     "ChatApp",
     "ChatAppResponse",
     "ChatData",
+    "ChunkMode",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudBoxDataSource",
@@ -417,6 +424,7 @@ __all__ = [
     "DataSourceUpdateComponent",
     "DataSourceUpdateCustomMetadataValue",
     "EditSuggestion",
+    "EditSuggestionBlocksItem",
     "ElementSegmentationConfig",
     "EmbeddingModelConfig",
     "EmbeddingModelConfigCreateEmbeddingConfig",
@@ -481,6 +489,7 @@ __all__ = [
     "ExtractSchemaValidateResponse",
     "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",
+    "ExtractTarget",
     "File",
     "FileCreateFromUrlResourceInfoValue",
     "FileCreatePermissionInfoValue",
@@ -612,6 +621,7 @@ __all__ = [
     "ProgressEventStatus",
     "Project",
     "ProjectCreate",
+    "PromptConf",
     "PromptMixinPrompts",
     "PromptSpec",
     "PydanticProgramMode",
@@ -627,7 +637,6 @@ __all__ = [
     "ReportEventItemEventData_ReportBlockUpdate",
     "ReportEventItemEventData_ReportStateUpdate",
     "ReportEventType",
-    "ReportFileInfo",
     "ReportMetadata",
     "ReportPlan",
     "ReportPlanBlock",
@@ -642,10 +651,13 @@ __all__ = [
     "RetrieverCreate",
     "RetrieverPipeline",
     "Role",
+    "SchemaRelaxMode",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
     "SentenceSplitter",
     "StatusEnum",
+    "StructMode",
+    "StructParseConf",
     "SupportedLlmModel",
     "SupportedLlmModelNames",
     "TextBlock",
@@ -151,6 +151,7 @@ class FilesClient:
     def upload_file(
         self,
         *,
+        external_file_id: typing.Optional[str] = None,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         upload_file: typing.IO,
@@ -159,6 +160,8 @@ class FilesClient:
         Upload a file to S3.

         Parameters:
+        - external_file_id: typing.Optional[str].
+
         - project_id: typing.Optional[str].

         - organization_id: typing.Optional[str].
@@ -168,7 +171,9 @@ class FilesClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/files"),
-            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            params=remove_none_from_dict(
+                {"external_file_id": external_file_id, "project_id": project_id, "organization_id": organization_id}
+            ),
             data=jsonable_encoder({}),
             files={"upload_file": upload_file},
             headers=self._client_wrapper.get_headers(),
@@ -766,6 +771,7 @@ class AsyncFilesClient:
     async def upload_file(
         self,
         *,
+        external_file_id: typing.Optional[str] = None,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
         upload_file: typing.IO,
@@ -774,6 +780,8 @@ class AsyncFilesClient:
         Upload a file to S3.

         Parameters:
+        - external_file_id: typing.Optional[str].
+
         - project_id: typing.Optional[str].

         - organization_id: typing.Optional[str].
@@ -783,7 +791,9 @@ class AsyncFilesClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/files"),
-            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            params=remove_none_from_dict(
+                {"external_file_id": external_file_id, "project_id": project_id, "organization_id": organization_id}
+            ),
             data=jsonable_encoder({}),
             files={"upload_file": upload_file},
             headers=self._client_wrapper.get_headers(),
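
Both upload_file variants now accept an optional external_file_id, threaded into the existing remove_none_from_dict call as a query parameter, so omitting it preserves the 0.1.10 request shape. A usage sketch; the token keyword and the client.files attribute name follow this generated client's conventions but are assumptions here:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_API_KEY")
    with open("invoice.pdf", "rb") as f:
        client.files.upload_file(
            upload_file=f,
            external_file_id="my-system-doc-42",  # hypothetical: your own stable ID
        )
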
@@ -88,7 +88,7 @@ class LlamaExtractClient:

        - config: ExtractConfig. The configuration parameters for the extraction agent.
        ---
-       from llama_cloud import ExtractConfig, ExtractMode
+       from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
@@ -97,7 +97,8 @@ class LlamaExtractClient:
        client.llama_extract.create_extraction_agent(
            name="string",
            config=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        )
        """
@@ -234,7 +235,7 @@ class LlamaExtractClient:

        - config: ExtractConfig. The configuration parameters for the extraction agent.
        ---
-       from llama_cloud import ExtractConfig, ExtractMode
+       from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
@@ -243,7 +244,8 @@ class LlamaExtractClient:
        client.llama_extract.update_extraction_agent(
            extraction_agent_id="string",
            config=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        )
        """
@@ -336,7 +338,12 @@ class LlamaExtractClient:
        Parameters:
        - request: ExtractJobCreate.
        ---
-       from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
+       from llama_cloud import (
+           ExtractConfig,
+           ExtractJobCreate,
+           ExtractMode,
+           ExtractTarget,
+       )
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
@@ -347,7 +354,8 @@ class LlamaExtractClient:
            extraction_agent_id="string",
            file_id="string",
            config_override=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        ),
    )
@@ -409,9 +417,11 @@ class LlamaExtractClient:
        - extract_settings: typing.Optional[LlamaExtractSettings].
        ---
        from llama_cloud import (
+           ChunkMode,
            ExtractConfig,
            ExtractJobCreate,
            ExtractMode,
+           ExtractTarget,
            LlamaExtractSettings,
            LlamaParseParameters,
        )
@@ -425,10 +435,12 @@ class LlamaExtractClient:
            extraction_agent_id="string",
            file_id="string",
            config_override=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        ),
        extract_settings=LlamaExtractSettings(
+           chunk_mode=ChunkMode.PAGE,
            llama_parse_params=LlamaParseParameters(),
        ),
    )
@@ -458,7 +470,12 @@ class LlamaExtractClient:
        Parameters:
        - request: ExtractJobCreate.
        ---
-       from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
+       from llama_cloud import (
+           ExtractConfig,
+           ExtractJobCreate,
+           ExtractMode,
+           ExtractTarget,
+       )
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
@@ -469,7 +486,8 @@ class LlamaExtractClient:
            extraction_agent_id="string",
            file_id="string",
            config_override=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        ),
    )
@@ -501,9 +519,11 @@ class LlamaExtractClient:
        - extract_settings: typing.Optional[LlamaExtractSettings].
        ---
        from llama_cloud import (
+           ChunkMode,
            ExtractConfig,
            ExtractJobCreate,
            ExtractMode,
+           ExtractTarget,
            LlamaExtractSettings,
            LlamaParseParameters,
        )
@@ -517,10 +537,12 @@ class LlamaExtractClient:
            extraction_agent_id="string",
            file_id="string",
            config_override=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        ),
        extract_settings=LlamaExtractSettings(
+           chunk_mode=ChunkMode.PAGE,
            llama_parse_params=LlamaParseParameters(),
        ),
    )
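
The pattern across these docstring updates: the old single-axis ExtractConfig(extraction_mode=ExtractMode.PER_DOC) is split in two, with PER_DOC moving to a new extraction_target field and extraction_mode repurposed for a processing strategy such as FAST. A migration sketch, using only the members this diff shows:

    from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget

    # 0.1.10: ExtractConfig(extraction_mode=ExtractMode.PER_DOC)
    # 0.1.12: PER_DOC now lives on ExtractTarget; extraction_mode picks a strategy.
    config = ExtractConfig(
        extraction_target=ExtractTarget.PER_DOC,
        extraction_mode=ExtractMode.FAST,
    )
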
@@ -725,7 +747,7 @@ class AsyncLlamaExtractClient:

        - config: ExtractConfig. The configuration parameters for the extraction agent.
        ---
-       from llama_cloud import ExtractConfig, ExtractMode
+       from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
@@ -734,7 +756,8 @@ class AsyncLlamaExtractClient:
        await client.llama_extract.create_extraction_agent(
            name="string",
            config=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        )
        """
@@ -871,7 +894,7 @@ class AsyncLlamaExtractClient:

        - config: ExtractConfig. The configuration parameters for the extraction agent.
        ---
-       from llama_cloud import ExtractConfig, ExtractMode
+       from llama_cloud import ExtractConfig, ExtractMode, ExtractTarget
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
@@ -880,7 +903,8 @@ class AsyncLlamaExtractClient:
        await client.llama_extract.update_extraction_agent(
            extraction_agent_id="string",
            config=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        )
        """
@@ -973,7 +997,12 @@ class AsyncLlamaExtractClient:
        Parameters:
        - request: ExtractJobCreate.
        ---
-       from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
+       from llama_cloud import (
+           ExtractConfig,
+           ExtractJobCreate,
+           ExtractMode,
+           ExtractTarget,
+       )
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
@@ -984,7 +1013,8 @@ class AsyncLlamaExtractClient:
            extraction_agent_id="string",
            file_id="string",
            config_override=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        ),
    )
@@ -1046,9 +1076,11 @@ class AsyncLlamaExtractClient:
        - extract_settings: typing.Optional[LlamaExtractSettings].
        ---
        from llama_cloud import (
+           ChunkMode,
            ExtractConfig,
            ExtractJobCreate,
            ExtractMode,
+           ExtractTarget,
            LlamaExtractSettings,
            LlamaParseParameters,
        )
@@ -1062,10 +1094,12 @@ class AsyncLlamaExtractClient:
            extraction_agent_id="string",
            file_id="string",
            config_override=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        ),
        extract_settings=LlamaExtractSettings(
+           chunk_mode=ChunkMode.PAGE,
            llama_parse_params=LlamaParseParameters(),
        ),
    )
@@ -1095,7 +1129,12 @@ class AsyncLlamaExtractClient:
        Parameters:
        - request: ExtractJobCreate.
        ---
-       from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode
+       from llama_cloud import (
+           ExtractConfig,
+           ExtractJobCreate,
+           ExtractMode,
+           ExtractTarget,
+       )
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
@@ -1106,7 +1145,8 @@ class AsyncLlamaExtractClient:
            extraction_agent_id="string",
            file_id="string",
            config_override=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        ),
    )
@@ -1138,9 +1178,11 @@ class AsyncLlamaExtractClient:
        - extract_settings: typing.Optional[LlamaExtractSettings].
        ---
        from llama_cloud import (
+           ChunkMode,
            ExtractConfig,
            ExtractJobCreate,
            ExtractMode,
+           ExtractTarget,
            LlamaExtractSettings,
            LlamaParseParameters,
        )
@@ -1154,10 +1196,12 @@ class AsyncLlamaExtractClient:
            extraction_agent_id="string",
            file_id="string",
            config_override=ExtractConfig(
-               extraction_mode=ExtractMode.PER_DOC,
+               extraction_target=ExtractTarget.PER_DOC,
+               extraction_mode=ExtractMode.FAST,
            ),
        ),
        extract_settings=LlamaExtractSettings(
+           chunk_mode=ChunkMode.PAGE,
            llama_parse_params=LlamaParseParameters(),
        ),
    )
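
The async client mirrors the sync changes one-for-one. The other addition, chunk_mode, sits on LlamaExtractSettings; a sketch of constructing the updated settings object, matching the docstring examples above (PAGE is the only ChunkMode member visible in this diff, and the comment on its meaning is an inference from the name):

    from llama_cloud import ChunkMode, LlamaExtractSettings, LlamaParseParameters

    settings = LlamaExtractSettings(
        chunk_mode=ChunkMode.PAGE,  # presumably: chunk per page before extraction
        llama_parse_params=LlamaParseParameters(),
    )
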
@@ -100,6 +100,96 @@ class ParsingClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def screenshot(
+        self,
+        *,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
+        file: typing.Optional[str] = OMIT,
+        do_not_cache: bool,
+        http_proxy: str,
+        input_s_3_path: str,
+        input_s_3_region: str,
+        input_url: str,
+        invalidate_cache: bool,
+        max_pages: typing.Optional[int] = OMIT,
+        output_s_3_path_prefix: str,
+        output_s_3_region: str,
+        target_pages: str,
+        webhook_url: str,
+        job_timeout_in_seconds: float,
+        job_timeout_extra_time_per_page_in_seconds: float,
+    ) -> ParsingJob:
+        """
+        Parameters:
+        - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
+
+        - file: typing.Optional[str].
+
+        - do_not_cache: bool.
+
+        - http_proxy: str.
+
+        - input_s_3_path: str.
+
+        - input_s_3_region: str.
+
+        - input_url: str.
+
+        - invalidate_cache: bool.
+
+        - max_pages: typing.Optional[int].
+
+        - output_s_3_path_prefix: str.
+
+        - output_s_3_region: str.
+
+        - target_pages: str.
+
+        - webhook_url: str.
+
+        - job_timeout_in_seconds: float.
+
+        - job_timeout_extra_time_per_page_in_seconds: float.
+        """
+        _request: typing.Dict[str, typing.Any] = {
+            "do_not_cache": do_not_cache,
+            "http_proxy": http_proxy,
+            "input_s3_path": input_s_3_path,
+            "input_s3_region": input_s_3_region,
+            "input_url": input_url,
+            "invalidate_cache": invalidate_cache,
+            "output_s3_path_prefix": output_s_3_path_prefix,
+            "output_s3_region": output_s_3_region,
+            "target_pages": target_pages,
+            "webhook_url": webhook_url,
+            "job_timeout_in_seconds": job_timeout_in_seconds,
+            "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
+        }
+        if file is not OMIT:
+            _request["file"] = file
+        if max_pages is not OMIT:
+            _request["max_pages"] = max_pages
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/screenshot"),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def upload_file(
         self,
         *,
@@ -957,6 +1047,96 @@ class AsyncParsingClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def screenshot(
+        self,
+        *,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
+        file: typing.Optional[str] = OMIT,
+        do_not_cache: bool,
+        http_proxy: str,
+        input_s_3_path: str,
+        input_s_3_region: str,
+        input_url: str,
+        invalidate_cache: bool,
+        max_pages: typing.Optional[int] = OMIT,
+        output_s_3_path_prefix: str,
+        output_s_3_region: str,
+        target_pages: str,
+        webhook_url: str,
+        job_timeout_in_seconds: float,
+        job_timeout_extra_time_per_page_in_seconds: float,
+    ) -> ParsingJob:
+        """
+        Parameters:
+        - organization_id: typing.Optional[str].
+
+        - project_id: typing.Optional[str].
+
+        - file: typing.Optional[str].
+
+        - do_not_cache: bool.
+
+        - http_proxy: str.
+
+        - input_s_3_path: str.
+
+        - input_s_3_region: str.
+
+        - input_url: str.
+
+        - invalidate_cache: bool.
+
+        - max_pages: typing.Optional[int].
+
+        - output_s_3_path_prefix: str.
+
+        - output_s_3_region: str.
+
+        - target_pages: str.
+
+        - webhook_url: str.
+
+        - job_timeout_in_seconds: float.
+
+        - job_timeout_extra_time_per_page_in_seconds: float.
+        """
+        _request: typing.Dict[str, typing.Any] = {
+            "do_not_cache": do_not_cache,
+            "http_proxy": http_proxy,
+            "input_s3_path": input_s_3_path,
+            "input_s3_region": input_s_3_region,
+            "input_url": input_url,
+            "invalidate_cache": invalidate_cache,
+            "output_s3_path_prefix": output_s_3_path_prefix,
+            "output_s3_region": output_s_3_region,
+            "target_pages": target_pages,
+            "webhook_url": webhook_url,
+            "job_timeout_in_seconds": job_timeout_in_seconds,
+            "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
+        }
+        if file is not OMIT:
+            _request["file"] = file
+        if max_pages is not OMIT:
+            _request["max_pages"] = max_pages
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/screenshot"),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def upload_file(
         self,
         *,
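
ParsingClient and AsyncParsingClient gain an identical screenshot method: a thin wrapper over POST api/v1/parsing/screenshot that returns a ParsingJob. Note the generated signature: only organization_id, project_id, file, and max_pages are optional; every other field is a required keyword argument even when unused, since the keys are serialized unconditionally (snake-cased s_3 names map to s3 keys in the body). A sketch under that constraint; the client.parsing attribute name and the empty-string placeholders are assumptions, not documented defaults:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_API_KEY")
    job = client.parsing.screenshot(
        input_url="https://example.com/page-to-capture",
        do_not_cache=False,
        invalidate_cache=False,
        http_proxy="",              # required by the signature, even if unused
        input_s_3_path="",          # serialized as "input_s3_path"
        input_s_3_region="",
        output_s_3_path_prefix="",
        output_s_3_region="",
        target_pages="",
        webhook_url="",
        job_timeout_in_seconds=300.0,
        job_timeout_extra_time_per_page_in_seconds=10.0,
    )
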
@@ -252,7 +252,12 @@ class ReportsClient:
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def delete_report(
-        self, report_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+        self,
+        report_id: str,
+        *,
+        cascade_delete: typing.Optional[bool] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
    ) -> typing.Any:
        """
        Delete a report.
@@ -260,6 +265,8 @@ class ReportsClient:
        Parameters:
        - report_id: str.

+        - cascade_delete: typing.Optional[bool]. Whether to delete associated retriever and pipeline data
+
        - project_id: typing.Optional[str].

        - organization_id: typing.Optional[str].
@@ -276,7 +283,9 @@ class ReportsClient:
        _response = self._client_wrapper.httpx_client.request(
            "DELETE",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/reports/{report_id}"),
-           params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+           params=remove_none_from_dict(
+               {"cascade_delete": cascade_delete, "project_id": project_id, "organization_id": organization_id}
+           ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
@@ -835,7 +844,12 @@ class AsyncReportsClient:
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def delete_report(
-        self, report_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+        self,
+        report_id: str,
+        *,
+        cascade_delete: typing.Optional[bool] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
    ) -> typing.Any:
        """
        Delete a report.
@@ -843,6 +857,8 @@ class AsyncReportsClient:
        Parameters:
        - report_id: str.

+        - cascade_delete: typing.Optional[bool]. Whether to delete associated retriever and pipeline data
+
        - project_id: typing.Optional[str].

        - organization_id: typing.Optional[str].
@@ -859,7 +875,9 @@ class AsyncReportsClient:
        _response = await self._client_wrapper.httpx_client.request(
            "DELETE",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/reports/{report_id}"),
-           params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+           params=remove_none_from_dict(
+               {"cascade_delete": cascade_delete, "project_id": project_id, "organization_id": organization_id}
+           ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
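
Finally, delete_report (sync and async) grows a cascade_delete flag, sent as a query parameter; per the new docstring it also removes the report's associated retriever and pipeline data. A sketch, with the client.reports attribute name assumed:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_API_KEY")
    client.reports.delete_report(
        report_id="string",
        cascade_delete=True,  # None (the default) preserves the 0.1.10 behavior
    )
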