llama-cloud 0.1.9__py3-none-any.whl → 0.1.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic. See the registry advisory for more details.

Files changed (40):
  1. llama_cloud/__init__.py +30 -8
  2. llama_cloud/resources/__init__.py +14 -3
  3. llama_cloud/resources/chat_apps/client.py +99 -133
  4. llama_cloud/resources/llama_extract/__init__.py +16 -2
  5. llama_cloud/resources/llama_extract/client.py +272 -102
  6. llama_cloud/resources/llama_extract/types/__init__.py +14 -3
  7. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +9 -0
  8. llama_cloud/resources/llama_extract/types/{extract_agent_create_data_schema_value.py → extract_agent_create_data_schema_zero_value.py} +1 -1
  9. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +9 -0
  10. llama_cloud/resources/llama_extract/types/{extract_agent_update_data_schema_value.py → extract_agent_update_data_schema_zero_value.py} +1 -1
  11. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +9 -0
  12. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +7 -0
  13. llama_cloud/resources/organizations/client.py +8 -12
  14. llama_cloud/resources/parsing/client.py +64 -0
  15. llama_cloud/resources/reports/client.py +30 -26
  16. llama_cloud/resources/retrievers/client.py +16 -4
  17. llama_cloud/types/__init__.py +18 -4
  18. llama_cloud/types/chat_app.py +11 -9
  19. llama_cloud/types/chat_app_response.py +12 -10
  20. llama_cloud/types/cloud_mongo_db_atlas_vector_search.py +1 -0
  21. llama_cloud/types/extract_job_create.py +4 -2
  22. llama_cloud/types/extract_job_create_data_schema_override.py +9 -0
  23. llama_cloud/types/{extract_job_create_data_schema_override_value.py → extract_job_create_data_schema_override_zero_value.py} +1 -1
  24. llama_cloud/types/extract_run.py +2 -2
  25. llama_cloud/types/extract_run_data.py +11 -0
  26. llama_cloud/types/extract_run_data_item_value.py +5 -0
  27. llama_cloud/types/extract_run_data_zero_value.py +5 -0
  28. llama_cloud/types/extract_schema_validate_response.py +32 -0
  29. llama_cloud/types/extract_schema_validate_response_data_schema_value.py +7 -0
  30. llama_cloud/types/llama_extract_settings.py +4 -0
  31. llama_cloud/types/llama_parse_parameters.py +8 -0
  32. llama_cloud/types/plan.py +4 -0
  33. llama_cloud/types/preset_composite_retrieval_params.py +35 -0
  34. llama_cloud/types/report_file_info.py +37 -0
  35. llama_cloud/types/report_metadata.py +2 -1
  36. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.10.dist-info}/METADATA +1 -1
  37. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.10.dist-info}/RECORD +39 -28
  38. llama_cloud/types/extract_run_data_value.py +0 -5
  39. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.10.dist-info}/LICENSE +0 -0
  40. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.10.dist-info}/WHEEL +0 -0
@@ -1,6 +1,17 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
- from .extract_agent_create_data_schema_value import ExtractAgentCreateDataSchemaValue
4
- from .extract_agent_update_data_schema_value import ExtractAgentUpdateDataSchemaValue
3
+ from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
4
+ from .extract_agent_create_data_schema_zero_value import ExtractAgentCreateDataSchemaZeroValue
5
+ from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
6
+ from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
7
+ from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
8
+ from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue
5
9
 
6
- __all__ = ["ExtractAgentCreateDataSchemaValue", "ExtractAgentUpdateDataSchemaValue"]
10
+ __all__ = [
11
+ "ExtractAgentCreateDataSchema",
12
+ "ExtractAgentCreateDataSchemaZeroValue",
13
+ "ExtractAgentUpdateDataSchema",
14
+ "ExtractAgentUpdateDataSchemaZeroValue",
15
+ "ExtractSchemaValidateRequestDataSchema",
16
+ "ExtractSchemaValidateRequestDataSchemaZeroValue",
17
+ ]
@@ -0,0 +1,9 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ from .extract_agent_create_data_schema_zero_value import ExtractAgentCreateDataSchemaZeroValue
6
+
7
+ ExtractAgentCreateDataSchema = typing.Union[
8
+ typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaZeroValue]], str
9
+ ]
@@ -2,6 +2,6 @@
2
2
 
3
3
  import typing
4
4
 
5
- ExtractAgentCreateDataSchemaValue = typing.Union[
5
+ ExtractAgentCreateDataSchemaZeroValue = typing.Union[
6
6
  typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
7
7
  ]
@@ -0,0 +1,9 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
6
+
7
+ ExtractAgentUpdateDataSchema = typing.Union[
8
+ typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaZeroValue]], str
9
+ ]
@@ -2,6 +2,6 @@
2
2
 
3
3
  import typing
4
4
 
5
- ExtractAgentUpdateDataSchemaValue = typing.Union[
5
+ ExtractAgentUpdateDataSchemaZeroValue = typing.Union[
6
6
  typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
7
7
  ]
@@ -0,0 +1,9 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue
6
+
7
+ ExtractSchemaValidateRequestDataSchema = typing.Union[
8
+ typing.Dict[str, typing.Optional[ExtractSchemaValidateRequestDataSchemaZeroValue]], str
9
+ ]
@@ -0,0 +1,7 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ ExtractSchemaValidateRequestDataSchemaZeroValue = typing.Union[
6
+ typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
7
+ ]
@@ -229,14 +229,14 @@ class OrganizationsClient:
229
229
  raise ApiError(status_code=_response.status_code, body=_response.text)
230
230
  raise ApiError(status_code=_response.status_code, body=_response_json)
231
231
 
232
- def update_organization(self, organization_id: str, *, name: typing.Optional[str] = OMIT) -> Organization:
232
+ def update_organization(self, organization_id: str, *, name: str) -> Organization:
233
233
  """
234
234
  Update an existing organization.
235
235
 
236
236
  Parameters:
237
237
  - organization_id: str.
238
238
 
239
- - name: typing.Optional[str].
239
+ - name: str. A name for the organization.
240
240
  ---
241
241
  from llama_cloud.client import LlamaCloud
242
242
 
@@ -245,15 +245,13 @@ class OrganizationsClient:
245
245
  )
246
246
  client.organizations.update_organization(
247
247
  organization_id="string",
248
+ name="string",
248
249
  )
249
250
  """
250
- _request: typing.Dict[str, typing.Any] = {}
251
- if name is not OMIT:
252
- _request["name"] = name
253
251
  _response = self._client_wrapper.httpx_client.request(
254
252
  "PUT",
255
253
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}"),
256
- json=jsonable_encoder(_request),
254
+ json=jsonable_encoder({"name": name}),
257
255
  headers=self._client_wrapper.get_headers(),
258
256
  timeout=60,
259
257
  )
@@ -922,14 +920,14 @@ class AsyncOrganizationsClient:
922
920
  raise ApiError(status_code=_response.status_code, body=_response.text)
923
921
  raise ApiError(status_code=_response.status_code, body=_response_json)
924
922
 
925
- async def update_organization(self, organization_id: str, *, name: typing.Optional[str] = OMIT) -> Organization:
923
+ async def update_organization(self, organization_id: str, *, name: str) -> Organization:
926
924
  """
927
925
  Update an existing organization.
928
926
 
929
927
  Parameters:
930
928
  - organization_id: str.
931
929
 
932
- - name: typing.Optional[str].
930
+ - name: str. A name for the organization.
933
931
  ---
934
932
  from llama_cloud.client import AsyncLlamaCloud
935
933
 
@@ -938,15 +936,13 @@ class AsyncOrganizationsClient:
938
936
  )
939
937
  await client.organizations.update_organization(
940
938
  organization_id="string",
939
+ name="string",
941
940
  )
942
941
  """
943
- _request: typing.Dict[str, typing.Any] = {}
944
- if name is not OMIT:
945
- _request["name"] = name
946
942
  _response = await self._client_wrapper.httpx_client.request(
947
943
  "PUT",
948
944
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}"),
949
- json=jsonable_encoder(_request),
945
+ json=jsonable_encoder({"name": name}),
950
946
  headers=self._client_wrapper.get_headers(),
951
947
  timeout=60,
952
948
  )
@@ -166,6 +166,14 @@ class ParsingClient:
166
166
  bounding_box: str,
167
167
  gpt_4_o_mode: bool,
168
168
  gpt_4_o_api_key: str,
169
+ job_timeout_in_seconds: float,
170
+ job_timeout_extra_time_per_page_in_seconds: float,
171
+ strict_mode_image_extraction: bool,
172
+ strict_mode_image_ocr: bool,
173
+ strict_mode_reconstruction: bool,
174
+ strict_mode_buggy_font: bool,
175
+ ignore_document_elements_for_layout_detection: bool,
176
+ output_tables_as_html: bool,
169
177
  ) -> ParsingJob:
170
178
  """
171
179
  Upload a file to s3 and create a job. return a job id
@@ -296,6 +304,22 @@ class ParsingClient:
296
304
  - gpt_4_o_mode: bool.
297
305
 
298
306
  - gpt_4_o_api_key: str.
307
+
308
+ - job_timeout_in_seconds: float.
309
+
310
+ - job_timeout_extra_time_per_page_in_seconds: float.
311
+
312
+ - strict_mode_image_extraction: bool.
313
+
314
+ - strict_mode_image_ocr: bool.
315
+
316
+ - strict_mode_reconstruction: bool.
317
+
318
+ - strict_mode_buggy_font: bool.
319
+
320
+ - ignore_document_elements_for_layout_detection: bool.
321
+
322
+ - output_tables_as_html: bool.
299
323
  """
300
324
  _request: typing.Dict[str, typing.Any] = {
301
325
  "annotate_links": annotate_links,
@@ -357,6 +381,14 @@ class ParsingClient:
357
381
  "bounding_box": bounding_box,
358
382
  "gpt4o_mode": gpt_4_o_mode,
359
383
  "gpt4o_api_key": gpt_4_o_api_key,
384
+ "job_timeout_in_seconds": job_timeout_in_seconds,
385
+ "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
386
+ "strict_mode_image_extraction": strict_mode_image_extraction,
387
+ "strict_mode_image_ocr": strict_mode_image_ocr,
388
+ "strict_mode_reconstruction": strict_mode_reconstruction,
389
+ "strict_mode_buggy_font": strict_mode_buggy_font,
390
+ "ignore_document_elements_for_layout_detection": ignore_document_elements_for_layout_detection,
391
+ "output_tables_as_HTML": output_tables_as_html,
360
392
  }
361
393
  if file is not OMIT:
362
394
  _request["file"] = file
@@ -991,6 +1023,14 @@ class AsyncParsingClient:
991
1023
  bounding_box: str,
992
1024
  gpt_4_o_mode: bool,
993
1025
  gpt_4_o_api_key: str,
1026
+ job_timeout_in_seconds: float,
1027
+ job_timeout_extra_time_per_page_in_seconds: float,
1028
+ strict_mode_image_extraction: bool,
1029
+ strict_mode_image_ocr: bool,
1030
+ strict_mode_reconstruction: bool,
1031
+ strict_mode_buggy_font: bool,
1032
+ ignore_document_elements_for_layout_detection: bool,
1033
+ output_tables_as_html: bool,
994
1034
  ) -> ParsingJob:
995
1035
  """
996
1036
  Upload a file to s3 and create a job. return a job id
@@ -1121,6 +1161,22 @@ class AsyncParsingClient:
1121
1161
  - gpt_4_o_mode: bool.
1122
1162
 
1123
1163
  - gpt_4_o_api_key: str.
1164
+
1165
+ - job_timeout_in_seconds: float.
1166
+
1167
+ - job_timeout_extra_time_per_page_in_seconds: float.
1168
+
1169
+ - strict_mode_image_extraction: bool.
1170
+
1171
+ - strict_mode_image_ocr: bool.
1172
+
1173
+ - strict_mode_reconstruction: bool.
1174
+
1175
+ - strict_mode_buggy_font: bool.
1176
+
1177
+ - ignore_document_elements_for_layout_detection: bool.
1178
+
1179
+ - output_tables_as_html: bool.
1124
1180
  """
1125
1181
  _request: typing.Dict[str, typing.Any] = {
1126
1182
  "annotate_links": annotate_links,
@@ -1182,6 +1238,14 @@ class AsyncParsingClient:
1182
1238
  "bounding_box": bounding_box,
1183
1239
  "gpt4o_mode": gpt_4_o_mode,
1184
1240
  "gpt4o_api_key": gpt_4_o_api_key,
1241
+ "job_timeout_in_seconds": job_timeout_in_seconds,
1242
+ "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
1243
+ "strict_mode_image_extraction": strict_mode_image_extraction,
1244
+ "strict_mode_image_ocr": strict_mode_image_ocr,
1245
+ "strict_mode_reconstruction": strict_mode_reconstruction,
1246
+ "strict_mode_buggy_font": strict_mode_buggy_font,
1247
+ "ignore_document_elements_for_layout_detection": ignore_document_elements_for_layout_detection,
1248
+ "output_tables_as_HTML": output_tables_as_html,
1185
1249
  }
1186
1250
  if file is not OMIT:
1187
1251
  _request["file"] = file
@@ -47,9 +47,10 @@ class ReportsClient:
47
47
  organization_id: typing.Optional[str] = None,
48
48
  name: str,
49
49
  template_text: str,
50
- template_instructions: str,
50
+ template_instructions: typing.Optional[str] = OMIT,
51
+ existing_retriever_id: typing.Optional[str] = OMIT,
51
52
  files: typing.List[str],
52
- template_file: typing.IO,
53
+ template_file: typing.Optional[str] = OMIT,
53
54
  ) -> ReportCreateResponse:
54
55
  """
55
56
  Create a new report.
@@ -63,25 +64,26 @@ class ReportsClient:
63
64
 
64
65
  - template_text: str.
65
66
 
66
- - template_instructions: str.
67
+ - template_instructions: typing.Optional[str].
68
+
69
+ - existing_retriever_id: typing.Optional[str].
67
70
 
68
71
  - files: typing.List[str].
69
72
 
70
- - template_file: typing.IO.
73
+ - template_file: typing.Optional[str].
71
74
  """
75
+ _request: typing.Dict[str, typing.Any] = {"name": name, "template_text": template_text, "files": files}
76
+ if template_instructions is not OMIT:
77
+ _request["template_instructions"] = template_instructions
78
+ if existing_retriever_id is not OMIT:
79
+ _request["existing_retriever_id"] = existing_retriever_id
80
+ if template_file is not OMIT:
81
+ _request["template_file"] = template_file
72
82
  _response = self._client_wrapper.httpx_client.request(
73
83
  "POST",
74
84
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/reports"),
75
85
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
76
- data=jsonable_encoder(
77
- {
78
- "name": name,
79
- "template_text": template_text,
80
- "template_instructions": template_instructions,
81
- "files": files,
82
- }
83
- ),
84
- files={"template_file": template_file},
86
+ json=jsonable_encoder(_request),
85
87
  headers=self._client_wrapper.get_headers(),
86
88
  timeout=60,
87
89
  )
@@ -628,9 +630,10 @@ class AsyncReportsClient:
628
630
  organization_id: typing.Optional[str] = None,
629
631
  name: str,
630
632
  template_text: str,
631
- template_instructions: str,
633
+ template_instructions: typing.Optional[str] = OMIT,
634
+ existing_retriever_id: typing.Optional[str] = OMIT,
632
635
  files: typing.List[str],
633
- template_file: typing.IO,
636
+ template_file: typing.Optional[str] = OMIT,
634
637
  ) -> ReportCreateResponse:
635
638
  """
636
639
  Create a new report.
@@ -644,25 +647,26 @@ class AsyncReportsClient:
644
647
 
645
648
  - template_text: str.
646
649
 
647
- - template_instructions: str.
650
+ - template_instructions: typing.Optional[str].
651
+
652
+ - existing_retriever_id: typing.Optional[str].
648
653
 
649
654
  - files: typing.List[str].
650
655
 
651
- - template_file: typing.IO.
656
+ - template_file: typing.Optional[str].
652
657
  """
658
+ _request: typing.Dict[str, typing.Any] = {"name": name, "template_text": template_text, "files": files}
659
+ if template_instructions is not OMIT:
660
+ _request["template_instructions"] = template_instructions
661
+ if existing_retriever_id is not OMIT:
662
+ _request["existing_retriever_id"] = existing_retriever_id
663
+ if template_file is not OMIT:
664
+ _request["template_file"] = template_file
653
665
  _response = await self._client_wrapper.httpx_client.request(
654
666
  "POST",
655
667
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/reports"),
656
668
  params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
657
- data=jsonable_encoder(
658
- {
659
- "name": name,
660
- "template_text": template_text,
661
- "template_instructions": template_instructions,
662
- "files": files,
663
- }
664
- ),
665
- files={"template_file": template_file},
669
+ json=jsonable_encoder(_request),
666
670
  headers=self._client_wrapper.get_headers(),
667
671
  timeout=60,
668
672
  )
@@ -33,12 +33,18 @@ class RetrieversClient:
33
33
  self._client_wrapper = client_wrapper
34
34
 
35
35
  def list_retrievers(
36
- self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
36
+ self,
37
+ *,
38
+ name: typing.Optional[str] = None,
39
+ project_id: typing.Optional[str] = None,
40
+ organization_id: typing.Optional[str] = None,
37
41
  ) -> typing.List[Retriever]:
38
42
  """
39
43
  List Retrievers for a project.
40
44
 
41
45
  Parameters:
46
+ - name: typing.Optional[str].
47
+
42
48
  - project_id: typing.Optional[str].
43
49
 
44
50
  - organization_id: typing.Optional[str].
@@ -53,7 +59,7 @@ class RetrieversClient:
53
59
  _response = self._client_wrapper.httpx_client.request(
54
60
  "GET",
55
61
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/retrievers"),
56
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
62
+ params=remove_none_from_dict({"name": name, "project_id": project_id, "organization_id": organization_id}),
57
63
  headers=self._client_wrapper.get_headers(),
58
64
  timeout=60,
59
65
  )
@@ -346,12 +352,18 @@ class AsyncRetrieversClient:
346
352
  self._client_wrapper = client_wrapper
347
353
 
348
354
  async def list_retrievers(
349
- self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
355
+ self,
356
+ *,
357
+ name: typing.Optional[str] = None,
358
+ project_id: typing.Optional[str] = None,
359
+ organization_id: typing.Optional[str] = None,
350
360
  ) -> typing.List[Retriever]:
351
361
  """
352
362
  List Retrievers for a project.
353
363
 
354
364
  Parameters:
365
+ - name: typing.Optional[str].
366
+
355
367
  - project_id: typing.Optional[str].
356
368
 
357
369
  - organization_id: typing.Optional[str].
@@ -366,7 +378,7 @@ class AsyncRetrieversClient:
366
378
  _response = await self._client_wrapper.httpx_client.request(
367
379
  "GET",
368
380
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/retrievers"),
369
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
381
+ params=remove_none_from_dict({"name": name, "project_id": project_id, "organization_id": organization_id}),
370
382
  headers=self._client_wrapper.get_headers(),
371
383
  timeout=60,
372
384
  )
@@ -110,7 +110,8 @@ from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
110
110
  from .extract_config import ExtractConfig
111
111
  from .extract_job import ExtractJob
112
112
  from .extract_job_create import ExtractJobCreate
113
- from .extract_job_create_data_schema_override_value import ExtractJobCreateDataSchemaOverrideValue
113
+ from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
114
+ from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
114
115
  from .extract_mode import ExtractMode
115
116
  from .extract_resultset import ExtractResultset
116
117
  from .extract_resultset_data import ExtractResultsetData
@@ -118,9 +119,13 @@ from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
118
119
  from .extract_resultset_data_zero_value import ExtractResultsetDataZeroValue
119
120
  from .extract_resultset_extraction_metadata_value import ExtractResultsetExtractionMetadataValue
120
121
  from .extract_run import ExtractRun
122
+ from .extract_run_data import ExtractRunData
123
+ from .extract_run_data_item_value import ExtractRunDataItemValue
121
124
  from .extract_run_data_schema_value import ExtractRunDataSchemaValue
122
- from .extract_run_data_value import ExtractRunDataValue
125
+ from .extract_run_data_zero_value import ExtractRunDataZeroValue
123
126
  from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
127
+ from .extract_schema_validate_response import ExtractSchemaValidateResponse
128
+ from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
124
129
  from .extract_state import ExtractState
125
130
  from .file import File
126
131
  from .file_permission_info_value import FilePermissionInfoValue
@@ -240,6 +245,7 @@ from .pipeline_type import PipelineType
240
245
  from .plan import Plan
241
246
  from .playground_session import PlaygroundSession
242
247
  from .pooling import Pooling
248
+ from .preset_composite_retrieval_params import PresetCompositeRetrievalParams
243
249
  from .preset_retrieval_params import PresetRetrievalParams
244
250
  from .presigned_url import PresignedUrl
245
251
  from .progress_event import ProgressEvent
@@ -263,6 +269,7 @@ from .report_event_item_event_data import (
263
269
  ReportEventItemEventData_ReportStateUpdate,
264
270
  )
265
271
  from .report_event_type import ReportEventType
272
+ from .report_file_info import ReportFileInfo
266
273
  from .report_metadata import ReportMetadata
267
274
  from .report_plan import ReportPlan
268
275
  from .report_plan_block import ReportPlanBlock
@@ -406,7 +413,8 @@ __all__ = [
406
413
  "ExtractConfig",
407
414
  "ExtractJob",
408
415
  "ExtractJobCreate",
409
- "ExtractJobCreateDataSchemaOverrideValue",
416
+ "ExtractJobCreateDataSchemaOverride",
417
+ "ExtractJobCreateDataSchemaOverrideZeroValue",
410
418
  "ExtractMode",
411
419
  "ExtractResultset",
412
420
  "ExtractResultsetData",
@@ -414,9 +422,13 @@ __all__ = [
414
422
  "ExtractResultsetDataZeroValue",
415
423
  "ExtractResultsetExtractionMetadataValue",
416
424
  "ExtractRun",
425
+ "ExtractRunData",
426
+ "ExtractRunDataItemValue",
417
427
  "ExtractRunDataSchemaValue",
418
- "ExtractRunDataValue",
428
+ "ExtractRunDataZeroValue",
419
429
  "ExtractRunExtractionMetadataValue",
430
+ "ExtractSchemaValidateResponse",
431
+ "ExtractSchemaValidateResponseDataSchemaValue",
420
432
  "ExtractState",
421
433
  "File",
422
434
  "FilePermissionInfoValue",
@@ -528,6 +540,7 @@ __all__ = [
528
540
  "Plan",
529
541
  "PlaygroundSession",
530
542
  "Pooling",
543
+ "PresetCompositeRetrievalParams",
531
544
  "PresetRetrievalParams",
532
545
  "PresignedUrl",
533
546
  "ProgressEvent",
@@ -549,6 +562,7 @@ __all__ = [
549
562
  "ReportEventItemEventData_ReportBlockUpdate",
550
563
  "ReportEventItemEventData_ReportStateUpdate",
551
564
  "ReportEventType",
565
+ "ReportFileInfo",
552
566
  "ReportMetadata",
553
567
  "ReportPlan",
554
568
  "ReportPlanBlock",
@@ -5,7 +5,7 @@ import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
7
  from .llm_parameters import LlmParameters
8
- from .preset_retrieval_params import PresetRetrievalParams
8
+ from .preset_composite_retrieval_params import PresetCompositeRetrievalParams
9
9
 
10
10
  try:
11
11
  import pydantic
@@ -21,14 +21,16 @@ class ChatApp(pydantic.BaseModel):
21
21
  Schema for a chat app
22
22
  """
23
23
 
24
- id: str
25
- name: str
26
- pipeline_id: str
27
- project_id: str
28
- llm_config: LlmParameters
29
- retrieval_config: PresetRetrievalParams
30
- created_at: dt.datetime
31
- updated_at: dt.datetime
24
+ id: str = pydantic.Field(description="Unique identifier")
25
+ created_at: typing.Optional[dt.datetime]
26
+ updated_at: typing.Optional[dt.datetime]
27
+ name: str = pydantic.Field(description="Name of the chat app")
28
+ retriever_id: str = pydantic.Field(description="ID of the retriever to use for the chat app")
29
+ llm_config: LlmParameters = pydantic.Field(description="Configuration for the LLM model to use for the chat app")
30
+ retrieval_config: PresetCompositeRetrievalParams = pydantic.Field(
31
+ description="Configuration for the retrieval model to use for the chat app"
32
+ )
33
+ project_id: str = pydantic.Field(description="ID of the project the chat app belongs to")
32
34
 
33
35
  def json(self, **kwargs: typing.Any) -> str:
34
36
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -5,7 +5,7 @@ import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
7
  from .llm_parameters import LlmParameters
8
- from .preset_retrieval_params import PresetRetrievalParams
8
+ from .preset_composite_retrieval_params import PresetCompositeRetrievalParams
9
9
 
10
10
  try:
11
11
  import pydantic
@@ -17,15 +17,17 @@ except ImportError:
17
17
 
18
18
 
19
19
  class ChatAppResponse(pydantic.BaseModel):
20
- id: str
21
- name: str
22
- pipeline_id: str
23
- project_id: str
24
- llm_config: LlmParameters
25
- retrieval_config: PresetRetrievalParams
26
- created_at: dt.datetime
27
- updated_at: dt.datetime
28
- pipeline_name: str
20
+ id: str = pydantic.Field(description="Unique identifier")
21
+ created_at: typing.Optional[dt.datetime]
22
+ updated_at: typing.Optional[dt.datetime]
23
+ name: str = pydantic.Field(description="Name of the chat app")
24
+ retriever_id: str = pydantic.Field(description="ID of the retriever to use for the chat app")
25
+ llm_config: LlmParameters = pydantic.Field(description="Configuration for the LLM model to use for the chat app")
26
+ retrieval_config: PresetCompositeRetrievalParams = pydantic.Field(
27
+ description="Configuration for the retrieval model to use for the chat app"
28
+ )
29
+ project_id: str = pydantic.Field(description="ID of the project the chat app belongs to")
30
+ retriever_name: str
29
31
 
30
32
  def json(self, **kwargs: typing.Any) -> str:
31
33
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -35,6 +35,7 @@ class CloudMongoDbAtlasVectorSearch(pydantic.BaseModel):
35
35
  collection_name: str
36
36
  vector_index_name: typing.Optional[str]
37
37
  fulltext_index_name: typing.Optional[str]
38
+ embedding_dimension: typing.Optional[int]
38
39
  class_name: typing.Optional[str]
39
40
 
40
41
  def json(self, **kwargs: typing.Any) -> str:
@@ -5,7 +5,7 @@ import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
7
  from .extract_config import ExtractConfig
8
- from .extract_job_create_data_schema_override_value import ExtractJobCreateDataSchemaOverrideValue
8
+ from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
9
9
 
10
10
  try:
11
11
  import pydantic
@@ -23,7 +23,9 @@ class ExtractJobCreate(pydantic.BaseModel):
23
23
 
24
24
  extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
25
25
  file_id: str = pydantic.Field(description="The id of the file")
26
- data_schema_override: typing.Optional[typing.Dict[str, typing.Optional[ExtractJobCreateDataSchemaOverrideValue]]]
26
+ data_schema_override: typing.Optional[ExtractJobCreateDataSchemaOverride] = pydantic.Field(
27
+ description="The data schema to override the extraction agent's data schema with"
28
+ )
27
29
  config_override: typing.Optional[ExtractConfig]
28
30
 
29
31
  def json(self, **kwargs: typing.Any) -> str:
@@ -0,0 +1,9 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
6
+
7
+ ExtractJobCreateDataSchemaOverride = typing.Union[
8
+ typing.Dict[str, typing.Optional[ExtractJobCreateDataSchemaOverrideZeroValue]], str
9
+ ]
@@ -2,6 +2,6 @@
2
2
 
3
3
  import typing
4
4
 
5
- ExtractJobCreateDataSchemaOverrideValue = typing.Union[
5
+ ExtractJobCreateDataSchemaOverrideZeroValue = typing.Union[
6
6
  typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
7
7
  ]
@@ -5,8 +5,8 @@ import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
7
  from .extract_config import ExtractConfig
8
+ from .extract_run_data import ExtractRunData
8
9
  from .extract_run_data_schema_value import ExtractRunDataSchemaValue
9
- from .extract_run_data_value import ExtractRunDataValue
10
10
  from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
11
11
  from .extract_state import ExtractState
12
12
  from .file import File
@@ -37,7 +37,7 @@ class ExtractRun(pydantic.BaseModel):
37
37
  status: ExtractState = pydantic.Field(description="The status of the extraction run")
38
38
  error: typing.Optional[str]
39
39
  job_id: typing.Optional[str]
40
- data: typing.Optional[typing.Dict[str, typing.Optional[ExtractRunDataValue]]]
40
+ data: typing.Optional[ExtractRunData] = pydantic.Field(description="The data extracted from the file")
41
41
  extraction_metadata: typing.Optional[typing.Dict[str, typing.Optional[ExtractRunExtractionMetadataValue]]]
42
42
 
43
43
  def json(self, **kwargs: typing.Any) -> str:
@@ -0,0 +1,11 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ from .extract_run_data_item_value import ExtractRunDataItemValue
6
+ from .extract_run_data_zero_value import ExtractRunDataZeroValue
7
+
8
+ ExtractRunData = typing.Union[
9
+ typing.Dict[str, typing.Optional[ExtractRunDataZeroValue]],
10
+ typing.List[typing.Dict[str, typing.Optional[ExtractRunDataItemValue]]],
11
+ ]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ ExtractRunDataItemValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ ExtractRunDataZeroValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]