llama-cloud 0.1.9__py3-none-any.whl → 0.1.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of llama-cloud might be problematic.

Files changed (46)
  1. llama_cloud/__init__.py +34 -8
  2. llama_cloud/resources/__init__.py +14 -3
  3. llama_cloud/resources/chat_apps/client.py +99 -133
  4. llama_cloud/resources/llama_extract/__init__.py +16 -2
  5. llama_cloud/resources/llama_extract/client.py +328 -122
  6. llama_cloud/resources/llama_extract/types/__init__.py +14 -3
  7. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +9 -0
  8. llama_cloud/resources/llama_extract/types/{extract_agent_create_data_schema_value.py → extract_agent_create_data_schema_zero_value.py} +1 -1
  9. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +9 -0
  10. llama_cloud/resources/llama_extract/types/{extract_agent_update_data_schema_value.py → extract_agent_update_data_schema_zero_value.py} +1 -1
  11. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +9 -0
  12. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +7 -0
  13. llama_cloud/resources/organizations/client.py +8 -12
  14. llama_cloud/resources/parsing/client.py +244 -0
  15. llama_cloud/resources/reports/client.py +30 -26
  16. llama_cloud/resources/retrievers/client.py +16 -4
  17. llama_cloud/types/__init__.py +22 -4
  18. llama_cloud/types/chat_app.py +11 -9
  19. llama_cloud/types/chat_app_response.py +12 -10
  20. llama_cloud/types/cloud_mongo_db_atlas_vector_search.py +1 -0
  21. llama_cloud/types/edit_suggestion.py +3 -4
  22. llama_cloud/types/edit_suggestion_blocks_item.py +8 -0
  23. llama_cloud/types/extract_config.py +2 -0
  24. llama_cloud/types/extract_job_create.py +4 -2
  25. llama_cloud/types/extract_job_create_data_schema_override.py +9 -0
  26. llama_cloud/types/{extract_job_create_data_schema_override_value.py → extract_job_create_data_schema_override_zero_value.py} +1 -1
  27. llama_cloud/types/extract_mode.py +7 -7
  28. llama_cloud/types/extract_run.py +2 -2
  29. llama_cloud/types/extract_run_data.py +11 -0
  30. llama_cloud/types/extract_run_data_item_value.py +5 -0
  31. llama_cloud/types/extract_run_data_zero_value.py +5 -0
  32. llama_cloud/types/extract_schema_validate_response.py +32 -0
  33. llama_cloud/types/extract_schema_validate_response_data_schema_value.py +7 -0
  34. llama_cloud/types/extract_target.py +17 -0
  35. llama_cloud/types/llama_extract_settings.py +8 -1
  36. llama_cloud/types/llama_parse_parameters.py +9 -0
  37. llama_cloud/types/plan.py +4 -0
  38. llama_cloud/types/preset_composite_retrieval_params.py +35 -0
  39. llama_cloud/types/report_file_info.py +37 -0
  40. llama_cloud/types/report_metadata.py +2 -1
  41. llama_cloud/types/supported_llm_model_names.py +28 -4
  42. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/METADATA +1 -1
  43. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/RECORD +45 -32
  44. llama_cloud/types/extract_run_data_value.py +0 -5
  45. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/LICENSE +0 -0
  46. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/WHEEL +0 -0
llama_cloud/resources/llama_extract/types/__init__.py
@@ -1,6 +1,17 @@
  # This file was auto-generated by Fern from our API Definition.

- from .extract_agent_create_data_schema_value import ExtractAgentCreateDataSchemaValue
- from .extract_agent_update_data_schema_value import ExtractAgentUpdateDataSchemaValue
+ from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
+ from .extract_agent_create_data_schema_zero_value import ExtractAgentCreateDataSchemaZeroValue
+ from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
+ from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
+ from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
+ from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue

- __all__ = ["ExtractAgentCreateDataSchemaValue", "ExtractAgentUpdateDataSchemaValue"]
+ __all__ = [
+     "ExtractAgentCreateDataSchema",
+     "ExtractAgentCreateDataSchemaZeroValue",
+     "ExtractAgentUpdateDataSchema",
+     "ExtractAgentUpdateDataSchemaZeroValue",
+     "ExtractSchemaValidateRequestDataSchema",
+     "ExtractSchemaValidateRequestDataSchemaZeroValue",
+ ]
llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py (new file)
@@ -0,0 +1,9 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .extract_agent_create_data_schema_zero_value import ExtractAgentCreateDataSchemaZeroValue
+
+ ExtractAgentCreateDataSchema = typing.Union[
+     typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaZeroValue]], str
+ ]
llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py → extract_agent_create_data_schema_zero_value.py
@@ -2,6 +2,6 @@

  import typing

- ExtractAgentCreateDataSchemaValue = typing.Union[
+ ExtractAgentCreateDataSchemaZeroValue = typing.Union[
      typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
  ]
llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py (new file)
@@ -0,0 +1,9 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
+
+ ExtractAgentUpdateDataSchema = typing.Union[
+     typing.Dict[str, typing.Optional[ExtractAgentUpdateDataSchemaZeroValue]], str
+ ]
llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py → extract_agent_update_data_schema_zero_value.py
@@ -2,6 +2,6 @@

  import typing

- ExtractAgentUpdateDataSchemaValue = typing.Union[
+ ExtractAgentUpdateDataSchemaZeroValue = typing.Union[
      typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
  ]
llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py (new file)
@@ -0,0 +1,9 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue
+
+ ExtractSchemaValidateRequestDataSchema = typing.Union[
+     typing.Dict[str, typing.Optional[ExtractSchemaValidateRequestDataSchemaZeroValue]], str
+ ]
llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py (new file)
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ExtractSchemaValidateRequestDataSchemaZeroValue = typing.Union[
+     typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+ ]
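
The net effect of the llama_extract type changes above: the old *DataSchemaValue aliases are renamed to *ZeroValue, and each data_schema field gains a new top-level alias accepting either a dict of those values or a plain string. A minimal sketch of what the new aliases admit (the aliases are reproduced from the diff; the server-side meaning of the string form is not shown in this diff, so the string value below is purely illustrative):

import typing

# Renamed inner union, as in the diff:
ExtractAgentCreateDataSchemaZeroValue = typing.Union[
    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
]

# New top-level alias, as in the diff:
ExtractAgentCreateDataSchema = typing.Union[
    typing.Dict[str, typing.Optional[ExtractAgentCreateDataSchemaZeroValue]], str
]

# Both shapes now type-check against the alias:
as_dict: ExtractAgentCreateDataSchema = {
    "type": "object",
    "properties": {"invoice_number": {"type": "string"}},
}
as_string: ExtractAgentCreateDataSchema = '{"type": "object"}'  # string form is new in 0.1.11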
llama_cloud/resources/organizations/client.py
@@ -229,14 +229,14 @@ class OrganizationsClient:
              raise ApiError(status_code=_response.status_code, body=_response.text)
          raise ApiError(status_code=_response.status_code, body=_response_json)

-     def update_organization(self, organization_id: str, *, name: typing.Optional[str] = OMIT) -> Organization:
+     def update_organization(self, organization_id: str, *, name: str) -> Organization:
          """
          Update an existing organization.

          Parameters:
              - organization_id: str.

-             - name: typing.Optional[str].
+             - name: str. A name for the organization.
          ---
          from llama_cloud.client import LlamaCloud

@@ -245,15 +245,13 @@
          )
          client.organizations.update_organization(
              organization_id="string",
+             name="string",
          )
          """
-         _request: typing.Dict[str, typing.Any] = {}
-         if name is not OMIT:
-             _request["name"] = name
          _response = self._client_wrapper.httpx_client.request(
              "PUT",
              urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}"),
-             json=jsonable_encoder(_request),
+             json=jsonable_encoder({"name": name}),
              headers=self._client_wrapper.get_headers(),
              timeout=60,
          )
@@ -922,14 +920,14 @@ class AsyncOrganizationsClient:
              raise ApiError(status_code=_response.status_code, body=_response.text)
          raise ApiError(status_code=_response.status_code, body=_response_json)

-     async def update_organization(self, organization_id: str, *, name: typing.Optional[str] = OMIT) -> Organization:
+     async def update_organization(self, organization_id: str, *, name: str) -> Organization:
          """
          Update an existing organization.

          Parameters:
              - organization_id: str.

-             - name: typing.Optional[str].
+             - name: str. A name for the organization.
          ---
          from llama_cloud.client import AsyncLlamaCloud

@@ -938,15 +936,13 @@
          )
          await client.organizations.update_organization(
              organization_id="string",
+             name="string",
          )
          """
-         _request: typing.Dict[str, typing.Any] = {}
-         if name is not OMIT:
-             _request["name"] = name
          _response = await self._client_wrapper.httpx_client.request(
              "PUT",
              urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}"),
-             json=jsonable_encoder(_request),
+             json=jsonable_encoder({"name": name}),
              headers=self._client_wrapper.get_headers(),
              timeout=60,
          )
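
Note the breaking change above: update_organization previously treated name as optional (defaulting to the OMIT sentinel and dropping it from the request body) and now requires it, in both the sync and async clients. A minimal sketch of the 0.1.11 call; the token keyword is an assumption, since client construction is elided in the docstrings above, and the ids are hypothetical:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="...")  # token kwarg assumed, not shown in this diff

# 0.1.9 accepted update_organization(organization_id=...) with no name;
# 0.1.11 makes name a required keyword argument that is always sent.
org = client.organizations.update_organization(
    organization_id="org_123",  # hypothetical id
    name="Acme Research",
)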
llama_cloud/resources/parsing/client.py
@@ -100,6 +100,96 @@ class ParsingClient:
              raise ApiError(status_code=_response.status_code, body=_response.text)
          raise ApiError(status_code=_response.status_code, body=_response_json)

+     def screenshot(
+         self,
+         *,
+         organization_id: typing.Optional[str] = None,
+         project_id: typing.Optional[str] = None,
+         file: typing.Optional[str] = OMIT,
+         do_not_cache: bool,
+         http_proxy: str,
+         input_s_3_path: str,
+         input_s_3_region: str,
+         input_url: str,
+         invalidate_cache: bool,
+         max_pages: typing.Optional[int] = OMIT,
+         output_s_3_path_prefix: str,
+         output_s_3_region: str,
+         target_pages: str,
+         webhook_url: str,
+         job_timeout_in_seconds: float,
+         job_timeout_extra_time_per_page_in_seconds: float,
+     ) -> ParsingJob:
+         """
+         Parameters:
+             - organization_id: typing.Optional[str].
+
+             - project_id: typing.Optional[str].
+
+             - file: typing.Optional[str].
+
+             - do_not_cache: bool.
+
+             - http_proxy: str.
+
+             - input_s_3_path: str.
+
+             - input_s_3_region: str.
+
+             - input_url: str.
+
+             - invalidate_cache: bool.
+
+             - max_pages: typing.Optional[int].
+
+             - output_s_3_path_prefix: str.
+
+             - output_s_3_region: str.
+
+             - target_pages: str.
+
+             - webhook_url: str.
+
+             - job_timeout_in_seconds: float.
+
+             - job_timeout_extra_time_per_page_in_seconds: float.
+         """
+         _request: typing.Dict[str, typing.Any] = {
+             "do_not_cache": do_not_cache,
+             "http_proxy": http_proxy,
+             "input_s3_path": input_s_3_path,
+             "input_s3_region": input_s_3_region,
+             "input_url": input_url,
+             "invalidate_cache": invalidate_cache,
+             "output_s3_path_prefix": output_s_3_path_prefix,
+             "output_s3_region": output_s_3_region,
+             "target_pages": target_pages,
+             "webhook_url": webhook_url,
+             "job_timeout_in_seconds": job_timeout_in_seconds,
+             "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
+         }
+         if file is not OMIT:
+             _request["file"] = file
+         if max_pages is not OMIT:
+             _request["max_pages"] = max_pages
+         _response = self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/screenshot"),
+             params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
      def upload_file(
          self,
          *,
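
The new screenshot method posts to api/v1/parsing/screenshot and returns a ParsingJob. Note that only file and max_pages use the OMIT sentinel; the remaining body fields are required keyword arguments even when unused, so callers must pass placeholders. A hedged sketch, reusing the client from the organizations example, with illustrative values throughout:

# All values illustrative; only file and max_pages may be omitted.
job = client.parsing.screenshot(
    input_url="https://example.com/slides.pdf",
    target_pages="0-3",
    max_pages=4,                  # OMIT-able, included for illustration
    do_not_cache=False,
    invalidate_cache=False,
    http_proxy="",                # required keywords even when unused,
    input_s_3_path="",            # so placeholders must be passed
    input_s_3_region="",
    output_s_3_path_prefix="",
    output_s_3_region="",
    webhook_url="",
    job_timeout_in_seconds=600.0,
    job_timeout_extra_time_per_page_in_seconds=10.0,
)
# The response is parsed into ParsingJob via pydantic; 422s raise
# UnprocessableEntityError, other failures raise ApiError.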
@@ -166,6 +256,14 @@
          bounding_box: str,
          gpt_4_o_mode: bool,
          gpt_4_o_api_key: str,
+         job_timeout_in_seconds: float,
+         job_timeout_extra_time_per_page_in_seconds: float,
+         strict_mode_image_extraction: bool,
+         strict_mode_image_ocr: bool,
+         strict_mode_reconstruction: bool,
+         strict_mode_buggy_font: bool,
+         ignore_document_elements_for_layout_detection: bool,
+         output_tables_as_html: bool,
      ) -> ParsingJob:
          """
          Upload a file to s3 and create a job. return a job id
@@ -296,6 +394,22 @@
              - gpt_4_o_mode: bool.

              - gpt_4_o_api_key: str.
+
+             - job_timeout_in_seconds: float.
+
+             - job_timeout_extra_time_per_page_in_seconds: float.
+
+             - strict_mode_image_extraction: bool.
+
+             - strict_mode_image_ocr: bool.
+
+             - strict_mode_reconstruction: bool.
+
+             - strict_mode_buggy_font: bool.
+
+             - ignore_document_elements_for_layout_detection: bool.
+
+             - output_tables_as_html: bool.
          """
          _request: typing.Dict[str, typing.Any] = {
              "annotate_links": annotate_links,
@@ -357,6 +471,14 @@
              "bounding_box": bounding_box,
              "gpt4o_mode": gpt_4_o_mode,
              "gpt4o_api_key": gpt_4_o_api_key,
+             "job_timeout_in_seconds": job_timeout_in_seconds,
+             "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
+             "strict_mode_image_extraction": strict_mode_image_extraction,
+             "strict_mode_image_ocr": strict_mode_image_ocr,
+             "strict_mode_reconstruction": strict_mode_reconstruction,
+             "strict_mode_buggy_font": strict_mode_buggy_font,
+             "ignore_document_elements_for_layout_detection": ignore_document_elements_for_layout_detection,
+             "output_tables_as_HTML": output_tables_as_html,
          }
          if file is not OMIT:
              _request["file"] = file
@@ -925,6 +1047,96 @@ class AsyncParsingClient:
              raise ApiError(status_code=_response.status_code, body=_response.text)
          raise ApiError(status_code=_response.status_code, body=_response_json)

+     async def screenshot(
+         self,
+         *,
+         organization_id: typing.Optional[str] = None,
+         project_id: typing.Optional[str] = None,
+         file: typing.Optional[str] = OMIT,
+         do_not_cache: bool,
+         http_proxy: str,
+         input_s_3_path: str,
+         input_s_3_region: str,
+         input_url: str,
+         invalidate_cache: bool,
+         max_pages: typing.Optional[int] = OMIT,
+         output_s_3_path_prefix: str,
+         output_s_3_region: str,
+         target_pages: str,
+         webhook_url: str,
+         job_timeout_in_seconds: float,
+         job_timeout_extra_time_per_page_in_seconds: float,
+     ) -> ParsingJob:
+         """
+         Parameters:
+             - organization_id: typing.Optional[str].
+
+             - project_id: typing.Optional[str].
+
+             - file: typing.Optional[str].
+
+             - do_not_cache: bool.
+
+             - http_proxy: str.
+
+             - input_s_3_path: str.
+
+             - input_s_3_region: str.
+
+             - input_url: str.
+
+             - invalidate_cache: bool.
+
+             - max_pages: typing.Optional[int].
+
+             - output_s_3_path_prefix: str.
+
+             - output_s_3_region: str.
+
+             - target_pages: str.
+
+             - webhook_url: str.
+
+             - job_timeout_in_seconds: float.
+
+             - job_timeout_extra_time_per_page_in_seconds: float.
+         """
+         _request: typing.Dict[str, typing.Any] = {
+             "do_not_cache": do_not_cache,
+             "http_proxy": http_proxy,
+             "input_s3_path": input_s_3_path,
+             "input_s3_region": input_s_3_region,
+             "input_url": input_url,
+             "invalidate_cache": invalidate_cache,
+             "output_s3_path_prefix": output_s_3_path_prefix,
+             "output_s3_region": output_s_3_region,
+             "target_pages": target_pages,
+             "webhook_url": webhook_url,
+             "job_timeout_in_seconds": job_timeout_in_seconds,
+             "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
+         }
+         if file is not OMIT:
+             _request["file"] = file
+         if max_pages is not OMIT:
+             _request["max_pages"] = max_pages
+         _response = await self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/screenshot"),
+             params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
      async def upload_file(
          self,
          *,
@@ -991,6 +1203,14 @@ class AsyncParsingClient:
          bounding_box: str,
          gpt_4_o_mode: bool,
          gpt_4_o_api_key: str,
+         job_timeout_in_seconds: float,
+         job_timeout_extra_time_per_page_in_seconds: float,
+         strict_mode_image_extraction: bool,
+         strict_mode_image_ocr: bool,
+         strict_mode_reconstruction: bool,
+         strict_mode_buggy_font: bool,
+         ignore_document_elements_for_layout_detection: bool,
+         output_tables_as_html: bool,
      ) -> ParsingJob:
          """
          Upload a file to s3 and create a job. return a job id
@@ -1121,6 +1341,22 @@
              - gpt_4_o_mode: bool.

              - gpt_4_o_api_key: str.
+
+             - job_timeout_in_seconds: float.
+
+             - job_timeout_extra_time_per_page_in_seconds: float.
+
+             - strict_mode_image_extraction: bool.
+
+             - strict_mode_image_ocr: bool.
+
+             - strict_mode_reconstruction: bool.
+
+             - strict_mode_buggy_font: bool.
+
+             - ignore_document_elements_for_layout_detection: bool.
+
+             - output_tables_as_html: bool.
          """
          _request: typing.Dict[str, typing.Any] = {
              "annotate_links": annotate_links,
@@ -1182,6 +1418,14 @@ class AsyncParsingClient:
              "bounding_box": bounding_box,
              "gpt4o_mode": gpt_4_o_mode,
              "gpt4o_api_key": gpt_4_o_api_key,
+             "job_timeout_in_seconds": job_timeout_in_seconds,
+             "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
+             "strict_mode_image_extraction": strict_mode_image_extraction,
+             "strict_mode_image_ocr": strict_mode_image_ocr,
+             "strict_mode_reconstruction": strict_mode_reconstruction,
+             "strict_mode_buggy_font": strict_mode_buggy_font,
+             "ignore_document_elements_for_layout_detection": ignore_document_elements_for_layout_detection,
+             "output_tables_as_HTML": output_tables_as_html,
          }
          if file is not OMIT:
              _request["file"] = file
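
Both upload_file variants gain eight required keyword arguments: two job-timeout controls, four strict-mode flags, a layout-detection escape hatch, and an HTML-tables toggle. Note the wire format: every new field maps to a snake_case JSON key except output_tables_as_html, which serializes as the mixed-case key output_tables_as_HTML. A sketch of just the new fields and the keys they map to (the values are illustrative, not server defaults):

# New upload_file keywords → JSON body keys (0.1.11); values illustrative.
new_upload_fields = {
    "job_timeout_in_seconds": 600.0,
    "job_timeout_extra_time_per_page_in_seconds": 10.0,
    "strict_mode_image_extraction": False,
    "strict_mode_image_ocr": False,
    "strict_mode_reconstruction": False,
    "strict_mode_buggy_font": False,
    "ignore_document_elements_for_layout_detection": False,
    "output_tables_as_HTML": False,  # Python kwarg is output_tables_as_html
}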
llama_cloud/resources/reports/client.py
@@ -47,9 +47,10 @@ class ReportsClient:
          organization_id: typing.Optional[str] = None,
          name: str,
          template_text: str,
-         template_instructions: str,
+         template_instructions: typing.Optional[str] = OMIT,
+         existing_retriever_id: typing.Optional[str] = OMIT,
          files: typing.List[str],
-         template_file: typing.IO,
+         template_file: typing.Optional[str] = OMIT,
      ) -> ReportCreateResponse:
          """
          Create a new report.
@@ -63,25 +64,26 @@

              - template_text: str.

-             - template_instructions: str.
+             - template_instructions: typing.Optional[str].
+
+             - existing_retriever_id: typing.Optional[str].

              - files: typing.List[str].

-             - template_file: typing.IO.
+             - template_file: typing.Optional[str].
          """
+         _request: typing.Dict[str, typing.Any] = {"name": name, "template_text": template_text, "files": files}
+         if template_instructions is not OMIT:
+             _request["template_instructions"] = template_instructions
+         if existing_retriever_id is not OMIT:
+             _request["existing_retriever_id"] = existing_retriever_id
+         if template_file is not OMIT:
+             _request["template_file"] = template_file
          _response = self._client_wrapper.httpx_client.request(
              "POST",
              urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/reports"),
              params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-             data=jsonable_encoder(
-                 {
-                     "name": name,
-                     "template_text": template_text,
-                     "template_instructions": template_instructions,
-                     "files": files,
-                 }
-             ),
-             files={"template_file": template_file},
+             json=jsonable_encoder(_request),
              headers=self._client_wrapper.get_headers(),
              timeout=60,
          )
@@ -628,9 +630,10 @@ class AsyncReportsClient:
          organization_id: typing.Optional[str] = None,
          name: str,
          template_text: str,
-         template_instructions: str,
+         template_instructions: typing.Optional[str] = OMIT,
+         existing_retriever_id: typing.Optional[str] = OMIT,
          files: typing.List[str],
-         template_file: typing.IO,
+         template_file: typing.Optional[str] = OMIT,
      ) -> ReportCreateResponse:
          """
          Create a new report.
@@ -644,25 +647,26 @@

              - template_text: str.

-             - template_instructions: str.
+             - template_instructions: typing.Optional[str].
+
+             - existing_retriever_id: typing.Optional[str].

              - files: typing.List[str].

-             - template_file: typing.IO.
+             - template_file: typing.Optional[str].
          """
+         _request: typing.Dict[str, typing.Any] = {"name": name, "template_text": template_text, "files": files}
+         if template_instructions is not OMIT:
+             _request["template_instructions"] = template_instructions
+         if existing_retriever_id is not OMIT:
+             _request["existing_retriever_id"] = existing_retriever_id
+         if template_file is not OMIT:
+             _request["template_file"] = template_file
          _response = await self._client_wrapper.httpx_client.request(
              "POST",
              urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/reports"),
              params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-             data=jsonable_encoder(
-                 {
-                     "name": name,
-                     "template_text": template_text,
-                     "template_instructions": template_instructions,
-                     "files": files,
-                 }
-             ),
-             files={"template_file": template_file},
+             json=jsonable_encoder(_request),
              headers=self._client_wrapper.get_headers(),
              timeout=60,
          )
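
create_report changes shape in two ways: template_instructions becomes optional and existing_retriever_id is added, and the request switches from a multipart upload (data= plus files=) to a plain JSON body, with template_file now an optional string rather than a typing.IO handle. Code that passed an open file object in 0.1.9 will break. A minimal sketch of the 0.1.11 call, reusing the client from the organizations example, with hypothetical ids:

# All ids and values are hypothetical.
report = client.reports.create_report(
    name="Q3 summary",
    template_text="...",
    files=["file_abc123"],
    existing_retriever_id="retr_456",  # new in 0.1.11, optional
    # template_instructions and template_file are now optional and are
    # simply omitted from the JSON body when not passed.
)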
llama_cloud/resources/retrievers/client.py
@@ -33,12 +33,18 @@ class RetrieversClient:
          self._client_wrapper = client_wrapper

      def list_retrievers(
-         self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+         self,
+         *,
+         name: typing.Optional[str] = None,
+         project_id: typing.Optional[str] = None,
+         organization_id: typing.Optional[str] = None,
      ) -> typing.List[Retriever]:
          """
          List Retrievers for a project.

          Parameters:
+             - name: typing.Optional[str].
+
              - project_id: typing.Optional[str].

              - organization_id: typing.Optional[str].
@@ -53,7 +59,7 @@ class RetrieversClient:
          _response = self._client_wrapper.httpx_client.request(
              "GET",
              urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/retrievers"),
-             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+             params=remove_none_from_dict({"name": name, "project_id": project_id, "organization_id": organization_id}),
              headers=self._client_wrapper.get_headers(),
              timeout=60,
          )
@@ -346,12 +352,18 @@ class AsyncRetrieversClient:
          self._client_wrapper = client_wrapper

      async def list_retrievers(
-         self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+         self,
+         *,
+         name: typing.Optional[str] = None,
+         project_id: typing.Optional[str] = None,
+         organization_id: typing.Optional[str] = None,
      ) -> typing.List[Retriever]:
          """
          List Retrievers for a project.

          Parameters:
+             - name: typing.Optional[str].
+
              - project_id: typing.Optional[str].

              - organization_id: typing.Optional[str].
@@ -366,7 +378,7 @@ class AsyncRetrieversClient:
          _response = await self._client_wrapper.httpx_client.request(
              "GET",
              urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/retrievers"),
-             params=remove_none_from_dict({"name": name, "project_id": project_id, "organization_id": organization_id}),
+             params=remove_none_from_dict({"name": name, "project_id": project_id, "organization_id": organization_id}),
              headers=self._client_wrapper.get_headers(),
              timeout=60,
          )
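
Finally, list_retrievers (sync and async) gains an optional name filter, sent as a query parameter and stripped by remove_none_from_dict when None. A sketch, reusing the client from the organizations example:

# The name filter is new in 0.1.11; the retriever name is hypothetical.
retrievers = client.retrievers.list_retrievers(
    name="my-retriever",  # sent as ?name=... only when not None
)
# project_id and organization_id behave as before; None values never
# reach the query string.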