llama-cloud 0.1.34__py3-none-any.whl → 0.1.36__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Potentially problematic release.
- llama_cloud/__init__.py +54 -2
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +6 -0
- llama_cloud/resources/admin/client.py +51 -0
- llama_cloud/resources/beta/client.py +211 -8
- llama_cloud/resources/classifier/client.py +231 -181
- llama_cloud/resources/data_sinks/types/data_sink_update_component.py +2 -0
- llama_cloud/resources/files/client.py +226 -0
- llama_cloud/resources/llama_extract/__init__.py +4 -0
- llama_cloud/resources/llama_extract/client.py +275 -4
- llama_cloud/resources/llama_extract/types/__init__.py +4 -0
- llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema.py +9 -0
- llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema_zero_value.py +7 -0
- llama_cloud/resources/parsing/client.py +24 -0
- llama_cloud/resources/users/__init__.py +2 -0
- llama_cloud/resources/users/client.py +155 -0
- llama_cloud/types/__init__.py +48 -2
- llama_cloud/types/classification_result.py +4 -5
- llama_cloud/types/classifier_rule.py +43 -0
- llama_cloud/types/classify_job.py +45 -0
- llama_cloud/types/{classify_response.py → classify_job_results.py} +3 -6
- llama_cloud/types/classify_job_with_status.py +47 -0
- llama_cloud/types/classify_parsing_configuration.py +38 -0
- llama_cloud/types/cloud_astra_db_vector_store.py +51 -0
- llama_cloud/types/cloud_confluence_data_source.py +15 -0
- llama_cloud/types/configurable_data_sink_names.py +4 -0
- llama_cloud/types/data_sink_component.py +2 -0
- llama_cloud/types/data_sink_create_component.py +2 -0
- llama_cloud/types/data_source_reader_version_metadata.py +2 -1
- llama_cloud/types/data_source_reader_version_metadata_reader_version.py +17 -0
- llama_cloud/types/extract_agent.py +3 -0
- llama_cloud/types/extract_config.py +4 -0
- llama_cloud/types/failure_handling_config.py +37 -0
- llama_cloud/types/file_classification.py +41 -0
- llama_cloud/types/file_data.py +36 -0
- llama_cloud/types/file_store_info_response.py +34 -0
- llama_cloud/types/file_store_info_response_status.py +25 -0
- llama_cloud/types/legacy_parse_job_config.py +3 -0
- llama_cloud/types/llama_extract_settings.py +4 -0
- llama_cloud/types/llama_parse_parameters.py +3 -0
- llama_cloud/types/managed_open_ai_embedding.py +36 -0
- llama_cloud/types/managed_open_ai_embedding_config.py +34 -0
- llama_cloud/types/multimodal_parse_resolution.py +17 -0
- llama_cloud/types/paginated_response_quota_configuration.py +36 -0
- llama_cloud/types/parse_job_config.py +3 -0
- llama_cloud/types/pipeline_embedding_config.py +11 -0
- llama_cloud/types/quota_configuration.py +53 -0
- llama_cloud/types/quota_configuration_configuration_type.py +33 -0
- llama_cloud/types/quota_configuration_status.py +21 -0
- llama_cloud/types/quota_rate_limit_configuration_value.py +38 -0
- llama_cloud/types/quota_rate_limit_configuration_value_denominator_units.py +29 -0
- llama_cloud/types/supported_llm_model_names.py +12 -0
- llama_cloud/types/update_user_response.py +33 -0
- llama_cloud/types/usage_response_active_alerts_item.py +4 -0
- llama_cloud/types/user_summary.py +38 -0
- llama_cloud/types/webhook_configuration_webhook_events_item.py +20 -0
- {llama_cloud-0.1.34.dist-info → llama_cloud-0.1.36.dist-info}/METADATA +2 -4
- {llama_cloud-0.1.34.dist-info → llama_cloud-0.1.36.dist-info}/RECORD +60 -34
- {llama_cloud-0.1.34.dist-info → llama_cloud-0.1.36.dist-info}/WHEEL +1 -1
- {llama_cloud-0.1.34.dist-info → llama_cloud-0.1.36.dist-info}/LICENSE +0 -0
llama_cloud/resources/parsing/client.py
CHANGED

@@ -118,6 +118,7 @@ class ParsingClient:
         output_s_3_region: str,
         target_pages: str,
         webhook_url: str,
+        webhook_configurations: str,
         job_timeout_in_seconds: float,
         job_timeout_extra_time_per_page_in_seconds: float,
     ) -> ParsingJob:
@@ -151,6 +152,8 @@ class ParsingClient:
 
         - webhook_url: str.
 
+        - webhook_configurations: str.
+
         - job_timeout_in_seconds: float.
 
         - job_timeout_extra_time_per_page_in_seconds: float.
@@ -166,6 +169,7 @@ class ParsingClient:
            "output_s3_region": output_s_3_region,
            "target_pages": target_pages,
            "webhook_url": webhook_url,
+           "webhook_configurations": webhook_configurations,
            "job_timeout_in_seconds": job_timeout_in_seconds,
            "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
        }
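The new webhook_configurations parameter threads through the job-submission signature, the docstring, and the request body. Below is a minimal usage sketch; the method name (upload_file) and the JSON shape of the value are assumptions, and only the parameter name and its str type come from the diff:

import json

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# webhook_configurations is declared as `str` in the signature, so a structured
# payload presumably has to be serialized by the caller before it is sent.
job = client.parsing.upload_file(  # hypothetical method name
    webhook_url="https://example.com/parse-hook",
    webhook_configurations=json.dumps(
        [{"url": "https://example.com/parse-hook"}]  # assumed payload shape
    ),
)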
@@ -242,6 +246,7 @@ class ParsingClient:
         page_separator: str,
         page_suffix: str,
         preserve_layout_alignment_across_pages: bool,
+        preserve_very_small_text: bool,
         skip_diagonal_text: bool,
         spreadsheet_extract_sub_tables: bool,
         structured_output: bool,
@@ -253,6 +258,7 @@ class ParsingClient:
         vendor_multimodal_model_name: str,
         model: str,
         webhook_url: str,
+        webhook_configurations: str,
         preset: str,
         parse_mode: typing.Optional[ParsingMode] = OMIT,
         page_error_tolerance: float,
@@ -389,6 +395,8 @@ class ParsingClient:
 
         - preserve_layout_alignment_across_pages: bool.
 
+        - preserve_very_small_text: bool.
+
         - skip_diagonal_text: bool.
 
         - spreadsheet_extract_sub_tables: bool.
@@ -411,6 +419,8 @@ class ParsingClient:
 
         - webhook_url: str.
 
+        - webhook_configurations: str.
+
         - preset: str.
 
         - parse_mode: typing.Optional[ParsingMode].
@@ -530,6 +540,7 @@ class ParsingClient:
            "page_separator": page_separator,
            "page_suffix": page_suffix,
            "preserve_layout_alignment_across_pages": preserve_layout_alignment_across_pages,
+           "preserve_very_small_text": preserve_very_small_text,
            "skip_diagonal_text": skip_diagonal_text,
            "spreadsheet_extract_sub_tables": spreadsheet_extract_sub_tables,
            "structured_output": structured_output,
@@ -541,6 +552,7 @@ class ParsingClient:
            "vendor_multimodal_model_name": vendor_multimodal_model_name,
            "model": model,
            "webhook_url": webhook_url,
+           "webhook_configurations": webhook_configurations,
            "preset": preset,
            "page_error_tolerance": page_error_tolerance,
            "replace_failed_page_with_error_message_prefix": replace_failed_page_with_error_message_prefix,
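preserve_very_small_text joins the existing page-layout toggles on the second parsing method. A hedged sketch, continuing the client from the example above (the call itself is hypothetical; the flag names are taken from the diff):

job = client.parsing.upload_file(  # hypothetical method name
    preserve_layout_alignment_across_pages=True,
    preserve_very_small_text=True,  # new in 0.1.36: presumably keeps glyphs that would otherwise be dropped as too small
    skip_diagonal_text=False,
)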
@@ -1278,6 +1290,7 @@ class AsyncParsingClient:
         output_s_3_region: str,
         target_pages: str,
         webhook_url: str,
+        webhook_configurations: str,
         job_timeout_in_seconds: float,
         job_timeout_extra_time_per_page_in_seconds: float,
     ) -> ParsingJob:
@@ -1311,6 +1324,8 @@ class AsyncParsingClient:
 
         - webhook_url: str.
 
+        - webhook_configurations: str.
+
         - job_timeout_in_seconds: float.
 
         - job_timeout_extra_time_per_page_in_seconds: float.
@@ -1326,6 +1341,7 @@ class AsyncParsingClient:
            "output_s3_region": output_s_3_region,
            "target_pages": target_pages,
            "webhook_url": webhook_url,
+           "webhook_configurations": webhook_configurations,
            "job_timeout_in_seconds": job_timeout_in_seconds,
            "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
        }
@@ -1402,6 +1418,7 @@ class AsyncParsingClient:
         page_separator: str,
         page_suffix: str,
         preserve_layout_alignment_across_pages: bool,
+        preserve_very_small_text: bool,
         skip_diagonal_text: bool,
         spreadsheet_extract_sub_tables: bool,
         structured_output: bool,
@@ -1413,6 +1430,7 @@ class AsyncParsingClient:
         vendor_multimodal_model_name: str,
         model: str,
         webhook_url: str,
+        webhook_configurations: str,
         preset: str,
         parse_mode: typing.Optional[ParsingMode] = OMIT,
         page_error_tolerance: float,
@@ -1549,6 +1567,8 @@ class AsyncParsingClient:
 
         - preserve_layout_alignment_across_pages: bool.
 
+        - preserve_very_small_text: bool.
+
         - skip_diagonal_text: bool.
 
         - spreadsheet_extract_sub_tables: bool.
@@ -1571,6 +1591,8 @@ class AsyncParsingClient:
 
         - webhook_url: str.
 
+        - webhook_configurations: str.
+
         - preset: str.
 
         - parse_mode: typing.Optional[ParsingMode].
@@ -1690,6 +1712,7 @@ class AsyncParsingClient:
            "page_separator": page_separator,
            "page_suffix": page_suffix,
            "preserve_layout_alignment_across_pages": preserve_layout_alignment_across_pages,
+           "preserve_very_small_text": preserve_very_small_text,
            "skip_diagonal_text": skip_diagonal_text,
            "spreadsheet_extract_sub_tables": spreadsheet_extract_sub_tables,
            "structured_output": structured_output,
@@ -1701,6 +1724,7 @@ class AsyncParsingClient:
            "vendor_multimodal_model_name": vendor_multimodal_model_name,
            "model": model,
            "webhook_url": webhook_url,
+           "webhook_configurations": webhook_configurations,
            "preset": preset,
            "page_error_tolerance": page_error_tolerance,
            "replace_failed_page_with_error_message_prefix": replace_failed_page_with_error_message_prefix,
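The AsyncParsingClient hunks above mirror the synchronous ones line for line; the only caller-visible difference is the await. A sketch under the same assumptions as the synchronous examples:

import asyncio

from llama_cloud.client import AsyncLlamaCloud

async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    job = await client.parsing.upload_file(  # hypothetical method name
        preserve_very_small_text=True,
        webhook_configurations="[]",  # assumed JSON-encoded value
    )

asyncio.run(main())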
llama_cloud/resources/users/client.py
ADDED

@@ -0,0 +1,155 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.http_validation_error import HttpValidationError
+from ...types.update_user_response import UpdateUserResponse
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class UsersClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def update_user(
+        self,
+        user_id: str,
+        *,
+        first_name: typing.Optional[str] = OMIT,
+        last_name: typing.Optional[str] = OMIT,
+        email: typing.Optional[str] = OMIT,
+        current_password: typing.Optional[str] = OMIT,
+        new_password: typing.Optional[str] = OMIT,
+    ) -> UpdateUserResponse:
+        """
+        Parameters:
+        - user_id: str.
+
+        - first_name: typing.Optional[str].
+
+        - last_name: typing.Optional[str].
+
+        - email: typing.Optional[str].
+
+        - current_password: typing.Optional[str].
+
+        - new_password: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.users.update_user(
+            user_id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if first_name is not OMIT:
+            _request["first_name"] = first_name
+        if last_name is not OMIT:
+            _request["last_name"] = last_name
+        if email is not OMIT:
+            _request["email"] = email
+        if current_password is not OMIT:
+            _request["current_password"] = current_password
+        if new_password is not OMIT:
+            _request["new_password"] = new_password
+        _response = self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/users/{user_id}"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(UpdateUserResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncUsersClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def update_user(
+        self,
+        user_id: str,
+        *,
+        first_name: typing.Optional[str] = OMIT,
+        last_name: typing.Optional[str] = OMIT,
+        email: typing.Optional[str] = OMIT,
+        current_password: typing.Optional[str] = OMIT,
+        new_password: typing.Optional[str] = OMIT,
+    ) -> UpdateUserResponse:
+        """
+        Parameters:
+        - user_id: str.
+
+        - first_name: typing.Optional[str].
+
+        - last_name: typing.Optional[str].
+
+        - email: typing.Optional[str].
+
+        - current_password: typing.Optional[str].
+
+        - new_password: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.users.update_user(
+            user_id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {}
+        if first_name is not OMIT:
+            _request["first_name"] = first_name
+        if last_name is not OMIT:
+            _request["last_name"] = last_name
+        if email is not OMIT:
+            _request["email"] = email
+        if current_password is not OMIT:
+            _request["current_password"] = current_password
+        if new_password is not OMIT:
+            _request["new_password"] = new_password
+        _response = await self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/users/{user_id}"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(UpdateUserResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
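The generated client uses an OMIT sentinel (Ellipsis cast to Any) as the default for every optional field, and the body-building code only serializes fields whose value is not OMIT, so a partial update sends exactly the fields you pass. Usage, adapted from the docstring example above (IDs and values are placeholders):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Sends a PUT to api/v1/users/{user_id} with body {"email": ...} only;
# first_name, last_name, and the password fields stay at OMIT and are skipped.
client.users.update_user(
    user_id="user_123",  # placeholder ID
    email="new-address@example.com",
)

# A password change presumably requires sending both fields explicitly:
client.users.update_user(
    user_id="user_123",
    current_password="old-secret",
    new_password="new-secret",
)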
llama_cloud/types/__init__.py
CHANGED
@@ -41,7 +41,12 @@ from .chat_app_response import ChatAppResponse
 from .chat_data import ChatData
 from .chunk_mode import ChunkMode
 from .classification_result import ClassificationResult
-from .classify_response import ClassifyResponse
+from .classifier_rule import ClassifierRule
+from .classify_job import ClassifyJob
+from .classify_job_results import ClassifyJobResults
+from .classify_job_with_status import ClassifyJobWithStatus
+from .classify_parsing_configuration import ClassifyParsingConfiguration
+from .cloud_astra_db_vector_store import CloudAstraDbVectorStore
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_box_data_source import CloudBoxDataSource
@@ -79,6 +84,7 @@ from .data_source_create_component import DataSourceCreateComponent
 from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
 from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
+from .data_source_reader_version_metadata_reader_version import DataSourceReaderVersionMetadataReaderVersion
 from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
 from .delete_params import DeleteParams
 from .document_block import DocumentBlock
@@ -138,12 +144,17 @@ from .extract_schema_validate_response_data_schema_value import ExtractSchemaVal
 from .extract_state import ExtractState
 from .extract_target import ExtractTarget
 from .fail_page_mode import FailPageMode
+from .failure_handling_config import FailureHandlingConfig
 from .file import File
+from .file_classification import FileClassification
 from .file_count_by_status_response import FileCountByStatusResponse
+from .file_data import FileData
 from .file_id_presigned_url import FileIdPresignedUrl
 from .file_parse_public import FileParsePublic
 from .file_permission_info_value import FilePermissionInfoValue
 from .file_resource_info_value import FileResourceInfoValue
+from .file_store_info_response import FileStoreInfoResponse
+from .file_store_info_response_status import FileStoreInfoResponseStatus
 from .filter_condition import FilterCondition
 from .filter_operation import FilterOperation
 from .filter_operation_eq import FilterOperationEq
@@ -199,12 +210,15 @@ from .llm_parameters import LlmParameters
 from .load_files_job_config import LoadFilesJobConfig
 from .managed_ingestion_status import ManagedIngestionStatus
 from .managed_ingestion_status_response import ManagedIngestionStatusResponse
+from .managed_open_ai_embedding import ManagedOpenAiEmbedding
+from .managed_open_ai_embedding_config import ManagedOpenAiEmbeddingConfig
 from .message_annotation import MessageAnnotation
 from .message_role import MessageRole
 from .metadata_filter import MetadataFilter
 from .metadata_filter_value import MetadataFilterValue
 from .metadata_filters import MetadataFilters
 from .metadata_filters_filters_item import MetadataFiltersFiltersItem
+from .multimodal_parse_resolution import MultimodalParseResolution
 from .node_relationship import NodeRelationship
 from .none_chunking_config import NoneChunkingConfig
 from .none_segmentation_config import NoneSegmentationConfig
@@ -225,6 +239,7 @@ from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesRe
 from .paginated_report_response import PaginatedReportResponse
 from .paginated_response_agent_data import PaginatedResponseAgentData
 from .paginated_response_aggregate_group import PaginatedResponseAggregateGroup
+from .paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
 from .parse_job_config import ParseJobConfig
 from .parse_job_config_priority import ParseJobConfigPriority
 from .parse_plan_level import ParsePlanLevel
@@ -268,6 +283,7 @@ from .pipeline_embedding_config import (
     PipelineEmbeddingConfig_CohereEmbedding,
     PipelineEmbeddingConfig_GeminiEmbedding,
     PipelineEmbeddingConfig_HuggingfaceApiEmbedding,
+    PipelineEmbeddingConfig_ManagedOpenaiEmbedding,
     PipelineEmbeddingConfig_OpenaiEmbedding,
     PipelineEmbeddingConfig_VertexaiEmbedding,
 )
@@ -304,6 +320,11 @@ from .progress_event_status import ProgressEventStatus
 from .project import Project
 from .project_create import ProjectCreate
 from .prompt_conf import PromptConf
+from .quota_configuration import QuotaConfiguration
+from .quota_configuration_configuration_type import QuotaConfigurationConfigurationType
+from .quota_configuration_status import QuotaConfigurationStatus
+from .quota_rate_limit_configuration_value import QuotaRateLimitConfigurationValue
+from .quota_rate_limit_configuration_value_denominator_units import QuotaRateLimitConfigurationValueDenominatorUnits
 from .re_rank_config import ReRankConfig
 from .re_ranker_type import ReRankerType
 from .recurring_credit_grant import RecurringCreditGrant
@@ -349,6 +370,7 @@ from .text_node import TextNode
 from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore
 from .token_chunking_config import TokenChunkingConfig
+from .update_user_response import UpdateUserResponse
 from .usage_and_plan import UsageAndPlan
 from .usage_metric_response import UsageMetricResponse
 from .usage_response import UsageResponse
@@ -358,6 +380,7 @@ from .user_organization import UserOrganization
 from .user_organization_create import UserOrganizationCreate
 from .user_organization_delete import UserOrganizationDelete
 from .user_organization_role import UserOrganizationRole
+from .user_summary import UserSummary
 from .validation_error import ValidationError
 from .validation_error_loc_item import ValidationErrorLocItem
 from .vertex_ai_embedding_config import VertexAiEmbeddingConfig
@@ -404,7 +427,12 @@ __all__ = [
     "ChatData",
     "ChunkMode",
     "ClassificationResult",
-    "ClassifyResponse",
+    "ClassifierRule",
+    "ClassifyJob",
+    "ClassifyJobResults",
+    "ClassifyJobWithStatus",
+    "ClassifyParsingConfiguration",
+    "CloudAstraDbVectorStore",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudBoxDataSource",
@@ -442,6 +470,7 @@ __all__ = [
     "DataSourceCreateCustomMetadataValue",
     "DataSourceCustomMetadataValue",
     "DataSourceReaderVersionMetadata",
+    "DataSourceReaderVersionMetadataReaderVersion",
     "DataSourceUpdateDispatcherConfig",
     "DeleteParams",
     "DocumentBlock",
@@ -497,12 +526,17 @@ __all__ = [
     "ExtractState",
     "ExtractTarget",
     "FailPageMode",
+    "FailureHandlingConfig",
     "File",
+    "FileClassification",
     "FileCountByStatusResponse",
+    "FileData",
     "FileIdPresignedUrl",
     "FileParsePublic",
     "FilePermissionInfoValue",
     "FileResourceInfoValue",
+    "FileStoreInfoResponse",
+    "FileStoreInfoResponseStatus",
     "FilterCondition",
     "FilterOperation",
     "FilterOperationEq",
@@ -554,12 +588,15 @@ __all__ = [
     "LoadFilesJobConfig",
     "ManagedIngestionStatus",
     "ManagedIngestionStatusResponse",
+    "ManagedOpenAiEmbedding",
+    "ManagedOpenAiEmbeddingConfig",
     "MessageAnnotation",
     "MessageRole",
     "MetadataFilter",
     "MetadataFilterValue",
     "MetadataFilters",
     "MetadataFiltersFiltersItem",
+    "MultimodalParseResolution",
     "NodeRelationship",
     "NoneChunkingConfig",
     "NoneSegmentationConfig",
@@ -580,6 +617,7 @@ __all__ = [
     "PaginatedReportResponse",
     "PaginatedResponseAgentData",
     "PaginatedResponseAggregateGroup",
+    "PaginatedResponseQuotaConfiguration",
     "ParseJobConfig",
     "ParseJobConfigPriority",
     "ParsePlanLevel",
@@ -620,6 +658,7 @@ __all__ = [
     "PipelineEmbeddingConfig_CohereEmbedding",
     "PipelineEmbeddingConfig_GeminiEmbedding",
     "PipelineEmbeddingConfig_HuggingfaceApiEmbedding",
+    "PipelineEmbeddingConfig_ManagedOpenaiEmbedding",
     "PipelineEmbeddingConfig_OpenaiEmbedding",
     "PipelineEmbeddingConfig_VertexaiEmbedding",
     "PipelineFile",
@@ -651,6 +690,11 @@ __all__ = [
     "Project",
     "ProjectCreate",
     "PromptConf",
+    "QuotaConfiguration",
+    "QuotaConfigurationConfigurationType",
+    "QuotaConfigurationStatus",
+    "QuotaRateLimitConfigurationValue",
+    "QuotaRateLimitConfigurationValueDenominatorUnits",
     "ReRankConfig",
     "ReRankerType",
     "RecurringCreditGrant",
@@ -694,6 +738,7 @@ __all__ = [
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
     "TokenChunkingConfig",
+    "UpdateUserResponse",
     "UsageAndPlan",
     "UsageMetricResponse",
     "UsageResponse",
@@ -703,6 +748,7 @@ __all__ = [
     "UserOrganizationCreate",
     "UserOrganizationDelete",
     "UserOrganizationRole",
+    "UserSummary",
     "ValidationError",
     "ValidationErrorLocItem",
     "VertexAiEmbeddingConfig",
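Everything added here is re-exported, so the new models import directly from llama_cloud.types (names taken from the __init__.py diff above):

from llama_cloud.types import (
    ClassifierRule,
    ClassifyJob,
    ClassifyJobResults,
    ClassifyJobWithStatus,
    ManagedOpenAiEmbedding,
    QuotaConfiguration,
    UpdateUserResponse,
    UserSummary,
)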
llama_cloud/types/classification_result.py
CHANGED

@@ -17,14 +17,13 @@ except ImportError:
 class ClassificationResult(pydantic.BaseModel):
     """
     Result of classifying a single file.
-
-    Contains the classification outcome with confidence score and matched rule info.
     """
 
-
-
+    reasoning: str = pydantic.Field(
+        description="Step-by-step explanation of why this classification was chosen and the confidence score assigned"
+    )
     confidence: float = pydantic.Field(description="Confidence score of the classification (0.0-1.0)")
-
+    type: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
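The reworked model makes reasoning a required field and leaves type optional. Constructing one directly, with illustrative values:

from llama_cloud.types import ClassificationResult

result = ClassificationResult(
    reasoning="Header shows an invoice number and a line-item table with a total due.",
    confidence=0.92,
    type="invoice",  # Optional[str]; None presumably marks an unclassified file
)
print(result.json())  # by_alias/exclude_unset defaults applied, per the overrides above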
llama_cloud/types/classifier_rule.py
ADDED

@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ClassifierRule(pydantic.BaseModel):
+    """
+    A rule for classifying documents - v0 simplified version.
+
+    This represents a single classification rule that will be applied to documents.
+    All rules are content-based and use natural language descriptions.
+    """
+
+    type: str = pydantic.Field(
+        description="The document type to assign when this rule matches (e.g., 'invoice', 'receipt', 'contract')"
+    )
+    description: str = pydantic.Field(
+        description="Natural language description of what to classify. Be specific about the content characteristics that identify this document type."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
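Since this is a frozen (immutable) pydantic model with two required string fields, rules are cheap to build inline; the rule text below is illustrative:

from llama_cloud.types import ClassifierRule

rules = [
    ClassifierRule(
        type="invoice",
        description="A billing document with an invoice number, line items, and an amount due.",
    ),
    ClassifierRule(
        type="receipt",
        description="Proof of a completed payment, typically with a merchant name and timestamp.",
    ),
]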
llama_cloud/types/classify_job.py
ADDED

@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .classifier_rule import ClassifierRule
+from .classify_parsing_configuration import ClassifyParsingConfiguration
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ClassifyJob(pydantic.BaseModel):
+    """
+    A classify job.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    rules: typing.List[ClassifierRule] = pydantic.Field(description="The rules to classify the files")
+    user_id: str = pydantic.Field(description="The ID of the user")
+    project_id: str = pydantic.Field(description="The ID of the project")
+    parsing_configuration: typing.Optional[ClassifyParsingConfiguration] = pydantic.Field(
+        description="The configuration for the parsing job"
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
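Because the model is generated against the pydantic v1 API (via the pydantic.v1 shim on v2 installs), v1-style classmethods such as parse_obj validate raw API payloads; the payload below is illustrative:

from llama_cloud.types import ClassifyJob

payload = {
    "id": "job_abc123",
    "rules": [{"type": "invoice", "description": "Billing documents."}],
    "user_id": "user_123",
    "project_id": "proj_456",
}
job = ClassifyJob.parse_obj(payload)  # created_at/updated_at/parsing_configuration default to None
print(job.rules[0].type)  # "invoice"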
llama_cloud/types/{classify_response.py → classify_job_results.py}
RENAMED
CHANGED

@@ -4,7 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .classification_result import ClassificationResult
+from .file_classification import FileClassification
 
 try:
     import pydantic
@@ -15,17 +15,14 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class ClassifyResponse(pydantic.BaseModel):
+class ClassifyJobResults(pydantic.BaseModel):
     """
     Response model for the classify endpoint following AIP-132 pagination standard.
-
-    Contains classification results with pagination support and summary statistics.
     """
 
-    items: typing.List[ClassificationResult] = pydantic.Field(description="The list of items.")
+    items: typing.List[FileClassification] = pydantic.Field(description="The list of items.")
     next_page_token: typing.Optional[str]
     total_size: typing.Optional[int]
-    unknown_count: int = pydantic.Field(description="Number of files that couldn't be classified")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
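ClassifyJobResults follows the AIP-132 pattern: page through by feeding next_page_token back until it comes back empty. A hedged sketch reusing the client from the earlier examples; the accessor method and its page_token parameter are assumptions, and only the model fields come from the diff:

page_token = None
while True:
    results = client.classifier.get_results(  # hypothetical accessor
        job_id="job_abc123",
        page_token=page_token,
    )
    for item in results.items:  # each item is a FileClassification
        ...
    page_token = results.next_page_token
    if not page_token:
        break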
llama_cloud/types/classify_job_with_status.py
ADDED

@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .classifier_rule import ClassifierRule
+from .classify_parsing_configuration import ClassifyParsingConfiguration
+from .status_enum import StatusEnum
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ClassifyJobWithStatus(pydantic.BaseModel):
+    """
+    A classify job with status.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    rules: typing.List[ClassifierRule] = pydantic.Field(description="The rules to classify the files")
+    user_id: str = pydantic.Field(description="The ID of the user")
+    project_id: str = pydantic.Field(description="The ID of the project")
+    parsing_configuration: typing.Optional[ClassifyParsingConfiguration] = pydantic.Field(
+        description="The configuration for the parsing job"
+    )
+    status: StatusEnum = pydantic.Field(description="The status of the classify job")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
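ClassifyJobWithStatus layers a StatusEnum field onto ClassifyJob, which is what a polling loop would key on. A hedged sketch, again reusing the client from the earlier examples; the accessor and the terminal status names are assumptions, since the StatusEnum members are not shown in this diff:

import time

job = client.classifier.get_job("job_abc123")  # hypothetical accessor
while str(job.status).upper() not in ("SUCCESS", "ERROR"):  # assumed terminal states
    time.sleep(2)
    job = client.classifier.get_job("job_abc123")
print(f"classify job finished with status {job.status}")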