llama-cloud 0.1.38__py3-none-any.whl → 0.1.40__py3-none-any.whl
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Potentially problematic release: this version of llama-cloud might be problematic.
- llama_cloud/__init__.py +12 -0
- llama_cloud/resources/admin/client.py +5 -5
- llama_cloud/resources/alpha/client.py +2 -8
- llama_cloud/resources/beta/client.py +30 -126
- llama_cloud/resources/chat_apps/client.py +8 -32
- llama_cloud/resources/classifier/client.py +8 -32
- llama_cloud/resources/data_sinks/client.py +8 -32
- llama_cloud/resources/data_sources/client.py +8 -32
- llama_cloud/resources/embedding_model_configs/client.py +12 -48
- llama_cloud/resources/files/client.py +42 -176
- llama_cloud/resources/jobs/client.py +2 -8
- llama_cloud/resources/llama_extract/client.py +40 -138
- llama_cloud/resources/organizations/client.py +4 -18
- llama_cloud/resources/parsing/client.py +12 -16
- llama_cloud/resources/pipelines/client.py +45 -32
- llama_cloud/resources/projects/client.py +18 -78
- llama_cloud/resources/reports/client.py +30 -126
- llama_cloud/resources/retrievers/client.py +12 -48
- llama_cloud/types/__init__.py +12 -0
- llama_cloud/types/extract_job_create.py +2 -0
- llama_cloud/types/extract_job_create_priority.py +29 -0
- llama_cloud/types/file.py +1 -1
- llama_cloud/types/job_names.py +0 -4
- llama_cloud/types/llama_extract_feature_availability.py +34 -0
- llama_cloud/types/llama_parse_parameters.py +1 -0
- llama_cloud/types/parse_job_config.py +1 -0
- llama_cloud/types/pipeline.py +4 -0
- llama_cloud/types/pipeline_create.py +2 -0
- llama_cloud/types/pipeline_file.py +4 -4
- llama_cloud/types/schema_generation_availability.py +33 -0
- llama_cloud/types/schema_generation_availability_status.py +17 -0
- llama_cloud/types/sparse_model_config.py +42 -0
- llama_cloud/types/sparse_model_type.py +33 -0
- llama_cloud/types/webhook_configuration.py +1 -0
- llama_cloud-0.1.40.dist-info/METADATA +106 -0
- {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/RECORD +38 -32
- {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/WHEEL +1 -1
- llama_cloud-0.1.38.dist-info/METADATA +0 -32
- {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/LICENSE +0 -0
llama_cloud/__init__.py
CHANGED
```diff
@@ -118,6 +118,7 @@ from .types import (
     ExtractJobCreate,
     ExtractJobCreateDataSchemaOverride,
     ExtractJobCreateDataSchemaOverrideZeroValue,
+    ExtractJobCreatePriority,
     ExtractMode,
     ExtractModels,
     ExtractResultset,
@@ -190,6 +191,7 @@ from .types import (
     LLamaParseTransformConfig,
     LegacyParseJobConfig,
     LicenseInfoResponse,
+    LlamaExtractFeatureAvailability,
     LlamaExtractModeAvailability,
     LlamaExtractModeAvailabilityStatus,
     LlamaExtractSettings,
@@ -344,9 +346,13 @@ from .types import (
     RetrieverCreate,
     RetrieverPipeline,
     Role,
+    SchemaGenerationAvailability,
+    SchemaGenerationAvailabilityStatus,
     SchemaRelaxMode,
     SemanticChunkingConfig,
     SentenceChunkingConfig,
+    SparseModelConfig,
+    SparseModelType,
     SrcAppSchemaChatChatMessage,
     StatusEnum,
     StructMode,
@@ -571,6 +577,7 @@ __all__ = [
     "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
     "ExtractJobCreateDataSchemaOverride",
     "ExtractJobCreateDataSchemaOverrideZeroValue",
+    "ExtractJobCreatePriority",
     "ExtractMode",
     "ExtractModels",
     "ExtractResultset",
@@ -649,6 +656,7 @@ __all__ = [
     "LegacyParseJobConfig",
     "LicenseInfoResponse",
     "LlamaCloudEnvironment",
+    "LlamaExtractFeatureAvailability",
     "LlamaExtractModeAvailability",
     "LlamaExtractModeAvailabilityStatus",
     "LlamaExtractSettings",
@@ -814,9 +822,13 @@ __all__ = [
     "RetrieverCreate",
     "RetrieverPipeline",
     "Role",
+    "SchemaGenerationAvailability",
+    "SchemaGenerationAvailabilityStatus",
     "SchemaRelaxMode",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
+    "SparseModelConfig",
+    "SparseModelType",
     "SrcAppSchemaChatChatMessage",
     "StatusEnum",
     "StructMode",
```
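Taken together, the `__init__.py` changes surface six new public symbols. A minimal import sketch (the names and paths follow directly from the diff above; nothing else about these models is assumed here):

```python
# New top-level exports in llama-cloud 0.1.40, per the __init__.py diff above.
# Each is defined in the correspondingly named module under llama_cloud/types/.
from llama_cloud import (
    ExtractJobCreatePriority,
    LlamaExtractFeatureAvailability,
    SchemaGenerationAvailability,
    SchemaGenerationAvailabilityStatus,
    SparseModelConfig,
    SparseModelType,
)
```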
llama_cloud/resources/admin/client.py
CHANGED

```diff
@@ -11,7 +11,7 @@ from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.file_store_info_response import FileStoreInfoResponse
 from ...types.http_validation_error import HttpValidationError
 from ...types.license_info_response import LicenseInfoResponse
-from ...types.
+from ...types.llama_extract_feature_availability import LlamaExtractFeatureAvailability

 try:
     import pydantic
@@ -80,7 +80,7 @@ class AdminClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_llamaextract_features(self) ->
+    def get_llamaextract_features(self) -> LlamaExtractFeatureAvailability:
         """
         Get LlamaExtract feature availability based on available models.

@@ -99,7 +99,7 @@ class AdminClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(LlamaExtractFeatureAvailability, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -167,7 +167,7 @@ class AsyncAdminClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_llamaextract_features(self) ->
+    async def get_llamaextract_features(self) -> LlamaExtractFeatureAvailability:
         """
         Get LlamaExtract feature availability based on available models.

@@ -186,7 +186,7 @@ class AsyncAdminClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(LlamaExtractFeatureAvailability, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
```
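Net effect on the admin resource: `get_llamaextract_features()` now carries a concrete return annotation and parses the response into the new model. A minimal call sketch, assuming the generated `LlamaCloud` entry point in `llama_cloud.client` and its `token` argument (neither appears in this diff):

```python
from llama_cloud.client import LlamaCloud

# Assumed client construction; the token value is a placeholder.
client = LlamaCloud(token="llx-...")

# Grounded in the AdminClient diff: returns LlamaExtractFeatureAvailability
# on 2xx and raises UnprocessableEntityError on a 422 response.
features = client.admin.get_llamaextract_features()

# The async client gains the identical method on AsyncAdminClient:
#     features = await async_client.admin.get_llamaextract_features()
```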
llama_cloud/resources/alpha/client.py
CHANGED

```diff
@@ -35,7 +35,6 @@ class AlphaClient:
         organization_id: typing.Optional[str] = None,
         configuration: str,
         file: typing.Optional[str] = OMIT,
-        project_id: typing.Optional[str] = None,
     ) -> ParsingJob:
         """
         Parameters:
@@ -46,8 +45,6 @@ class AlphaClient:
             - configuration: str.

             - file: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         """
         _request: typing.Dict[str, typing.Any] = {"configuration": configuration}
         if file is not OMIT:
@@ -57,7 +54,7 @@ class AlphaClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v2alpha1/parse/upload"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
@@ -82,7 +79,6 @@ class AsyncAlphaClient:
         organization_id: typing.Optional[str] = None,
         configuration: str,
         file: typing.Optional[str] = OMIT,
-        project_id: typing.Optional[str] = None,
     ) -> ParsingJob:
         """
         Parameters:
@@ -93,8 +89,6 @@ class AsyncAlphaClient:
             - configuration: str.

             - file: typing.Optional[str].
-
-            - project_id: typing.Optional[str].
         """
         _request: typing.Dict[str, typing.Any] = {"configuration": configuration}
         if file is not OMIT:
@@ -104,7 +98,7 @@ class AsyncAlphaClient:
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v2alpha1/parse/upload"),
             params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             json=jsonable_encoder(_request),
-            headers=
+            headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
```
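For callers of the alpha parse upload, dropping `project_id` from the signature is a breaking change, while `organization_id` survives; note that the `params=` line still includes `project_id` even though the keyword argument is gone. A hedged before/after sketch (the client entry point is the same assumption as above, and the method name `upload` is hypothetical, since the diff omits the `def` line):

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="llx-...")  # assumed entry point, placeholder token

# 0.1.38 (old signature) — project_id was an accepted keyword:
#     job = client.alpha.upload(configuration="...", project_id="...")
#
# 0.1.40 (new signature) — passing project_id now raises TypeError; the
# surviving keywords per the diff are configuration, file, and
# organization_id.
job = client.alpha.upload(
    configuration="...",        # required str
    organization_id="org_...",  # optional
)
# job is a ParsingJob, per the return annotation shown in the diff.
```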