llama-cloud 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release: this version of llama-cloud has been flagged as potentially problematic.
- llama_cloud/__init__.py +4 -16
- llama_cloud/client.py +0 -3
- llama_cloud/resources/__init__.py +0 -5
- llama_cloud/resources/files/client.py +34 -6
- llama_cloud/resources/llama_extract/client.py +126 -424
- llama_cloud/resources/parsing/client.py +82 -18
- llama_cloud/types/__init__.py +4 -10
- llama_cloud/types/extract_job.py +3 -1
- llama_cloud/types/extract_resultset.py +2 -6
- llama_cloud/types/extract_run.py +5 -0
- llama_cloud/types/extract_run_data_value.py +5 -0
- llama_cloud/types/{extraction_schema_data_schema_value.py → extract_run_extraction_metadata_value.py} +1 -1
- llama_cloud/types/extract_state.py +4 -4
- llama_cloud/types/llama_parse_parameters.py +3 -0
- {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.9.dist-info}/METADATA +2 -1
- {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.9.dist-info}/RECORD +18 -26
- {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.9.dist-info}/WHEEL +1 -1
- llama_cloud/resources/extraction/__init__.py +0 -5
- llama_cloud/resources/extraction/client.py +0 -756
- llama_cloud/resources/extraction/types/__init__.py +0 -6
- llama_cloud/resources/extraction/types/extraction_schema_create_data_schema_value.py +0 -7
- llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py +0 -7
- llama_cloud/types/extraction_job.py +0 -35
- llama_cloud/types/extraction_result.py +0 -44
- llama_cloud/types/extraction_result_data_value.py +0 -5
- llama_cloud/types/extraction_schema.py +0 -41
- {llama_cloud-0.1.8.dist-info → llama_cloud-0.1.9.dist-info}/LICENSE +0 -0
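The headline change is the removal of the legacy extraction resource and its ExtractionSchema/ExtractionJob/ExtractionResult models, together with a substantial rewrite of the llama_extract client; the expanded hunks below show the four deleted modules in full. Code that still imports the removed models will break on 0.1.9, so a guarded import is the cheapest way to stay compatible with both versions. A minimal sketch (the HAS_LEGACY_EXTRACTION flag name is illustrative, not part of the package):

# A minimal sketch: detect whether the legacy extraction models removed in
# llama-cloud 0.1.9 are still importable. The module path comes from the file
# list above; the HAS_LEGACY_EXTRACTION flag name is illustrative only.
try:
    from llama_cloud.types.extraction_schema import ExtractionSchema  # llama-cloud <= 0.1.8
    HAS_LEGACY_EXTRACTION = True
except ImportError:  # 0.1.9 and later: the module no longer exists
    ExtractionSchema = None  # type: ignore
    HAS_LEGACY_EXTRACTION = False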
llama_cloud/resources/extraction/types/__init__.py (deleted)
@@ -1,6 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from .extraction_schema_create_data_schema_value import ExtractionSchemaCreateDataSchemaValue
-from .extraction_schema_update_data_schema_value import ExtractionSchemaUpdateDataSchemaValue
-
-__all__ = ["ExtractionSchemaCreateDataSchemaValue", "ExtractionSchemaUpdateDataSchemaValue"]
llama_cloud/types/extraction_job.py (deleted)
@@ -1,35 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-from ..core.datetime_utils import serialize_datetime
-from .file import File
-from .status_enum import StatusEnum
-
-try:
-    import pydantic
-    if pydantic.__version__.startswith("1."):
-        raise ImportError
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
-
-class ExtractionJob(pydantic.BaseModel):
-    id: str = pydantic.Field(description="The id of the extraction job")
-    status: StatusEnum = pydantic.Field(description="The status of the extraction job")
-    file: File = pydantic.Field(description="The file that the extract was extracted from")
-
-    def json(self, **kwargs: typing.Any) -> str:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().json(**kwargs_with_defaults)
-
-    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().dict(**kwargs_with_defaults)
-
-    class Config:
-        frozen = True
-        smart_union = True
-        json_encoders = {dt.datetime: serialize_datetime}
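Each deleted model file opens with the same guarded import: on pydantic 2.x the version check falls through and pydantic.v1 is re-bound to the name pydantic, while on 1.x the deliberately raised ImportError drops into the except branch, which keeps the plain import. A standalone, annotated sketch of the same idea; note that it checks pydantic.VERSION (present on both major versions) rather than pydantic.__version__ as the generated code does:

# Compatibility shim in the style of the deleted files: whichever pydantic major
# version is installed, the name `pydantic` ends up bound to a v1-style API.
try:
    import pydantic

    if pydantic.VERSION.startswith("1."):
        # pydantic 1.x: raise so the except branch keeps the plain import.
        raise ImportError
    # pydantic 2.x: switch to its bundled v1 compatibility namespace.
    import pydantic.v1 as pydantic  # type: ignore
except ImportError:
    import pydantic  # type: ignore

# From here on, pydantic.BaseModel, pydantic.Field, etc. behave like pydantic v1.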
llama_cloud/types/extraction_result.py (deleted)
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-from ..core.datetime_utils import serialize_datetime
-from .extraction_result_data_value import ExtractionResultDataValue
-from .file import File
-
-try:
-    import pydantic
-    if pydantic.__version__.startswith("1."):
-        raise ImportError
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
-
-class ExtractionResult(pydantic.BaseModel):
-    """
-    Schema for an extraction result.
-    """
-
-    id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
-    schema_id: str = pydantic.Field(description="The id of the schema")
-    data: typing.Dict[str, typing.Optional[ExtractionResultDataValue]] = pydantic.Field(
-        description="The data extracted from the file"
-    )
-    file: File = pydantic.Field(description="The file that the extract was extracted from")
-
-    def json(self, **kwargs: typing.Any) -> str:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().json(**kwargs_with_defaults)
-
-    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().dict(**kwargs_with_defaults)
-
-    class Config:
-        frozen = True
-        smart_union = True
-        json_encoders = {dt.datetime: serialize_datetime}
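Beyond the field list, each deleted model overrides json() and dict() so that by_alias=True and exclude_unset=True are applied by default, and its Config freezes instances and serializes datetimes through serialize_datetime. A self-contained sketch of that serialization pattern; the FrozenRecord model is illustrative, not part of llama-cloud:

import datetime as dt
import typing

import pydantic  # assumes a v1-style API, as selected by the shim shown above


class FrozenRecord(pydantic.BaseModel):
    # Stand-in mirroring the serialization defaults of the deleted models.
    id: str
    created_at: typing.Optional[dt.datetime] = None  # explicit default keeps pydantic 2.x happy too

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # Respect field aliases and drop fields the caller never set.
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True  # instances are immutable (and hashable)


record = FrozenRecord(id="job-123")
print(record.dict())  # {'id': 'job-123'} -- created_at was never set, so it is dropped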
llama_cloud/types/extraction_schema.py (deleted)
@@ -1,41 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-from ..core.datetime_utils import serialize_datetime
-from .extraction_schema_data_schema_value import ExtractionSchemaDataSchemaValue
-
-try:
-    import pydantic
-    if pydantic.__version__.startswith("1."):
-        raise ImportError
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
-
-class ExtractionSchema(pydantic.BaseModel):
-    """
-    Schema for extraction schema.
-    """
-
-    id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
-    name: str = pydantic.Field(description="The name of the extraction schema")
-    project_id: str = pydantic.Field(description="The ID of the project that the extraction schema belongs to")
-    data_schema: typing.Optional[typing.Dict[str, typing.Optional[ExtractionSchemaDataSchemaValue]]]
-
-    def json(self, **kwargs: typing.Any) -> str:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().json(**kwargs_with_defaults)
-
-    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().dict(**kwargs_with_defaults)
-
-    class Config:
-        frozen = True
-        smart_union = True
-        json_encoders = {dt.datetime: serialize_datetime}
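For reference, this is roughly how the removed ExtractionSchema was constructed. The snippet only runs against llama-cloud 0.1.8 or earlier, the field values are made up, and it assumes the ExtractionSchemaDataSchemaValue alias accepts arbitrary JSON values:

# Illustrative only: field names come from the deleted definition above; the
# module path exists only on llama-cloud <= 0.1.8.
from llama_cloud.types.extraction_schema import ExtractionSchema

schema = ExtractionSchema(
    id="schema-123",
    name="invoice-fields",
    project_id="project-456",
    # Assumes the data_schema values accept arbitrary JSON (e.g. a JSON Schema fragment).
    data_schema={"type": "object", "properties": {"total": {"type": "number"}}},
)
# The overridden json() applies by_alias/exclude_unset, so the unset timestamps are omitted.
print(schema.json())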