llama-cloud 0.1.38__py3-none-any.whl → 0.1.40__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +12 -0
- llama_cloud/resources/admin/client.py +5 -5
- llama_cloud/resources/alpha/client.py +2 -8
- llama_cloud/resources/beta/client.py +30 -126
- llama_cloud/resources/chat_apps/client.py +8 -32
- llama_cloud/resources/classifier/client.py +8 -32
- llama_cloud/resources/data_sinks/client.py +8 -32
- llama_cloud/resources/data_sources/client.py +8 -32
- llama_cloud/resources/embedding_model_configs/client.py +12 -48
- llama_cloud/resources/files/client.py +42 -176
- llama_cloud/resources/jobs/client.py +2 -8
- llama_cloud/resources/llama_extract/client.py +40 -138
- llama_cloud/resources/organizations/client.py +4 -18
- llama_cloud/resources/parsing/client.py +12 -16
- llama_cloud/resources/pipelines/client.py +45 -32
- llama_cloud/resources/projects/client.py +18 -78
- llama_cloud/resources/reports/client.py +30 -126
- llama_cloud/resources/retrievers/client.py +12 -48
- llama_cloud/types/__init__.py +12 -0
- llama_cloud/types/extract_job_create.py +2 -0
- llama_cloud/types/extract_job_create_priority.py +29 -0
- llama_cloud/types/file.py +1 -1
- llama_cloud/types/job_names.py +0 -4
- llama_cloud/types/llama_extract_feature_availability.py +34 -0
- llama_cloud/types/llama_parse_parameters.py +1 -0
- llama_cloud/types/parse_job_config.py +1 -0
- llama_cloud/types/pipeline.py +4 -0
- llama_cloud/types/pipeline_create.py +2 -0
- llama_cloud/types/pipeline_file.py +4 -4
- llama_cloud/types/schema_generation_availability.py +33 -0
- llama_cloud/types/schema_generation_availability_status.py +17 -0
- llama_cloud/types/sparse_model_config.py +42 -0
- llama_cloud/types/sparse_model_type.py +33 -0
- llama_cloud/types/webhook_configuration.py +1 -0
- llama_cloud-0.1.40.dist-info/METADATA +106 -0
- {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/RECORD +38 -32
- {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/WHEEL +1 -1
- llama_cloud-0.1.38.dist-info/METADATA +0 -32
- {llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/LICENSE +0 -0
llama_cloud/types/sparse_model_type.py (new file)

@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class SparseModelType(str, enum.Enum):
+    """
+    Enum for sparse model types supported in LlamaCloud.
+
+    SPLADE: Uses HuggingFace Splade model for sparse embeddings
+    BM25: Uses Qdrant's FastEmbed BM25 model for sparse embeddings
+    AUTO: Automatically selects based on deployment mode (BYOC uses term frequency, Cloud uses Splade)
+    """
+
+    SPLADE = "splade"
+    BM_25 = "bm25"
+    AUTO = "auto"
+
+    def visit(
+        self,
+        splade: typing.Callable[[], T_Result],
+        bm_25: typing.Callable[[], T_Result],
+        auto: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is SparseModelType.SPLADE:
+            return splade()
+        if self is SparseModelType.BM_25:
+            return bm_25()
+        if self is SparseModelType.AUTO:
+            return auto()
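For context, a minimal sketch of how the generated `visit()` helper dispatches on the new enum. The import path is assumed from the `llama_cloud/types/sparse_model_type.py` entry in the RECORD below; the callbacks and printed strings are illustrative only.

```python
from llama_cloud.types.sparse_model_type import SparseModelType

# Pick a sparse model type and dispatch on it with the generated visit() helper.
model_type = SparseModelType.AUTO

description = model_type.visit(
    splade=lambda: "HuggingFace Splade sparse embeddings",
    bm_25=lambda: "Qdrant FastEmbed BM25 sparse embeddings",
    auto=lambda: "chosen automatically from the deployment mode",
)
print(description)  # -> "chosen automatically from the deployment mode"
```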
llama_cloud/types/webhook_configuration.py

@@ -23,6 +23,7 @@ class WebhookConfiguration(pydantic.BaseModel):
     webhook_url: typing.Optional[str]
     webhook_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
     webhook_events: typing.Optional[typing.List[WebhookConfigurationWebhookEventsItem]]
+    webhook_output_format: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
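A minimal sketch of building a `WebhookConfiguration` that exercises the new optional field, assuming the fields not shown in this hunk are likewise optional; the diff does not list the accepted values for `webhook_output_format`, so `"json"` here is purely an assumption.

```python
from llama_cloud.types.webhook_configuration import WebhookConfiguration

# Hypothetical webhook config; only the fields visible in the hunk above are set.
config = WebhookConfiguration(
    webhook_url="https://example.com/llama-cloud-webhook",
    webhook_headers={"Authorization": "Bearer <token>"},
    webhook_output_format="json",  # new in 0.1.40; allowed values are an assumption
)
print(config.json())
```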
llama_cloud-0.1.40.dist-info/METADATA (new file)

@@ -0,0 +1,106 @@
+Metadata-Version: 2.3
+Name: llama-cloud
+Version: 0.1.40
+Summary:
+License: MIT
+Author: Logan Markewich
+Author-email: logan@runllama.ai
+Requires-Python: >=3.8,<4
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Dist: certifi (>=2024.7.4)
+Requires-Dist: httpx (>=0.20.0)
+Requires-Dist: pydantic (>=1.10)
+Description-Content-Type: text/markdown
+
+# LlamaIndex Python Client
+
+This client is auto-generated using [Fern](https://buildwithfern.com/docs/intro)
+
+## Setup
+
+Before testing or releasing, ensure you have the development environment set up:
+
+```bash
+# Install Poetry (if not already installed)
+curl -sSL https://install.python-poetry.org | python3 -
+
+# Install project dependencies
+poetry install
+
+# Verify setup
+poetry --version
+python --version
+```
+
+## Quick Start
+
+```bash
+# Set up development environment (first time only)
+make setup
+
+# See all available commands
+make help
+
+# Test before releasing
+make test
+
+# Complete release workflow
+make release
+```
+
+## Commands
+
+| Command | Description |
+|---------|-------------|
+| `make setup` | Set up development environment (first time only) |
+| `make help` | Show all available commands |
+| `make build` | Build the package |
+| `make test` | Build, install locally, and run comprehensive tests |
+| `make publish` | Test and publish to PyPI (with confirmation) |
+| `make release` | Complete release workflow (clean → build → test → publish) |
+| `make clean` | Clean build artifacts |
+| `make setup-credentials` | Set up PyPI credentials (one-time setup) |
+
+## Release Process
+
+**First time setup:**
+```bash
+make setup # Set up development environment
+```
+
+**For each release:**
+1. **Update version** in `pyproject.toml`
+2. **Run release workflow:**
+```bash
+make release
+```
+
+That's it! The Makefile handles building, testing, and publishing with safety checks.
+
+## Manual Steps (if needed)
+
+<details>
+<summary>Expand for manual commands</summary>
+
+**Build and test manually:**
+```bash
+poetry build
+pip install --force-reinstall dist/llama_cloud-*-py3-none-any.whl
+python test_local_build.py
+```
+
+**Setup credentials manually:**
+```bash
+poetry config pypi-token.pypi <your-token>
+```
+Get token from [1Password](https://start.1password.com/open/i?a=32SA66TZ3JCRXOCMASLSDCT5TI&v=lhv7hvb5o46cwo257c3hviqkle&i=yvslwei7jtf6tgqamzcdantqi4&h=llamaindex.1password.com)
+
+</details>
+
{llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/RECORD

@@ -1,4 +1,4 @@
-llama_cloud/__init__.py,sha256=
+llama_cloud/__init__.py,sha256=gldSlClk78A2GPDhfjUN3_EdsBGXGeCMaEaxBTE5E1Y,28239
 llama_cloud/client.py,sha256=u8yj_cznQCKssfheWFugUUUtsM8oVrlWbOyQBFlq5zA,6610
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -11,40 +11,40 @@ llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6
 llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
 llama_cloud/resources/__init__.py,sha256=axi8rRsGi4mjyG88TshNydTRZFYmXqWGwCIya6YIHI0,4321
 llama_cloud/resources/admin/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/admin/client.py,sha256=
+llama_cloud/resources/admin/client.py,sha256=iJClMzp6OQ_TOnAwgcPSb0BkEuuFeIq0r15lDmWUD0s,8502
 llama_cloud/resources/agent_deployments/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/agent_deployments/client.py,sha256=3EOzOjmRs4KISgJ566enq3FCuN3YtskjO0OHqQGtkQ0,6122
 llama_cloud/resources/alpha/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/alpha/client.py,sha256=
+llama_cloud/resources/alpha/client.py,sha256=d5cRIUykNpnVryuxWBPUpmo-2L1vMIDeZIF3DvTIx7E,4322
 llama_cloud/resources/beta/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/beta/client.py,sha256=
+llama_cloud/resources/beta/client.py,sha256=_GNkHQxyZxhZOkLIRzfCw6PexQx-E8r_7R-3Wd9Y0uE,63128
 llama_cloud/resources/chat_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/chat_apps/client.py,sha256=
+llama_cloud/resources/chat_apps/client.py,sha256=orSI8rpQbUwVEToolEeiEi5Qe--suXFvfu6D9JDii5I,23595
 llama_cloud/resources/classifier/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/classifier/client.py,sha256=
+llama_cloud/resources/classifier/client.py,sha256=Q1vdRTSe-QnZyFucTlxHIRmdCGa-PFf7XUazik7s59E,17618
 llama_cloud/resources/data_sinks/__init__.py,sha256=ZHUjn3HbKhq_7QS1q74r2m5RGKF5lxcvF2P6pGvpcis,147
-llama_cloud/resources/data_sinks/client.py,sha256=
+llama_cloud/resources/data_sinks/client.py,sha256=GpD6FhbGqkg2oUToyMG6J8hPxG_iG7W5ZJRo0qg3yzk,20639
 llama_cloud/resources/data_sinks/types/__init__.py,sha256=M1aTcufJwiEZo9B0KmYj9PfkSd6I1ooFt9tpIRGwgg8,168
 llama_cloud/resources/data_sinks/types/data_sink_update_component.py,sha256=ynPdEg844hZaD6EcAK0jrMY_vogtvmLTZ7FZSwWcor8,912
 llama_cloud/resources/data_sources/__init__.py,sha256=McURkcNBGHXH1hmRDRmZI1dRzJrekCTHZsgv03r2oZI,227
-llama_cloud/resources/data_sources/client.py,sha256=
+llama_cloud/resources/data_sources/client.py,sha256=SZFm8bW5nkaXringdSnmxHqvVjKM7cNNOtqVXjgTKhc,21855
 llama_cloud/resources/data_sources/types/__init__.py,sha256=Cd5xEECTzXqQSfJALfJPSjudlSLeb3RENeJVi8vwPbM,303
 llama_cloud/resources/data_sources/types/data_source_update_component.py,sha256=_jQY6FhcvenWdzi27SK1bSY8muXKLRkXlVrTqEWgKKc,1159
 llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py,sha256=3aFC-p8MSxjhOu2nFtqk0pixj6RqNqcFnbOYngUdZUk,215
 llama_cloud/resources/embedding_model_configs/__init__.py,sha256=cXDtKKq-gj7yjFjdQ5GrGyPs-T5tRV_0JjUMGlAbdUs,1115
-llama_cloud/resources/embedding_model_configs/client.py,sha256=
+llama_cloud/resources/embedding_model_configs/client.py,sha256=2JDvZJtSger9QJ8luPct-2zvwjaJAR8VcKsTZ1wgYTE,17769
 llama_cloud/resources/embedding_model_configs/types/__init__.py,sha256=6-rcDwJhw_0shz3CjrPvlYBYXJJ1bLn-PpplhOsQ79w,1156
 llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py,sha256=SQCHJk0AmBbKS5XKdcEJxhDhIMLQCmCI13IHC28v7vQ,3054
 llama_cloud/resources/evals/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/evals/client.py,sha256=v2AyeQV0hVgC6xoP2gJNgneJMaeXALV1hIeirYGxlPw,3242
 llama_cloud/resources/files/__init__.py,sha256=Ws53l-S3kyAGFinYPOb9WpN84DtbFn6gLYZtI2akBLQ,169
-llama_cloud/resources/files/client.py,sha256=
+llama_cloud/resources/files/client.py,sha256=Crd0IR0cV5fld4jUGAHE8VsIbw7vCYrOIyBTSwDyitA,56242
 llama_cloud/resources/files/types/__init__.py,sha256=ZZuDQsYsxmQ9VwpfN7oqftzGRnFTR2EMYdCa7zARo4g,204
 llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py,sha256=Wc8wFgujOO5pZvbbh2TMMzpa37GKZd14GYNJ9bdq7BE,214
 llama_cloud/resources/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/jobs/client.py,sha256=
+llama_cloud/resources/jobs/client.py,sha256=b2R_Oj2OCtcv-IIJNz9aq42hDgrOk_huqTSJhTB9VaA,6202
 llama_cloud/resources/llama_extract/__init__.py,sha256=V6VZ8hQXwAuvOOZyk43nnbINoDQqEr03AjKQPhYKluk,997
-llama_cloud/resources/llama_extract/client.py,sha256=
+llama_cloud/resources/llama_extract/client.py,sha256=U094X3Nlw0CDzXIIkYr8Mld-K7PPqeugidqyu6qE_8Q,83468
 llama_cloud/resources/llama_extract/types/__init__.py,sha256=2Iu4w5LXZY2Govr1RzahIfY0b84y658SQjMDtj7rH_0,1497
 llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py,sha256=zB31hJQ8hKaIsPkfTWiX5hqsPVFMyyeWEDZ_Aq237jo,305
 llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py,sha256=xoyXH3f0Y5beMWBxmtXSz6QoB_df_-0QBsYdjBhZnGw,217
@@ -57,27 +57,27 @@ llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_s
 llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema.py,sha256=lBblR9zgjJsbWL-2bDisCj7EQiX6aky6GQ4tuMr3LtU,325
 llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema_zero_value.py,sha256=4-ONLmkrEP36ZH0qRXp3sbXCtLVNQQX4dLXFeF4u47g,222
 llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/organizations/client.py,sha256=
+llama_cloud/resources/organizations/client.py,sha256=RoN-nkN7VeRZnrrElXhaPrgQFzGMHgNY41_XpbCXP0g,56623
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/parsing/client.py,sha256=
+llama_cloud/resources/parsing/client.py,sha256=aOoOGjpPqAYFNrkfNoqPjG1sXKQwGFYPGTzjpVuOLsY,89205
 llama_cloud/resources/pipelines/__init__.py,sha256=zyvVEOF_krvEZkCIj_kZoMKfhDqHo_R32a1mv9CriQc,1193
-llama_cloud/resources/pipelines/client.py,sha256=
+llama_cloud/resources/pipelines/client.py,sha256=hecRBRZYiEbwKoM1-HEdilN_SSikkoqUu_mTDkg_Lq0,136403
 llama_cloud/resources/pipelines/types/__init__.py,sha256=C68NQ5QzA0dFXf9oePFFGmV1vn96jcAp-QAznSgoRYQ,1375
 llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
 llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py,sha256=c8FF64fDrBMX_2RX4uY3CjbNc0Ss_AUJ4Eqs-KeV4Wc,2874
 llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py,sha256=KbkyULMv-qeS3qRd31ia6pd5rOdypS0o2UL42NRcA7E,321
 llama_cloud/resources/pipelines/types/retrieval_params_search_filters_inference_schema_value.py,sha256=hZWXYlTib0af85ECcerC4xD-bUQe8rG3Q6G1jFTMQcI,228
 llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/projects/client.py,sha256=
+llama_cloud/resources/projects/client.py,sha256=PF36iWtSa5amUt3q56YwLypOZjclIXSubCRv9NttpLs,25404
 llama_cloud/resources/reports/__init__.py,sha256=cruYbQ1bIuJbRpkfaQY7ajUEslffjd7KzvzMzbtPH94,217
-llama_cloud/resources/reports/client.py,sha256=
+llama_cloud/resources/reports/client.py,sha256=kHjtXVVc1Xi3T1GyBvSW5K4mTdr6xQwZA3vw-liRKBg,46736
 llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwHww2YU90lOonBPTmZIk,291
 llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
 llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/retrievers/client.py,sha256=
+llama_cloud/resources/retrievers/client.py,sha256=z2LhmA-cZVFzr9P6loeCZYnJbvSIk0QitFeVFp-IyZk,32126
 llama_cloud/resources/users/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/users/client.py,sha256=A2s8e2syQHkkSwPz-Lrt_Zxp1K-8nqJqj5EafE6NWYs,5545
-llama_cloud/types/__init__.py,sha256=
+llama_cloud/types/__init__.py,sha256=SgGBOvabnZFRvnjCtoQRQxHyPm1QjWEjDRMXcg3AgEQ,34520
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -170,9 +170,10 @@ llama_cloud/types/extract_agent_data_schema_value.py,sha256=UaDQ2KjajLDccW7F4NKd
 llama_cloud/types/extract_config.py,sha256=FqHNQ7a7Jlyb8Ulsh96SmSfykf2PJDy8CerJ-git5io,2527
 llama_cloud/types/extract_config_priority.py,sha256=btl5lxl25Ve6_lTbQzQyjOKle8XoY0r16lk3364c3uw,795
 llama_cloud/types/extract_job.py,sha256=Yx4fDdCdylAji2LPTwqflVpz1o9slpj9tTLS93-1tzU,1431
-llama_cloud/types/extract_job_create.py,sha256=
+llama_cloud/types/extract_job_create.py,sha256=5CcKnYprImF0wEqUJDqi6flAIJ0rzOWxmrCvtl_b8WM,1802
 llama_cloud/types/extract_job_create_data_schema_override.py,sha256=vuiJ2lGJjbXEnvFKzVnKyvgwhMXPg1Pb5GZne2DrB60,330
 llama_cloud/types/extract_job_create_data_schema_override_zero_value.py,sha256=HHEYxOSQXXyBYOiUQg_qwfQtXFj-OtThMwbUDBIgZU0,223
+llama_cloud/types/extract_job_create_priority.py,sha256=_Qdc-ScGUcsgb0pv9-Viq2JgEoDYUi0AKStlw2E4Rb4,810
 llama_cloud/types/extract_mode.py,sha256=S7H-XcH1wvPbOPVdwG9kVnZaH1pMY-LNzAD6TjCm0mc,785
 llama_cloud/types/extract_models.py,sha256=tx4NquIoJ4irXncqRUjnuE542nPu5jMuzy-ZaMdg3PI,1958
 llama_cloud/types/extract_resultset.py,sha256=Alje0YQJUiA_aKi0hQs7TAnhDmZuQ_yL9b6HCNYBFQg,1627
@@ -194,7 +195,7 @@ llama_cloud/types/extract_state.py,sha256=TNeVAXXKZaiM2srlbQlzRSn4_TDpR4xyT_yQhJ
 llama_cloud/types/extract_target.py,sha256=Gt-FNqblzcjdfq1hxsqEjWWu-HNLXdKy4w98nog52Ms,478
 llama_cloud/types/fail_page_mode.py,sha256=n4fgPpiEB5siPoEg0Sux4COg7ElNybjshxDoUihZwRU,786
 llama_cloud/types/failure_handling_config.py,sha256=EmAQW0qm7-JTSYFwhmIWxqkVNWym_AyAJIMEmeI9Cqc,1216
-llama_cloud/types/file.py,sha256=
+llama_cloud/types/file.py,sha256=sXdF-cdHL3k1-DPIxAjYpb-kNHzcOAV_earVoYITzUA,1765
 llama_cloud/types/file_classification.py,sha256=jKzAc_3rg0Usyf3TNr-bI5HZn9zGIj9vYH90RKoDtiY,1418
 llama_cloud/types/file_count_by_status_response.py,sha256=WuorbZvKjDs9Ql1hUiQu4gN5iCm8d6fr92KLyHpRvQU,1356
 llama_cloud/types/file_create.py,sha256=eLUC50CzXOdAR_P2mBtX_R7kGteIVbP1V3LzuP1s0Xs,1629
@@ -229,19 +230,20 @@ llama_cloud/types/image_block.py,sha256=Bccrsm1-B2hUzObP7Oy1H7IVnurixfTpL03i-yqf
 llama_cloud/types/ingestion_error_response.py,sha256=8u0cyT44dnpkNeUKemTvJMUqi_WyPcYQKP_DMTqaFPY,1259
 llama_cloud/types/input_message.py,sha256=Ym6-tX6CMWKuHfxRtyM2y16kqSS3BzHged9rFRFkX0g,1346
 llama_cloud/types/job_name_mapping.py,sha256=2dQFQlVHoeSlkyEKSEJv0M3PzJf7hMvkuABj3vMY7ys,1617
-llama_cloud/types/job_names.py,sha256=
+llama_cloud/types/job_names.py,sha256=CR7Bc8ViDuoF-Uk57ca2166hZTRedu5kIWQ2ZqB9t54,3647
 llama_cloud/types/job_record.py,sha256=Z6sF9AruZJo-kTRgNufAWS3WK1yaEqop6kox1GpBYy4,2219
 llama_cloud/types/job_record_parameters.py,sha256=Oqxp5y0owPfjLc_NR7AYE8P3zM2PJo36N9olbyNl7AA,3425
 llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyAaM6Cn9G8KG-dGPGgEs5SY,1198
 llama_cloud/types/l_lama_parse_transform_config.py,sha256=YQRJZvKh1Ee2FUyW_N0nqYJoW599qBgH3JCH9SH6YLo,1249
 llama_cloud/types/legacy_parse_job_config.py,sha256=eEPExbkUi9J7lQoY0Fuc2HK_RlhPmO30cMkfjtmmizs,12832
 llama_cloud/types/license_info_response.py,sha256=fE9vcWO8k92SBqb_wOyBu_16C61s72utA-SifEi9iBc,1192
+llama_cloud/types/llama_extract_feature_availability.py,sha256=oHJ3OyHf2rXmZhBSQfxVNnCFOp8IMKx_28EffCIEbLU,1228
 llama_cloud/types/llama_extract_mode_availability.py,sha256=7XelUrLe9wteCeEnP_shnb485lwKo56A2EZ66bq9HQw,1257
 llama_cloud/types/llama_extract_mode_availability_status.py,sha256=_ildgVCsBdqOLD__qdEjcYxqgKunXhJ_VHUeqjZJX8c,566
 llama_cloud/types/llama_extract_settings.py,sha256=YKhhyUNgqpowTdTx715Uk13GdBsxCUZLVsLi5iYQIiY,2767
 llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-T-ebVMhRKsqE_xV8AJW4c7o6lS0uEQnPsmTwg,1365
 llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=-aL8fh-w2Xf4uQs_LHzb3q6LL_onLAcVzCR5yMI4qJw,1571
-llama_cloud/types/llama_parse_parameters.py,sha256=
+llama_cloud/types/llama_parse_parameters.py,sha256=kNpKnxuClDUYy8wO09DNKszJ_kbjx_T-s82GSdLBcNw,6552
 llama_cloud/types/llama_parse_parameters_priority.py,sha256=EFRudtaID_s8rLKlfW8O8O9TDbpZdniIidK-xchhfRI,830
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
 llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
@@ -280,7 +282,7 @@ llama_cloud/types/paginated_response_agent_data.py,sha256=u6Y-Cq9qjGF5tskMOQChUN
 llama_cloud/types/paginated_response_aggregate_group.py,sha256=1ajZLZJLU6-GuQ_PPsEVRFZ6bm9he807F_F_DmB2HlQ,1179
 llama_cloud/types/paginated_response_classify_job.py,sha256=ABpHn-ryRS8erj02ncxshAFe2Enw5JvSZqqbZuy0nWA,1167
 llama_cloud/types/paginated_response_quota_configuration.py,sha256=S-miK621O7V6hBB05xcFBKCwa-gBK17iTHh29Saebz8,1123
-llama_cloud/types/parse_job_config.py,sha256=
+llama_cloud/types/parse_job_config.py,sha256=ISGxvIIujO-51ksx_lfVJLxze-Bq_yaC8uh8KnEt2GQ,7015
 llama_cloud/types/parse_job_config_priority.py,sha256=__-gVv1GzktVCYZVyl6zeDt0pAZwYl-mxM0xkIHPEro,800
 llama_cloud/types/parse_plan_level.py,sha256=GBkDS19qfHseBa17EXfuTPNT4GNv5alyPrWEvWji3GY,528
 llama_cloud/types/parser_languages.py,sha256=Ps3IlaSt6tyxEI657N3-vZL96r2puk8wsf31cWnO-SI,10840
@@ -296,9 +298,9 @@ llama_cloud/types/permission.py,sha256=LjhZdo0oLvk7ZVIF1d6Qja--AKH5Ri0naUhuJvZS6
 llama_cloud/types/pg_vector_distance_method.py,sha256=U81o0ARjPR-HuFcVspHiJUrjIDJo3jLhB46vkITDu7M,1203
 llama_cloud/types/pg_vector_hnsw_settings.py,sha256=-RE59xUgHwNEyAwRYmOQ8SHeAqkSYBfCAROw7QomxUU,1758
 llama_cloud/types/pg_vector_vector_type.py,sha256=VwOohN566zw42UMlnuKTJopYJypsSnzWjCFmKRoU-bo,952
-llama_cloud/types/pipeline.py,sha256=
+llama_cloud/types/pipeline.py,sha256=p2jZnDDDmBpkawjIYltnlKPlawLAJdKisEp0Bqqr_4s,2962
 llama_cloud/types/pipeline_configuration_hashes.py,sha256=7_MbOcPWV6iyMflJeXoo9vLzD04E5WM7YxYp4ls0jQs,1169
-llama_cloud/types/pipeline_create.py,sha256=
+llama_cloud/types/pipeline_create.py,sha256=3JLFQjgvFGpqfkED43kcdZMfDHLKp1b2j7Apj45M0R0,2607
 llama_cloud/types/pipeline_create_embedding_config.py,sha256=PQqmVBFUyZXYKKBmVQF2zPsGp1L6rje6g3RtXEcdfc8,2811
 llama_cloud/types/pipeline_create_transform_config.py,sha256=HP6tzLsw_pomK1Ye2PYCS_XDZK_TMgg22mz17_zYKFg,303
 llama_cloud/types/pipeline_data_source.py,sha256=iKB2NgpWQTl_rNDCvnXjNyd0gzohqwfCnupzWYT_CTE,2465
@@ -308,7 +310,7 @@ llama_cloud/types/pipeline_data_source_custom_metadata_value.py,sha256=8n3r60sxM
 llama_cloud/types/pipeline_data_source_status.py,sha256=BD4xoftwp9lWC8EjJTnf3boIG_AyzjLPuP4qJxGhmcc,1039
 llama_cloud/types/pipeline_deployment.py,sha256=eVBrz032aPb2cqtIIVYT5MTHQvBNm89XazoNrRWVugo,1356
 llama_cloud/types/pipeline_embedding_config.py,sha256=7NJzlabQLFUFsvj7fye-oKLPasaXCWJBm-XuLxy-xmQ,3112
-llama_cloud/types/pipeline_file.py,sha256=
+llama_cloud/types/pipeline_file.py,sha256=zoE1A4pdD7S4cgDtR_aVToQ08JDt_siUJTYsdayG8s4,2510
 llama_cloud/types/pipeline_file_config_hash_value.py,sha256=4lvLnDpzNAHdiMkGJTTNDTu3p3H7Nxw5MR1Mzte7-_M,201
 llama_cloud/types/pipeline_file_create.py,sha256=yoMIzWED0ktKerE48kgzInBa3d0aNGO5JjTtDTDAn4A,1310
 llama_cloud/types/pipeline_file_create_custom_metadata_value.py,sha256=olVj5yhQFx1QqWO1Wv9d6AtL-YyYO9_OYtOfcD2ZeGY,217
@@ -366,9 +368,13 @@ llama_cloud/types/retriever.py,sha256=ZItPsorL8x1XjtJT49ZodaMqU8h2GfwlB4U4cgnfZk
 llama_cloud/types/retriever_create.py,sha256=WyUR9DRzu3Q9tzKEeXCdQuzCY6WKi9ADJkZea9rqvxU,1286
 llama_cloud/types/retriever_pipeline.py,sha256=F1pZDxg8JdQXRHE6ciFezd7a-Wv5bHplPcGDED-J4b0,1330
 llama_cloud/types/role.py,sha256=4pbyLVNPleDd624cDcOhu9y1WvqC0J0gmNirTOW97iA,1342
+llama_cloud/types/schema_generation_availability.py,sha256=42x9DCjLVRH27ZQC8bB4Atxd2rKoHoX2EZTT5S3LIlU,1111
+llama_cloud/types/schema_generation_availability_status.py,sha256=bRU9bKidO01Zh3qZLH7tTJQSMImeqOlFDzF30Rhff7o,566
 llama_cloud/types/schema_relax_mode.py,sha256=v4or6dYTvWvBBNtEd2ZSaUAb1706I0Zuh-Xztm-zx_0,635
 llama_cloud/types/semantic_chunking_config.py,sha256=dFDniTVWpRc7UcmVFvljUoyL5Ztd-l-YrHII7U-yM-k,1053
 llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9Co_bzRWeYe4uSh9I,1116
+llama_cloud/types/sparse_model_config.py,sha256=vwt0_3ncjFCtNyWsMSYRrVuoTAWsdnQCHSTUM4HK-Lc,1529
+llama_cloud/types/sparse_model_type.py,sha256=vmjOS3tSqopsvxWqw3keeIL4kgskJv6TJL-Gw_qQQ5s,933
 llama_cloud/types/src_app_schema_chat_chat_message.py,sha256=ddMQXZybeExPVFMNe8FWghyXXWktsujpZ_0Xmou3Zz8,1596
 llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDPvx0,1018
 llama_cloud/types/struct_mode.py,sha256=ROicwjXfFmgVU8_xSVxJlnFUzRNKG5VIEF1wYg9uOPU,1020
@@ -396,9 +402,9 @@ llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPX
 llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
 llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
-llama_cloud/types/webhook_configuration.py,sha256=
+llama_cloud/types/webhook_configuration.py,sha256=E0QIuApBLlFGgdsy5VjGIkodclJvAxSO8y8n3DsGHrg,1398
 llama_cloud/types/webhook_configuration_webhook_events_item.py,sha256=OL3moFO_6hsKZYSBQBsSHmWA0NgLcLJgBPZfABwT60c,2544
-llama_cloud-0.1.
-llama_cloud-0.1.
-llama_cloud-0.1.
-llama_cloud-0.1.
+llama_cloud-0.1.40.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.40.dist-info/METADATA,sha256=ZLGLV8Q5nIzWMNiU1KpntEcxbweSwnu6FeAC0B_njDo,2706
+llama_cloud-0.1.40.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+llama_cloud-0.1.40.dist-info/RECORD,,
llama_cloud-0.1.38.dist-info/METADATA (deleted file)

@@ -1,32 +0,0 @@
-Metadata-Version: 2.1
-Name: llama-cloud
-Version: 0.1.38
-Summary:
-License: MIT
-Author: Logan Markewich
-Author-email: logan@runllama.ai
-Requires-Python: >=3.8,<4
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: certifi (>=2024.7.4)
-Requires-Dist: httpx (>=0.20.0)
-Requires-Dist: pydantic (>=1.10)
-Description-Content-Type: text/markdown
-
-# LlamaIndex Python Client
-
-This client is auto-generated using [Fern](https://buildwithfern.com/docs/intro)
-
-To publish:
-- update the version in `pyproject.toml`
-- run `poetry publish --build`
-
-Setup credentials:
-- run `poetry config pypi-token.pypi <my-token>`
-- Get token form PyPi once logged in with credentials in [1Password](https://start.1password.com/open/i?a=32SA66TZ3JCRXOCMASLSDCT5TI&v=lhv7hvb5o46cwo257c3hviqkle&i=yvslwei7jtf6tgqamzcdantqi4&h=llamaindex.1password.com)
-

{llama_cloud-0.1.38.dist-info → llama_cloud-0.1.40.dist-info}/LICENSE: File without changes