llama-cloud 0.1.32__py3-none-any.whl → 0.1.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


llama_cloud/__init__.py CHANGED
@@ -37,6 +37,8 @@ from .types import (
     ChatAppResponse,
     ChatData,
     ChunkMode,
+    ClassificationResult,
+    ClassifyResponse,
     CloudAzStorageBlobDataSource,
     CloudAzureAiSearchVectorStore,
     CloudBoxDataSource,
@@ -73,6 +75,7 @@ from .types import (
     DataSourceCreateComponent,
     DataSourceCreateCustomMetadataValue,
     DataSourceCustomMetadataValue,
+    DataSourceReaderVersionMetadata,
     DataSourceUpdateDispatcherConfig,
     DeleteParams,
     DocumentBlock,
@@ -379,15 +382,16 @@ from .resources import (
     RetrievalParamsSearchFiltersInferenceSchemaValue,
     UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction,
     admin,
+    agent_deployments,
     beta,
     chat_apps,
+    classifier,
     data_sinks,
     data_sources,
     embedding_model_configs,
     evals,
     files,
     jobs,
-    llama_apps,
     llama_extract,
     organizations,
     parsing,
@@ -435,6 +439,8 @@ __all__ = [
     "ChatAppResponse",
     "ChatData",
     "ChunkMode",
+    "ClassificationResult",
+    "ClassifyResponse",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudBoxDataSource",
@@ -472,6 +478,7 @@ __all__ = [
     "DataSourceCreateComponent",
     "DataSourceCreateCustomMetadataValue",
     "DataSourceCustomMetadataValue",
+    "DataSourceReaderVersionMetadata",
     "DataSourceUpdateComponent",
     "DataSourceUpdateCustomMetadataValue",
     "DataSourceUpdateDispatcherConfig",
@@ -776,15 +783,16 @@ __all__ = [
     "WebhookConfiguration",
     "WebhookConfigurationWebhookEventsItem",
     "admin",
+    "agent_deployments",
     "beta",
     "chat_apps",
+    "classifier",
     "data_sinks",
     "data_sources",
     "embedding_model_configs",
     "evals",
     "files",
     "jobs",
-    "llama_apps",
     "llama_extract",
     "organizations",
     "parsing",

llama_cloud/client.py CHANGED
@@ -7,15 +7,16 @@ import httpx
 from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from .environment import LlamaCloudEnvironment
 from .resources.admin.client import AdminClient, AsyncAdminClient
+from .resources.agent_deployments.client import AgentDeploymentsClient, AsyncAgentDeploymentsClient
 from .resources.beta.client import AsyncBetaClient, BetaClient
 from .resources.chat_apps.client import AsyncChatAppsClient, ChatAppsClient
+from .resources.classifier.client import AsyncClassifierClient, ClassifierClient
 from .resources.data_sinks.client import AsyncDataSinksClient, DataSinksClient
 from .resources.data_sources.client import AsyncDataSourcesClient, DataSourcesClient
 from .resources.embedding_model_configs.client import AsyncEmbeddingModelConfigsClient, EmbeddingModelConfigsClient
 from .resources.evals.client import AsyncEvalsClient, EvalsClient
 from .resources.files.client import AsyncFilesClient, FilesClient
 from .resources.jobs.client import AsyncJobsClient, JobsClient
-from .resources.llama_apps.client import AsyncLlamaAppsClient, LlamaAppsClient
 from .resources.llama_extract.client import AsyncLlamaExtractClient, LlamaExtractClient
 from .resources.organizations.client import AsyncOrganizationsClient, OrganizationsClient
 from .resources.parsing.client import AsyncParsingClient, ParsingClient
@@ -52,7 +53,8 @@ class LlamaCloud:
         self.evals = EvalsClient(client_wrapper=self._client_wrapper)
         self.parsing = ParsingClient(client_wrapper=self._client_wrapper)
         self.chat_apps = ChatAppsClient(client_wrapper=self._client_wrapper)
-        self.llama_apps = LlamaAppsClient(client_wrapper=self._client_wrapper)
+        self.agent_deployments = AgentDeploymentsClient(client_wrapper=self._client_wrapper)
+        self.classifier = ClassifierClient(client_wrapper=self._client_wrapper)
         self.admin = AdminClient(client_wrapper=self._client_wrapper)
         self.llama_extract = LlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = ReportsClient(client_wrapper=self._client_wrapper)
@@ -86,7 +88,8 @@ class AsyncLlamaCloud:
         self.evals = AsyncEvalsClient(client_wrapper=self._client_wrapper)
         self.parsing = AsyncParsingClient(client_wrapper=self._client_wrapper)
         self.chat_apps = AsyncChatAppsClient(client_wrapper=self._client_wrapper)
-        self.llama_apps = AsyncLlamaAppsClient(client_wrapper=self._client_wrapper)
+        self.agent_deployments = AsyncAgentDeploymentsClient(client_wrapper=self._client_wrapper)
+        self.classifier = AsyncClassifierClient(client_wrapper=self._client_wrapper)
         self.admin = AsyncAdminClient(client_wrapper=self._client_wrapper)
         self.llama_extract = AsyncLlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = AsyncReportsClient(client_wrapper=self._client_wrapper)
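
For consumers upgrading from 0.1.32, a minimal migration sketch; the import path and arguments mirror the SDK's own docstring examples, and the token is a placeholder. The `llama_apps` attribute is gone, with `agent_deployments` and the new `classifier` taking its place.

```python
# Hedged migration sketch for 0.1.32 -> 0.1.34; "YOUR_TOKEN" is a placeholder.
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# 0.1.32: client.llama_apps.list_deployments(project_id="...")  # AttributeError in 0.1.34
deployments = client.agent_deployments.list_deployments(project_id="string")
```
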
llama_cloud/resources/__init__.py CHANGED
@@ -2,15 +2,16 @@
 
 from . import (
     admin,
+    agent_deployments,
     beta,
     chat_apps,
+    classifier,
     data_sinks,
     data_sources,
     embedding_model_configs,
     evals,
     files,
     jobs,
-    llama_apps,
     llama_extract,
     organizations,
     parsing,
@@ -93,15 +94,16 @@ __all__ = [
     "RetrievalParamsSearchFiltersInferenceSchemaValue",
     "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
     "admin",
+    "agent_deployments",
     "beta",
     "chat_apps",
+    "classifier",
     "data_sinks",
     "data_sources",
     "embedding_model_configs",
     "evals",
     "files",
     "jobs",
-    "llama_apps",
     "llama_extract",
     "organizations",
     "parsing",

llama_cloud/resources/llama_apps/client.py → llama_cloud/resources/agent_deployments/client.py RENAMED
@@ -18,7 +18,7 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class LlamaAppsClient:
+class AgentDeploymentsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
@@ -34,7 +34,7 @@ class LlamaAppsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.llama_apps.list_deployments(
+        client.agent_deployments.list_deployments(
             project_id="string",
         )
         """
@@ -66,7 +66,7 @@ class LlamaAppsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.llama_apps.sync_deployments(
+        client.agent_deployments.sync_deployments(
             project_id="string",
         )
         """
@@ -89,7 +89,7 @@ class LlamaAppsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
 
-class AsyncLlamaAppsClient:
+class AsyncAgentDeploymentsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
@@ -105,7 +105,7 @@ class AsyncLlamaAppsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.llama_apps.list_deployments(
+        await client.agent_deployments.list_deployments(
             project_id="string",
         )
         """
@@ -137,7 +137,7 @@ class AsyncLlamaAppsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.llama_apps.sync_deployments(
+        await client.agent_deployments.sync_deployments(
             project_id="string",
         )
         """

llama_cloud/resources/beta/client.py CHANGED
@@ -308,7 +308,7 @@ class BetaClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def create_agent_data_api_v_1_beta_agent_data_post(
+    def create_agent_data(
         self, *, agent_slug: str, collection: typing.Optional[str] = OMIT, data: typing.Dict[str, typing.Any]
     ) -> AgentData:
         """
@@ -326,7 +326,7 @@ class BetaClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.beta.create_agent_data_api_v_1_beta_agent_data_post(
+        client.beta.create_agent_data(
             agent_slug="string",
             data={"string": {}},
         )
@@ -785,7 +785,7 @@ class AsyncBetaClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def create_agent_data_api_v_1_beta_agent_data_post(
+    async def create_agent_data(
         self, *, agent_slug: str, collection: typing.Optional[str] = OMIT, data: typing.Dict[str, typing.Any]
     ) -> AgentData:
         """
@@ -803,7 +803,7 @@ class AsyncBetaClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.beta.create_agent_data_api_v_1_beta_agent_data_post(
+        await client.beta.create_agent_data(
             agent_slug="string",
             data={"string": {}},
        )
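
The beta rename is purely cosmetic at the call site; a hedged before/after sketch reusing the docstring's example arguments:

```python
# Hedged sketch of the renamed beta method; the argument values are the
# placeholders from the docstring example above.
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# 0.1.32: client.beta.create_agent_data_api_v_1_beta_agent_data_post(...)
agent_data = client.beta.create_agent_data(agent_slug="string", data={"string": {}})
```
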
llama_cloud/resources/classifier/__init__.py ADDED
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+

llama_cloud/resources/classifier/client.py ADDED
@@ -0,0 +1,290 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.classify_response import ClassifyResponse
+from ...types.http_validation_error import HttpValidationError
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ClassifierClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def classify_documents(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        rules_json: str,
+        files: typing.Optional[typing.List[str]] = OMIT,
+        file_ids: typing.Optional[str] = OMIT,
+        matching_threshold: typing.Optional[float] = OMIT,
+        enable_metadata_heuristic: typing.Optional[bool] = OMIT,
+    ) -> ClassifyResponse:
+        """
+        **[BETA]** Classify documents based on provided rules - simplified classification system.
+
+        **This is a Beta feature** - API may change based on user feedback.
+
+        This endpoint supports:
+
+        - Classifying new uploaded files
+        - Classifying existing files by ID
+        - Both new files and existing file IDs in one request
+
+        ## v0 Features:
+
+        - **Simplified Rules**: Only `type` and `description` fields needed
+        - **Matching Threshold**: Confidence-based classification with configurable threshold
+        - **Smart Classification**: Filename heuristics + LLM content analysis
+        - **Document Type Filtering**: Automatically filters out non-document file types
+        - **Fast Processing**: Uses LlamaParse fast mode + GPT-4.1-nano
+        - **Optimized Performance**: Parses each file only once for all rules
+
+        ## Simplified Scoring Logic:
+
+        1. **Evaluate All Rules**: Compare document against all classification rules
+        2. **Best Match Selection**: Return the highest scoring rule above matching_threshold
+        3. **Unknown Classification**: Return as "unknown" if no rules score above threshold
+
+        This ensures optimal classification by:
+
+        - Finding the best possible match among all rules
+        - Avoiding false positives with confidence thresholds
+        - Maximizing performance with single-pass file parsing
+
+        ## Rule Format:
+
+        ```json
+        [
+          {
+            "type": "invoice",
+            "description": "contains invoice number, line items, and total amount"
+          },
+          {
+            "type": "receipt",
+            "description": "purchase receipt with transaction details and payment info"
+          }
+        ]
+        ```
+
+        ## Classification Process:
+
+        1. **Metadata Heuristics** (configurable via API):
+           - **Document Type Filter**: Only process document file types (PDF, DOC, DOCX, RTF, TXT, ODT, Pages, HTML, XML, Markdown)
+           - **Filename Heuristics**: Check if rule type appears in filename
+           - **Content Analysis**: Parse document content once and use LLM for semantic matching against all rules
+        2. **Result**: Returns type, confidence score, and matched rule information
+
+        ## API Parameters:
+
+        - `matching_threshold` (0.1-0.99, default: 0.6): Minimum confidence threshold for acceptable matches
+        - `enable_metadata_heuristic` (boolean, default: true): Enable metadata-based features
+
+        ## Supported Document Types:
+
+        **Text Documents**: pdf, doc, docx, rtf, txt, odt, pages
+        **Web Documents**: html, htm, xml
+        **Markup**: md, markdown
+
+        ## Limits (Beta):
+
+        - Maximum 100 files per request
+        - Maximum 10 rules per request
+        - Rule descriptions: 10-500 characters
+        - Document types: 1-50 characters (alphanumeric, hyphens, underscores)
+
+        **Beta Notice**: This API is subject to change. Please provide feedback!
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - rules_json: str. JSON string containing classifier rules
+
+            - files: typing.Optional[typing.List[str]].
+
+            - file_ids: typing.Optional[str].
+
+            - matching_threshold: typing.Optional[float].
+
+            - enable_metadata_heuristic: typing.Optional[bool].
+        """
+        _request: typing.Dict[str, typing.Any] = {"rules_json": rules_json}
+        if files is not OMIT:
+            _request["files"] = files
+        if file_ids is not OMIT:
+            _request["file_ids"] = file_ids
+        if matching_threshold is not OMIT:
+            _request["matching_threshold"] = matching_threshold
+        if enable_metadata_heuristic is not OMIT:
+            _request["enable_metadata_heuristic"] = enable_metadata_heuristic
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/classify"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ClassifyResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncClassifierClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def classify_documents(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        rules_json: str,
+        files: typing.Optional[typing.List[str]] = OMIT,
+        file_ids: typing.Optional[str] = OMIT,
+        matching_threshold: typing.Optional[float] = OMIT,
+        enable_metadata_heuristic: typing.Optional[bool] = OMIT,
+    ) -> ClassifyResponse:
+        """
+        **[BETA]** Classify documents based on provided rules - simplified classification system.
+
+        **This is a Beta feature** - API may change based on user feedback.
+
+        This endpoint supports:
+
+        - Classifying new uploaded files
+        - Classifying existing files by ID
+        - Both new files and existing file IDs in one request
+
+        ## v0 Features:
+
+        - **Simplified Rules**: Only `type` and `description` fields needed
+        - **Matching Threshold**: Confidence-based classification with configurable threshold
+        - **Smart Classification**: Filename heuristics + LLM content analysis
+        - **Document Type Filtering**: Automatically filters out non-document file types
+        - **Fast Processing**: Uses LlamaParse fast mode + GPT-4.1-nano
+        - **Optimized Performance**: Parses each file only once for all rules
+
+        ## Simplified Scoring Logic:
+
+        1. **Evaluate All Rules**: Compare document against all classification rules
+        2. **Best Match Selection**: Return the highest scoring rule above matching_threshold
+        3. **Unknown Classification**: Return as "unknown" if no rules score above threshold
+
+        This ensures optimal classification by:
+
+        - Finding the best possible match among all rules
+        - Avoiding false positives with confidence thresholds
+        - Maximizing performance with single-pass file parsing
+
+        ## Rule Format:
+
+        ```json
+        [
+          {
+            "type": "invoice",
+            "description": "contains invoice number, line items, and total amount"
+          },
+          {
+            "type": "receipt",
+            "description": "purchase receipt with transaction details and payment info"
+          }
+        ]
+        ```
+
+        ## Classification Process:
+
+        1. **Metadata Heuristics** (configurable via API):
+           - **Document Type Filter**: Only process document file types (PDF, DOC, DOCX, RTF, TXT, ODT, Pages, HTML, XML, Markdown)
+           - **Filename Heuristics**: Check if rule type appears in filename
+           - **Content Analysis**: Parse document content once and use LLM for semantic matching against all rules
+        2. **Result**: Returns type, confidence score, and matched rule information
+
+        ## API Parameters:
+
+        - `matching_threshold` (0.1-0.99, default: 0.6): Minimum confidence threshold for acceptable matches
+        - `enable_metadata_heuristic` (boolean, default: true): Enable metadata-based features
+
+        ## Supported Document Types:
+
+        **Text Documents**: pdf, doc, docx, rtf, txt, odt, pages
+        **Web Documents**: html, htm, xml
+        **Markup**: md, markdown
+
+        ## Limits (Beta):
+
+        - Maximum 100 files per request
+        - Maximum 10 rules per request
+        - Rule descriptions: 10-500 characters
+        - Document types: 1-50 characters (alphanumeric, hyphens, underscores)
+
+        **Beta Notice**: This API is subject to change. Please provide feedback!
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - rules_json: str. JSON string containing classifier rules
+
+            - files: typing.Optional[typing.List[str]].
+
+            - file_ids: typing.Optional[str].
+
+            - matching_threshold: typing.Optional[float].
+
+            - enable_metadata_heuristic: typing.Optional[bool].
+        """
+        _request: typing.Dict[str, typing.Any] = {"rules_json": rules_json}
+        if files is not OMIT:
+            _request["files"] = files
+        if file_ids is not OMIT:
+            _request["file_ids"] = file_ids
+        if matching_threshold is not OMIT:
+            _request["matching_threshold"] = matching_threshold
+        if enable_metadata_heuristic is not OMIT:
+            _request["enable_metadata_heuristic"] = enable_metadata_heuristic
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/classify"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ClassifyResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
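
Putting the new resource together, a hedged end-to-end sketch: the rules follow the docstring's Rule Format, `file_ids` is passed as a single string per the signature above, and the file ID and token are invented placeholders.

```python
# Hedged usage sketch for the new classifier resource; "file-123" and
# "YOUR_TOKEN" are invented placeholders, not values from the package docs.
import json

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")
response = client.classifier.classify_documents(
    rules_json=json.dumps(
        [
            {"type": "invoice", "description": "contains invoice number, line items, and total amount"},
            {"type": "receipt", "description": "purchase receipt with transaction details and payment info"},
        ]
    ),
    file_ids="file-123",  # ID of an already-uploaded file (invented here)
    matching_threshold=0.6,  # the documented default
)
for result in response.items:
    print(result.file_id, result.type, result.confidence, result.matched_rule)
print(f"unclassified: {response.unknown_count}")
```
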
llama_cloud/resources/organizations/client.py CHANGED
@@ -298,13 +298,13 @@ class OrganizationsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     def get_organization_usage(
-        self, organization_id: typing.Optional[str], *, get_current_invoice_total: typing.Optional[bool] = None
+        self, organization_id: str, *, get_current_invoice_total: typing.Optional[bool] = None
     ) -> UsageAndPlan:
         """
         Get usage for a specific organization.
 
         Parameters:
-            - organization_id: typing.Optional[str].
+            - organization_id: str.
 
             - get_current_invoice_total: typing.Optional[bool].
         ---
@@ -313,7 +313,9 @@ class OrganizationsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.organizations.get_organization_usage()
+        client.organizations.get_organization_usage(
+            organization_id="string",
+        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -1004,13 +1006,13 @@ class AsyncOrganizationsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def get_organization_usage(
-        self, organization_id: typing.Optional[str], *, get_current_invoice_total: typing.Optional[bool] = None
+        self, organization_id: str, *, get_current_invoice_total: typing.Optional[bool] = None
     ) -> UsageAndPlan:
         """
         Get usage for a specific organization.
 
         Parameters:
-            - organization_id: typing.Optional[str].
+            - organization_id: str.
 
             - get_current_invoice_total: typing.Optional[bool].
         ---
@@ -1019,7 +1021,9 @@ class AsyncOrganizationsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.organizations.get_organization_usage()
+        await client.organizations.get_organization_usage(
+            organization_id="string",
+        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",

llama_cloud/types/__init__.py CHANGED
@@ -40,6 +40,8 @@ from .chat_app import ChatApp
 from .chat_app_response import ChatAppResponse
 from .chat_data import ChatData
 from .chunk_mode import ChunkMode
+from .classification_result import ClassificationResult
+from .classify_response import ClassifyResponse
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_box_data_source import CloudBoxDataSource
@@ -76,6 +78,7 @@ from .data_source_create import DataSourceCreate
 from .data_source_create_component import DataSourceCreateComponent
 from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
+from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
 from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
 from .delete_params import DeleteParams
 from .document_block import DocumentBlock
@@ -400,6 +403,8 @@ __all__ = [
     "ChatAppResponse",
     "ChatData",
     "ChunkMode",
+    "ClassificationResult",
+    "ClassifyResponse",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudBoxDataSource",
@@ -436,6 +441,7 @@ __all__ = [
     "DataSourceCreateComponent",
     "DataSourceCreateCustomMetadataValue",
     "DataSourceCustomMetadataValue",
+    "DataSourceReaderVersionMetadata",
     "DataSourceUpdateDispatcherConfig",
     "DeleteParams",
     "DocumentBlock",

llama_cloud/types/classification_result.py ADDED
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ClassificationResult(pydantic.BaseModel):
+    """
+    Result of classifying a single file.
+
+    Contains the classification outcome with confidence score and matched rule info.
+    """
+
+    file_id: str = pydantic.Field(description="The ID of the classified file")
+    type: str = pydantic.Field(description="The assigned document type ('unknown' if no rules matched)")
+    confidence: float = pydantic.Field(description="Confidence score of the classification (0.0-1.0)")
+    matched_rule: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}

llama_cloud/types/classify_response.py ADDED
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .classification_result import ClassificationResult
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ClassifyResponse(pydantic.BaseModel):
+    """
+    Response model for the classify endpoint following AIP-132 pagination standard.
+
+    Contains classification results with pagination support and summary statistics.
+    """
+
+    items: typing.List[ClassificationResult] = pydantic.Field(description="The list of items.")
+    next_page_token: typing.Optional[str]
+    total_size: typing.Optional[int]
+    unknown_count: int = pydantic.Field(description="Number of files that couldn't be classified")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
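
A quick local sketch of how the two new models deserialize, using pydantic v1-style `parse_obj` (which the version shim above preserves); every value in the payload is invented.

```python
# Local deserialization sketch; all payload values are invented.
from llama_cloud.types import ClassifyResponse

payload = {
    "items": [
        {"file_id": "f-1", "type": "invoice", "confidence": 0.92, "matched_rule": "invoice"},
        {"file_id": "f-2", "type": "unknown", "confidence": 0.31, "matched_rule": None},
    ],
    "unknown_count": 1,
}
resp = ClassifyResponse.parse_obj(payload)
assert resp.items[0].type == "invoice" and resp.unknown_count == 1
```
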
llama_cloud/types/data_source.py CHANGED
@@ -7,6 +7,7 @@ from ..core.datetime_utils import serialize_datetime
 from .configurable_data_source_names import ConfigurableDataSourceNames
 from .data_source_component import DataSourceComponent
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
+from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
 
 try:
     import pydantic
@@ -29,7 +30,7 @@ class DataSource(pydantic.BaseModel):
     source_type: ConfigurableDataSourceNames
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]]
     component: DataSourceComponent = pydantic.Field(description="Component that implements the data source")
-    version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
+    version_metadata: typing.Optional[DataSourceReaderVersionMetadata]
     project_id: str
 
     def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/data_source_reader_version_metadata.py ADDED
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class DataSourceReaderVersionMetadata(pydantic.BaseModel):
+    reader_version: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
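
Because `version_metadata` on `DataSource` above (and `PipelineDataSource`, below) changes from `Dict[str, Any]` to this model, dict lookups become attribute access; a minimal sketch with an invented value:

```python
# Minimal sketch: version_metadata is now typed, so attribute access replaces
# lookups like version_metadata["reader_version"]; "v2" is an invented value.
from llama_cloud.types import DataSourceReaderVersionMetadata

meta = DataSourceReaderVersionMetadata(reader_version="v2")
print(meta.reader_version)
```
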
llama_cloud/types/extract_config.py CHANGED
@@ -32,6 +32,9 @@ class ExtractConfig(pydantic.BaseModel):
     system_prompt: typing.Optional[str]
     use_reasoning: typing.Optional[bool] = pydantic.Field(description="Whether to use reasoning for the extraction.")
     cite_sources: typing.Optional[bool] = pydantic.Field(description="Whether to cite sources for the extraction.")
+    confidence_scores: typing.Optional[bool] = pydantic.Field(
+        description="Whether to fetch confidence scores for the extraction."
+    )
     chunk_mode: typing.Optional[DocumentChunkMode] = pydantic.Field(
         description="The mode to use for chunking the document."
     )
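
A hedged sketch of opting into the new flag, assuming `ExtractConfig` is re-exported from `llama_cloud.types` like the other models here and that the fields not shown in this hunk are likewise optional:

```python
# Hedged sketch: enabling the new confidence_scores flag; assumes the other
# ExtractConfig fields are optional, as the surrounding hunk suggests.
from llama_cloud.types import ExtractConfig

config = ExtractConfig(confidence_scores=True, cite_sources=True)
print(config.dict())
```
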
llama_cloud/types/pipeline_data_source.py CHANGED
@@ -5,6 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .configurable_data_source_names import ConfigurableDataSourceNames
+from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
 from .pipeline_data_source_component import PipelineDataSourceComponent
 from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 from .pipeline_data_source_status import PipelineDataSourceStatus
@@ -30,7 +31,7 @@ class PipelineDataSource(pydantic.BaseModel):
     source_type: ConfigurableDataSourceNames
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]]
     component: PipelineDataSourceComponent = pydantic.Field(description="Component that implements the data source")
-    version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
+    version_metadata: typing.Optional[DataSourceReaderVersionMetadata]
     project_id: str
     data_source_id: str = pydantic.Field(description="The ID of the data source.")
     pipeline_id: str = pydantic.Field(description="The ID of the pipeline.")

llama_cloud/types/pipeline_file_updater_config.py CHANGED
@@ -29,9 +29,6 @@ class PipelineFileUpdaterConfig(pydantic.BaseModel):
     data_source_project_file_changed: typing.Optional[bool] = pydantic.Field(
         description="Whether the data source project file has changed"
     )
-    should_migrate_pipeline_file_to_external_file_id: typing.Optional[bool] = pydantic.Field(
-        description="Whether to migrate the pipeline file to the external file id"
-    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

llama_cloud/types/struct_parse_conf.py CHANGED
@@ -33,6 +33,9 @@ class StructParseConf(pydantic.BaseModel):
     struct_mode: typing.Optional[StructMode] = pydantic.Field(
         description="The struct mode to use for the structured parsing."
     )
+    fetch_logprobs: typing.Optional[bool] = pydantic.Field(
+        description="Whether to fetch logprobs for the structured parsing."
+    )
     handle_missing: typing.Optional[bool] = pydantic.Field(
         description="Whether to handle missing fields in the schema."
     )

llama_cloud-0.1.32.dist-info/METADATA → llama_cloud-0.1.34.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: llama-cloud
-Version: 0.1.32
+Version: 0.1.34
 Summary:
 License: MIT
 Author: Logan Markewich

1
- llama_cloud/__init__.py,sha256=T-HghZZ4yA4QPgXeEvHQsmp5o8o1K2amrf7SftKYwE4,25511
2
- llama_cloud/client.py,sha256=VNO5-JE1H0zWJudlDA9GJ2N6qEKQvxN5Q5QgVNTQPSI,5893
1
+ llama_cloud/__init__.py,sha256=IYnzCBris1anU_Welfu0tW6QXK8YepHdjsC0U5HxZIY,25735
2
+ llama_cloud/client.py,sha256=6kvyLEhvgy6TJfhm3VGvbtdQsjgpi51289Q13K9WDK0,6188
3
3
  llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
4
4
  llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
5
5
  llama_cloud/core/client_wrapper.py,sha256=xmj0jCdQ0ySzbSqHUWOkpRRy069y74I_HuXkWltcsVM,1507
@@ -9,13 +9,17 @@ llama_cloud/core/remove_none_from_dict.py,sha256=8m91FC3YuVem0Gm9_sXhJ2tGvP33owJ
9
9
  llama_cloud/environment.py,sha256=feTjOebeFZMrBdnHat4RE5aHlpt-sJm4NhK4ntV1htI,167
10
10
  llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6g,170
11
11
  llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
12
- llama_cloud/resources/__init__.py,sha256=cFMt4FZb8n6SMbRXYzYqIR-PlJbO7C-jX4iBeCym_8E,4179
12
+ llama_cloud/resources/__init__.py,sha256=oa1g-G2rd9TLuef9tVaX3wycQgTQ05zIbfP3l8umG-k,4227
13
13
  llama_cloud/resources/admin/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
14
14
  llama_cloud/resources/admin/client.py,sha256=mzA_ezCjugKNmvWCMWEF0Z0k86ErACWov1VtPV1J2tU,3678
15
+ llama_cloud/resources/agent_deployments/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
16
+ llama_cloud/resources/agent_deployments/client.py,sha256=3EOzOjmRs4KISgJ566enq3FCuN3YtskjO0OHqQGtkQ0,6122
15
17
  llama_cloud/resources/beta/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
16
- llama_cloud/resources/beta/client.py,sha256=uJO08z4WF3I_tVyZEu0SiwfeSx3iQaTUPZkoh6Pevs8,39144
18
+ llama_cloud/resources/beta/client.py,sha256=iDbMr3dQJd6ZMEOC_vbyZhm4m_s3VM5nwneuuMDgJeE,39028
17
19
  llama_cloud/resources/chat_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
18
20
  llama_cloud/resources/chat_apps/client.py,sha256=orSI8rpQbUwVEToolEeiEi5Qe--suXFvfu6D9JDii5I,23595
21
+ llama_cloud/resources/classifier/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
22
+ llama_cloud/resources/classifier/client.py,sha256=EJyTdjuKhESP1Ew_kEOP_GUz2o1I_Zh2xnGyjJkA5iI,11804
19
23
  llama_cloud/resources/data_sinks/__init__.py,sha256=ZHUjn3HbKhq_7QS1q74r2m5RGKF5lxcvF2P6pGvpcis,147
20
24
  llama_cloud/resources/data_sinks/client.py,sha256=GpD6FhbGqkg2oUToyMG6J8hPxG_iG7W5ZJRo0qg3yzk,20639
21
25
  llama_cloud/resources/data_sinks/types/__init__.py,sha256=M1aTcufJwiEZo9B0KmYj9PfkSd6I1ooFt9tpIRGwgg8,168
@@ -39,8 +43,6 @@ llama_cloud/resources/files/types/file_create_permission_info_value.py,sha256=KP
39
43
  llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-CJf7fnbvIqE3xOI5XOrmPwLbVJLC7zpxMu8Zopk,201
40
44
  llama_cloud/resources/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
41
45
  llama_cloud/resources/jobs/client.py,sha256=gv_N8e0lay7cjt6MCwx-Cj4FiCXKhbyCDaWbadaJpgY,6270
42
- llama_cloud/resources/llama_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
43
- llama_cloud/resources/llama_apps/client.py,sha256=snJGm761NcuTTGUuERT3DBL8w3VQCMUyfcanIsWcnqM,6080
44
46
  llama_cloud/resources/llama_extract/__init__.py,sha256=jRUugj6XARMpKZi3e2RkfTdcOSuE-Zy0IfScRLlyYMs,819
45
47
  llama_cloud/resources/llama_extract/client.py,sha256=i6m2sDv540ZrLWYcxjAbkTWPYlNtNx7CY1AhX5ol1ps,71971
46
48
  llama_cloud/resources/llama_extract/types/__init__.py,sha256=ZRBD-jg1qdXyiJKTxgH7zaadoDzuof1TYpjK4P5z4zA,1216
@@ -53,7 +55,7 @@ llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_o
53
55
  llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py,sha256=uMqpKJdCmUNtryS2bkQTNA1AgDlWdtsBOP31iMt3zNA,346
54
56
  llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py,sha256=cUS7ez5r0Vx8T7SxwLYptZMmvpT5JoDVMyn54Q6VL-g,227
55
57
  llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
56
- llama_cloud/resources/organizations/client.py,sha256=yJ2TYvr7tPRS_Zhdb_IbknKo8aIIRSWm-63d0nh535s,56597
58
+ llama_cloud/resources/organizations/client.py,sha256=RoN-nkN7VeRZnrrElXhaPrgQFzGMHgNY41_XpbCXP0g,56623
57
59
  llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
58
60
  llama_cloud/resources/parsing/client.py,sha256=EHrQKjOl_VPPbcbaXi5TSah8HBf7ooHijhMF7IEzBMg,88117
59
61
  llama_cloud/resources/pipelines/__init__.py,sha256=zyvVEOF_krvEZkCIj_kZoMKfhDqHo_R32a1mv9CriQc,1193
@@ -71,7 +73,7 @@ llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwH
71
73
  llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
72
74
  llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
73
75
  llama_cloud/resources/retrievers/client.py,sha256=z2LhmA-cZVFzr9P6loeCZYnJbvSIk0QitFeVFp-IyZk,32126
74
- llama_cloud/types/__init__.py,sha256=ZYnUvMdFPye-wlq-XeyWUmhtVeLpi8c0UR0vSemiHP4,30490
76
+ llama_cloud/types/__init__.py,sha256=Ve6yUuDwvdy3w68HcO_USG_z2ulG2GcvRBUC7dUNgow,30766
75
77
  llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
76
78
  llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
77
79
  llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -100,6 +102,8 @@ llama_cloud/types/chat_app.py,sha256=fLuzYkXLq51C_Y23hoLwfmG-OiT7jlyHt2JGe6-f1IA
100
102
  llama_cloud/types/chat_app_response.py,sha256=WSKr1KI9_pGTSstr3I53kZ8qb3y87Q4ulh8fR0C7sSU,1784
101
103
  llama_cloud/types/chat_data.py,sha256=ZYqVtjXF6qPGajU4IWZu3InpU54TXJwBFiqxBepylP0,1197
102
104
  llama_cloud/types/chunk_mode.py,sha256=J4vqAQfQG6PWsIv1Fe_99nVsAfDbv_P81_KVsJ9AkU4,790
105
+ llama_cloud/types/classification_result.py,sha256=aRuD2xfIQQUxGsW1jFA091b4SZFTnDFDrJxv3z0kP5E,1425
106
+ llama_cloud/types/classify_response.py,sha256=qhw71pDfClb9karjfP2cmZHbRBZgm1i6pWUM7r7IF8o,1467
103
107
  llama_cloud/types/cloud_az_storage_blob_data_source.py,sha256=NT4cYsD1M868_bSJxKM9cvTMtjQtQxKloE4vRv8_lwg,1534
104
108
  llama_cloud/types/cloud_azure_ai_search_vector_store.py,sha256=9GTaft7BaKsR9RJQp5dlpbslXUlTMA1AcDdKV1ApfqI,1513
105
109
  llama_cloud/types/cloud_box_data_source.py,sha256=9bffCaKGvctSsk9OdTpzzP__O1NDpb9wdvKY2uwjpwY,1470
@@ -130,12 +134,13 @@ llama_cloud/types/data_sink.py,sha256=PeexYHHoD8WkVp9WsFtfC-AIWszcgeJUprG1bwC8Ws
130
134
  llama_cloud/types/data_sink_component.py,sha256=uvuxLY3MPDpv_bkT0y-tHSZVPRSHCkDBDHVff-036Dg,749
131
135
  llama_cloud/types/data_sink_create.py,sha256=dAaFPCwZ5oX0Fbf7ij62dzSaYnrhj3EHmnLnYnw2KgI,1360
132
136
  llama_cloud/types/data_sink_create_component.py,sha256=8QfNKSTJV_sQ0nJxlpfh0fBkMTSnQD1DTJR8ZMYaesI,755
133
- llama_cloud/types/data_source.py,sha256=4_lTRToLO4u9LYK66VygCPycrZuyct_aiovlxG5H2sE,1768
137
+ llama_cloud/types/data_source.py,sha256=QkJsQBlLt7cX0FxYuNF1w9yZw1BnNcGiQTTfMAuxiEM,1852
134
138
  llama_cloud/types/data_source_component.py,sha256=QBxAneOFe8crS0z-eFo3gd1siToQ4hYsLdfB4p3ZeVU,974
135
139
  llama_cloud/types/data_source_create.py,sha256=s0bAX_GUwiRdrL-PXS9ROrvq3xpmqbqzdMa6thqL2P4,1581
136
140
  llama_cloud/types/data_source_create_component.py,sha256=6dlkvut0gyy6JA_F4--xPHYOCHi14N6oooWOnOEugzE,980
137
141
  llama_cloud/types/data_source_create_custom_metadata_value.py,sha256=ejSsQNbszYQaUWFh9r9kQpHf88qbhuRv1SI9J_MOSC0,215
138
142
  llama_cloud/types/data_source_custom_metadata_value.py,sha256=pTZn5yjZYmuOhsLABFJOKZblZUkRqo1CqLAuP5tKji4,209
143
+ llama_cloud/types/data_source_reader_version_metadata.py,sha256=zP2hkqne17Vg2rd-mOXw-MpOGPmz-y82lNERFpz6E3g,1007
139
144
  llama_cloud/types/data_source_update_dispatcher_config.py,sha256=Sh6HhXfEV2Z6PYhkYQucs2MxyKVpL3UPV-I4cbf--bA,1242
140
145
  llama_cloud/types/delete_params.py,sha256=1snPrd3WO9C1bKf0WdMslE2HQMF0yYLI3U7N53cmurM,1285
141
146
  llama_cloud/types/document_block.py,sha256=OYKd5M3LgJ0Cz0K0YNuVRoHz9HcUdVuf2Vcqku8fck4,1116
@@ -151,7 +156,7 @@ llama_cloud/types/embedding_model_config_update_embedding_config.py,sha256=mrXFx
151
156
  llama_cloud/types/eval_execution_params.py,sha256=ntVaJh5SMZMPL4QLUiihVjUlg2SKbrezvbMKGlrF66Q,1369
152
157
  llama_cloud/types/extract_agent.py,sha256=T98IOueut4M52Qm7hqcUOcWFFDhZ-ye0OFdXgfFGtS4,1763
153
158
  llama_cloud/types/extract_agent_data_schema_value.py,sha256=UaDQ2KjajLDccW7F4NKdfpefeTJrr1hl0c95WRETYkM,201
154
- llama_cloud/types/extract_config.py,sha256=pYErVV6Lq4VteqO3Wxu4exCfiGnJ9_aqSuXiLuNI6JE,2194
159
+ llama_cloud/types/extract_config.py,sha256=LIkZK7kPiXMun9wrGpjJiMXVpTWmPfvZLx-u2sDWEp0,2340
155
160
  llama_cloud/types/extract_config_priority.py,sha256=btl5lxl25Ve6_lTbQzQyjOKle8XoY0r16lk3364c3uw,795
156
161
  llama_cloud/types/extract_job.py,sha256=Yx4fDdCdylAji2LPTwqflVpz1o9slpj9tTLS93-1tzU,1431
157
162
  llama_cloud/types/extract_job_create.py,sha256=yLtrh46fsK8Q2_hz8Ub3mvGriSn5BI2OjjwpWRy5YsA,1680
@@ -268,7 +273,7 @@ llama_cloud/types/pipeline_configuration_hashes.py,sha256=7_MbOcPWV6iyMflJeXoo9v
268
273
  llama_cloud/types/pipeline_create.py,sha256=PKchM5cxkidXVFv2qON0uVh5lv8aqsy5OrZvT5UzqTU,2496
269
274
  llama_cloud/types/pipeline_create_embedding_config.py,sha256=PQqmVBFUyZXYKKBmVQF2zPsGp1L6rje6g3RtXEcdfc8,2811
270
275
  llama_cloud/types/pipeline_create_transform_config.py,sha256=HP6tzLsw_pomK1Ye2PYCS_XDZK_TMgg22mz17_zYKFg,303
271
- llama_cloud/types/pipeline_data_source.py,sha256=g8coq6ohp09TtqzvB3_A8Nzery3J5knIfxGWzUtozmg,2381
276
+ llama_cloud/types/pipeline_data_source.py,sha256=iKB2NgpWQTl_rNDCvnXjNyd0gzohqwfCnupzWYT_CTE,2465
272
277
  llama_cloud/types/pipeline_data_source_component.py,sha256=pcAIb6xuRJajDVBF_a4_2USPLtZ8ve-WQvSdKKQu50Q,982
273
278
  llama_cloud/types/pipeline_data_source_create.py,sha256=wMsymqB-YGyf3jdQr-N5ODVG6v0w68EMxGBNdQXeJe0,1178
274
279
  llama_cloud/types/pipeline_data_source_custom_metadata_value.py,sha256=8n3r60sxMx4_udW0yzJZxzyWeK6L3cc2-jLGZFW4EDs,217
@@ -284,7 +289,7 @@ llama_cloud/types/pipeline_file_permission_info_value.py,sha256=a9yfg5n9po0-4ljG
284
289
  llama_cloud/types/pipeline_file_resource_info_value.py,sha256=s3uFGQNwlUEr-X4TJZkW_kMBvX3h1sXRJoYlJRvHSDc,209
285
290
  llama_cloud/types/pipeline_file_status.py,sha256=7AJOlwqZVcsk6aPF6Q-x7UzjdzdBj4FeXAZ4m35Bb5M,1003
286
291
  llama_cloud/types/pipeline_file_update_dispatcher_config.py,sha256=PiJ1brbKGyq07GmD2VouFfm_Y3KShiyhBXJkwFJsKXw,1222
287
- llama_cloud/types/pipeline_file_updater_config.py,sha256=KMHBYpH3fYDQaDVvxVgckosiWz0Dl3v5dC53Cgnmtb8,1761
292
+ llama_cloud/types/pipeline_file_updater_config.py,sha256=TFVPzCeXDBIPBOdjCmTh7KZX9bqO1NiIT48_8pTELOE,1578
288
293
  llama_cloud/types/pipeline_managed_ingestion_job_params.py,sha256=ahliOe6YnLI-upIq1v5HZd9p8xH6pPdkh2M_n_zM9TA,1180
289
294
  llama_cloud/types/pipeline_metadata_config.py,sha256=yMnPu6FnhagjuJ_rQ756WbIvVG5dzyXT1fmCYUAmCS0,1291
290
295
  llama_cloud/types/pipeline_status.py,sha256=aC340nhfuPSrFVZOH_DhgYHWe985J3WNHrwvUtjXTRA,481
@@ -334,7 +339,7 @@ llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9
334
339
  llama_cloud/types/src_app_schema_chat_chat_message.py,sha256=ddMQXZybeExPVFMNe8FWghyXXWktsujpZ_0Xmou3Zz8,1596
335
340
  llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDPvx0,1018
336
341
  llama_cloud/types/struct_mode.py,sha256=ROicwjXfFmgVU8_xSVxJlnFUzRNKG5VIEF1wYg9uOPU,1020
337
- llama_cloud/types/struct_parse_conf.py,sha256=WlL8y0IBvdzGsDtFUlEZLzoUODwmOWAJi0viS9unL18,2297
342
+ llama_cloud/types/struct_parse_conf.py,sha256=3QQBy8VP9JB16d4fTGK_GiU6PUALIOWCN9GYI3in6ic,2439
338
343
  llama_cloud/types/supported_llm_model.py,sha256=hubSopFICVNEegbJbtbpK6zRHwFPwUNtrw_NAw_3bfg,1380
339
344
  llama_cloud/types/supported_llm_model_names.py,sha256=PXL0gA1lc0GJNzZHnjOscoxHpPW787A8Adh-2egAKo8,2512
340
345
  llama_cloud/types/text_block.py,sha256=X154sQkSyposXuRcEWNp_tWcDQ-AI6q_-MfJUN5exP8,958
@@ -358,7 +363,7 @@ llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2B
358
363
  llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
359
364
  llama_cloud/types/webhook_configuration.py,sha256=_Xm15whrWoKNBuCoO5y_NunA-ByhCAYK87LnC4W-Pzg,1350
360
365
  llama_cloud/types/webhook_configuration_webhook_events_item.py,sha256=LTfOwphnoYUQYwsHGTlCxoVU_PseIRAbmQJRBdyXnbg,1519
361
- llama_cloud-0.1.32.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
362
- llama_cloud-0.1.32.dist-info/METADATA,sha256=1nAROO_4DqpEvwvY8WwhsVU5rRTnFx9K08C3-G6b7H0,1194
363
- llama_cloud-0.1.32.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
364
- llama_cloud-0.1.32.dist-info/RECORD,,
366
+ llama_cloud-0.1.34.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
367
+ llama_cloud-0.1.34.dist-info/METADATA,sha256=ppcOOqix2JsINMcHaep9YetTNGClITBAESbX30Iyqqw,1194
368
+ llama_cloud-0.1.34.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
369
+ llama_cloud-0.1.34.dist-info/RECORD,,