llama-cloud 0.1.32__py3-none-any.whl → 0.1.33__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of llama-cloud has been flagged as a potentially problematic release.
llama_cloud/__init__.py CHANGED
@@ -37,6 +37,8 @@ from .types import (
     ChatAppResponse,
     ChatData,
     ChunkMode,
+    ClassificationResult,
+    ClassifyResponse,
     CloudAzStorageBlobDataSource,
     CloudAzureAiSearchVectorStore,
     CloudBoxDataSource,
@@ -379,15 +381,16 @@ from .resources import (
     RetrievalParamsSearchFiltersInferenceSchemaValue,
     UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction,
     admin,
+    agent_deployments,
     beta,
     chat_apps,
+    classifier,
     data_sinks,
     data_sources,
     embedding_model_configs,
     evals,
     files,
     jobs,
-    llama_apps,
     llama_extract,
     organizations,
     parsing,
@@ -435,6 +438,8 @@ __all__ = [
     "ChatAppResponse",
     "ChatData",
     "ChunkMode",
+    "ClassificationResult",
+    "ClassifyResponse",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudBoxDataSource",
@@ -776,15 +781,16 @@ __all__ = [
     "WebhookConfiguration",
     "WebhookConfigurationWebhookEventsItem",
     "admin",
+    "agent_deployments",
    "beta",
    "chat_apps",
+    "classifier",
    "data_sinks",
    "data_sources",
    "embedding_model_configs",
    "evals",
    "files",
    "jobs",
-    "llama_apps",
    "llama_extract",
    "organizations",
    "parsing",
llama_cloud/client.py CHANGED
@@ -7,15 +7,16 @@ import httpx
 from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from .environment import LlamaCloudEnvironment
 from .resources.admin.client import AdminClient, AsyncAdminClient
+from .resources.agent_deployments.client import AgentDeploymentsClient, AsyncAgentDeploymentsClient
 from .resources.beta.client import AsyncBetaClient, BetaClient
 from .resources.chat_apps.client import AsyncChatAppsClient, ChatAppsClient
+from .resources.classifier.client import AsyncClassifierClient, ClassifierClient
 from .resources.data_sinks.client import AsyncDataSinksClient, DataSinksClient
 from .resources.data_sources.client import AsyncDataSourcesClient, DataSourcesClient
 from .resources.embedding_model_configs.client import AsyncEmbeddingModelConfigsClient, EmbeddingModelConfigsClient
 from .resources.evals.client import AsyncEvalsClient, EvalsClient
 from .resources.files.client import AsyncFilesClient, FilesClient
 from .resources.jobs.client import AsyncJobsClient, JobsClient
-from .resources.llama_apps.client import AsyncLlamaAppsClient, LlamaAppsClient
 from .resources.llama_extract.client import AsyncLlamaExtractClient, LlamaExtractClient
 from .resources.organizations.client import AsyncOrganizationsClient, OrganizationsClient
 from .resources.parsing.client import AsyncParsingClient, ParsingClient
@@ -52,7 +53,8 @@ class LlamaCloud:
         self.evals = EvalsClient(client_wrapper=self._client_wrapper)
         self.parsing = ParsingClient(client_wrapper=self._client_wrapper)
         self.chat_apps = ChatAppsClient(client_wrapper=self._client_wrapper)
-        self.llama_apps = LlamaAppsClient(client_wrapper=self._client_wrapper)
+        self.agent_deployments = AgentDeploymentsClient(client_wrapper=self._client_wrapper)
+        self.classifier = ClassifierClient(client_wrapper=self._client_wrapper)
         self.admin = AdminClient(client_wrapper=self._client_wrapper)
         self.llama_extract = LlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = ReportsClient(client_wrapper=self._client_wrapper)
@@ -86,7 +88,8 @@ class AsyncLlamaCloud:
         self.evals = AsyncEvalsClient(client_wrapper=self._client_wrapper)
         self.parsing = AsyncParsingClient(client_wrapper=self._client_wrapper)
         self.chat_apps = AsyncChatAppsClient(client_wrapper=self._client_wrapper)
-        self.llama_apps = AsyncLlamaAppsClient(client_wrapper=self._client_wrapper)
+        self.agent_deployments = AsyncAgentDeploymentsClient(client_wrapper=self._client_wrapper)
+        self.classifier = AsyncClassifierClient(client_wrapper=self._client_wrapper)
         self.admin = AsyncAdminClient(client_wrapper=self._client_wrapper)
         self.llama_extract = AsyncLlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = AsyncReportsClient(client_wrapper=self._client_wrapper)
llama_cloud/resources/__init__.py CHANGED
@@ -2,15 +2,16 @@
 
 from . import (
     admin,
+    agent_deployments,
     beta,
     chat_apps,
+    classifier,
     data_sinks,
     data_sources,
     embedding_model_configs,
     evals,
     files,
     jobs,
-    llama_apps,
     llama_extract,
     organizations,
     parsing,
@@ -93,15 +94,16 @@ __all__ = [
     "RetrievalParamsSearchFiltersInferenceSchemaValue",
     "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
     "admin",
+    "agent_deployments",
     "beta",
     "chat_apps",
+    "classifier",
     "data_sinks",
     "data_sources",
     "embedding_model_configs",
     "evals",
     "files",
     "jobs",
-    "llama_apps",
     "llama_extract",
     "organizations",
     "parsing",
llama_cloud/resources/{llama_apps → agent_deployments}/client.py RENAMED
@@ -18,7 +18,7 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class LlamaAppsClient:
+class AgentDeploymentsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
@@ -34,7 +34,7 @@ class LlamaAppsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.llama_apps.list_deployments(
+        client.agent_deployments.list_deployments(
             project_id="string",
         )
         """
@@ -66,7 +66,7 @@ class LlamaAppsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.llama_apps.sync_deployments(
+        client.agent_deployments.sync_deployments(
             project_id="string",
         )
         """
@@ -89,7 +89,7 @@ class LlamaAppsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
 
-class AsyncLlamaAppsClient:
+class AsyncAgentDeploymentsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
@@ -105,7 +105,7 @@ class AsyncLlamaAppsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.llama_apps.list_deployments(
+        await client.agent_deployments.list_deployments(
             project_id="string",
         )
         """
@@ -137,7 +137,7 @@ class AsyncLlamaAppsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.llama_apps.sync_deployments(
+        await client.agent_deployments.sync_deployments(
             project_id="string",
         )
         """
llama_cloud/resources/beta/client.py CHANGED
@@ -308,7 +308,7 @@ class BetaClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def create_agent_data_api_v_1_beta_agent_data_post(
+    def create_agent_data(
         self, *, agent_slug: str, collection: typing.Optional[str] = OMIT, data: typing.Dict[str, typing.Any]
     ) -> AgentData:
         """
@@ -326,7 +326,7 @@ class BetaClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.beta.create_agent_data_api_v_1_beta_agent_data_post(
+        client.beta.create_agent_data(
             agent_slug="string",
             data={"string": {}},
         )
@@ -785,7 +785,7 @@ class AsyncBetaClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def create_agent_data_api_v_1_beta_agent_data_post(
+    async def create_agent_data(
         self, *, agent_slug: str, collection: typing.Optional[str] = OMIT, data: typing.Dict[str, typing.Any]
     ) -> AgentData:
         """
@@ -803,7 +803,7 @@ class AsyncBetaClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.beta.create_agent_data_api_v_1_beta_agent_data_post(
+        await client.beta.create_agent_data(
             agent_slug="string",
             data={"string": {}},
         )
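
Only the method name changes here; the keyword arguments stay the same. A before/after sketch with placeholder values, mirroring the docstring examples in the diff:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# 0.1.32 (removed): the raw auto-generated route name
#   client.beta.create_agent_data_api_v_1_beta_agent_data_post(...)

# 0.1.33: same arguments, shorter name
agent_data = client.beta.create_agent_data(
    agent_slug="my-agent",         # placeholder slug
    data={"status": "processed"},  # arbitrary JSON-serializable payload
)
```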
llama_cloud/resources/classifier/__init__.py ADDED
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
llama_cloud/resources/classifier/client.py ADDED
@@ -0,0 +1,290 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.classify_response import ClassifyResponse
+from ...types.http_validation_error import HttpValidationError
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ClassifierClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def classify_documents(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        rules_json: str,
+        files: typing.Optional[typing.List[str]] = OMIT,
+        file_ids: typing.Optional[str] = OMIT,
+        matching_threshold: typing.Optional[float] = OMIT,
+        enable_metadata_heuristic: typing.Optional[bool] = OMIT,
+    ) -> ClassifyResponse:
+        """
+        **[BETA]** Classify documents based on provided rules - simplified classification system.
+
+        **This is a Beta feature** - API may change based on user feedback.
+
+        This endpoint supports:
+
+        - Classifying new uploaded files
+        - Classifying existing files by ID
+        - Both new files and existing file IDs in one request
+
+        ## v0 Features:
+
+        - **Simplified Rules**: Only `type` and `description` fields needed
+        - **Matching Threshold**: Confidence-based classification with configurable threshold
+        - **Smart Classification**: Filename heuristics + LLM content analysis
+        - **Document Type Filtering**: Automatically filters out non-document file types
+        - **Fast Processing**: Uses LlamaParse fast mode + GPT-4.1-nano
+        - **Optimized Performance**: Parses each file only once for all rules
+
+        ## Simplified Scoring Logic:
+
+        1. **Evaluate All Rules**: Compare document against all classification rules
+        2. **Best Match Selection**: Return the highest scoring rule above matching_threshold
+        3. **Unknown Classification**: Return as "unknown" if no rules score above threshold
+
+        This ensures optimal classification by:
+
+        - Finding the best possible match among all rules
+        - Avoiding false positives with confidence thresholds
+        - Maximizing performance with single-pass file parsing
+
+        ## Rule Format:
+
+        ```json
+        [
+            {
+                "type": "invoice",
+                "description": "contains invoice number, line items, and total amount"
+            },
+            {
+                "type": "receipt",
+                "description": "purchase receipt with transaction details and payment info"
+            }
+        ]
+        ```
+
+        ## Classification Process:
+
+        1. **Metadata Heuristics** (configurable via API):
+           - **Document Type Filter**: Only process document file types (PDF, DOC, DOCX, RTF, TXT, ODT, Pages, HTML, XML, Markdown)
+           - **Filename Heuristics**: Check if rule type appears in filename
+           - **Content Analysis**: Parse document content once and use LLM for semantic matching against all rules
+        2. **Result**: Returns type, confidence score, and matched rule information
+
+        ## API Parameters:
+
+        - `matching_threshold` (0.1-0.99, default: 0.6): Minimum confidence threshold for acceptable matches
+        - `enable_metadata_heuristic` (boolean, default: true): Enable metadata-based features
+
+        ## Supported Document Types:
+
+        **Text Documents**: pdf, doc, docx, rtf, txt, odt, pages
+        **Web Documents**: html, htm, xml
+        **Markup**: md, markdown
+
+        ## Limits (Beta):
+
+        - Maximum 100 files per request
+        - Maximum 10 rules per request
+        - Rule descriptions: 10-500 characters
+        - Document types: 1-50 characters (alphanumeric, hyphens, underscores)
+
+        **Beta Notice**: This API is subject to change. Please provide feedback!
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - rules_json: str. JSON string containing classifier rules
+
+            - files: typing.Optional[typing.List[str]].
+
+            - file_ids: typing.Optional[str].
+
+            - matching_threshold: typing.Optional[float].
+
+            - enable_metadata_heuristic: typing.Optional[bool].
+        """
+        _request: typing.Dict[str, typing.Any] = {"rules_json": rules_json}
+        if files is not OMIT:
+            _request["files"] = files
+        if file_ids is not OMIT:
+            _request["file_ids"] = file_ids
+        if matching_threshold is not OMIT:
+            _request["matching_threshold"] = matching_threshold
+        if enable_metadata_heuristic is not OMIT:
+            _request["enable_metadata_heuristic"] = enable_metadata_heuristic
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/classify"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ClassifyResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncClassifierClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def classify_documents(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        rules_json: str,
+        files: typing.Optional[typing.List[str]] = OMIT,
+        file_ids: typing.Optional[str] = OMIT,
+        matching_threshold: typing.Optional[float] = OMIT,
+        enable_metadata_heuristic: typing.Optional[bool] = OMIT,
+    ) -> ClassifyResponse:
+        """
+        **[BETA]** Classify documents based on provided rules - simplified classification system.
+
+        **This is a Beta feature** - API may change based on user feedback.
+
+        This endpoint supports:
+
+        - Classifying new uploaded files
+        - Classifying existing files by ID
+        - Both new files and existing file IDs in one request
+
+        ## v0 Features:
+
+        - **Simplified Rules**: Only `type` and `description` fields needed
+        - **Matching Threshold**: Confidence-based classification with configurable threshold
+        - **Smart Classification**: Filename heuristics + LLM content analysis
+        - **Document Type Filtering**: Automatically filters out non-document file types
+        - **Fast Processing**: Uses LlamaParse fast mode + GPT-4.1-nano
+        - **Optimized Performance**: Parses each file only once for all rules
+
+        ## Simplified Scoring Logic:
+
+        1. **Evaluate All Rules**: Compare document against all classification rules
+        2. **Best Match Selection**: Return the highest scoring rule above matching_threshold
+        3. **Unknown Classification**: Return as "unknown" if no rules score above threshold
+
+        This ensures optimal classification by:
+
+        - Finding the best possible match among all rules
+        - Avoiding false positives with confidence thresholds
+        - Maximizing performance with single-pass file parsing
+
+        ## Rule Format:
+
+        ```json
+        [
+            {
+                "type": "invoice",
+                "description": "contains invoice number, line items, and total amount"
+            },
+            {
+                "type": "receipt",
+                "description": "purchase receipt with transaction details and payment info"
+            }
+        ]
+        ```
+
+        ## Classification Process:
+
+        1. **Metadata Heuristics** (configurable via API):
+           - **Document Type Filter**: Only process document file types (PDF, DOC, DOCX, RTF, TXT, ODT, Pages, HTML, XML, Markdown)
+           - **Filename Heuristics**: Check if rule type appears in filename
+           - **Content Analysis**: Parse document content once and use LLM for semantic matching against all rules
+        2. **Result**: Returns type, confidence score, and matched rule information
+
+        ## API Parameters:
+
+        - `matching_threshold` (0.1-0.99, default: 0.6): Minimum confidence threshold for acceptable matches
+        - `enable_metadata_heuristic` (boolean, default: true): Enable metadata-based features
+
+        ## Supported Document Types:
+
+        **Text Documents**: pdf, doc, docx, rtf, txt, odt, pages
+        **Web Documents**: html, htm, xml
+        **Markup**: md, markdown
+
+        ## Limits (Beta):
+
+        - Maximum 100 files per request
+        - Maximum 10 rules per request
+        - Rule descriptions: 10-500 characters
+        - Document types: 1-50 characters (alphanumeric, hyphens, underscores)
+
+        **Beta Notice**: This API is subject to change. Please provide feedback!
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - rules_json: str. JSON string containing classifier rules
+
+            - files: typing.Optional[typing.List[str]].
+
+            - file_ids: typing.Optional[str].
+
+            - matching_threshold: typing.Optional[float].
+
+            - enable_metadata_heuristic: typing.Optional[bool].
+        """
+        _request: typing.Dict[str, typing.Any] = {"rules_json": rules_json}
+        if files is not OMIT:
+            _request["files"] = files
+        if file_ids is not OMIT:
+            _request["file_ids"] = file_ids
+        if matching_threshold is not OMIT:
+            _request["matching_threshold"] = matching_threshold
+        if enable_metadata_heuristic is not OMIT:
+            _request["enable_metadata_heuristic"] = enable_metadata_heuristic
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/classifier/classify"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ClassifyResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
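
A usage sketch for the new endpoint, derived from the signature above: note that `rules_json` is a JSON *string* (hence the `json.dumps`), and `file_ids` is typed as a single string in this release. All IDs below are placeholders:

```python
import json

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# v0 rule format from the docstring: only `type` and `description`.
rules = [
    {"type": "invoice", "description": "contains invoice number, line items, and total amount"},
    {"type": "receipt", "description": "purchase receipt with transaction details and payment info"},
]

response = client.classifier.classify_documents(
    project_id="YOUR_PROJECT_ID",
    rules_json=json.dumps(rules),    # serialized rules, not a Python list
    file_ids="YOUR_FILE_ID",         # typed typing.Optional[str], i.e. one ID
    matching_threshold=0.6,          # default; documented range 0.1-0.99
    enable_metadata_heuristic=True,  # filename/file-type heuristics (default)
)
```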
llama_cloud/types/__init__.py CHANGED
@@ -40,6 +40,8 @@ from .chat_app import ChatApp
 from .chat_app_response import ChatAppResponse
 from .chat_data import ChatData
 from .chunk_mode import ChunkMode
+from .classification_result import ClassificationResult
+from .classify_response import ClassifyResponse
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_box_data_source import CloudBoxDataSource
@@ -400,6 +402,8 @@ __all__ = [
     "ChatAppResponse",
     "ChatData",
     "ChunkMode",
+    "ClassificationResult",
+    "ClassifyResponse",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudBoxDataSource",
llama_cloud/types/classification_result.py ADDED
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ClassificationResult(pydantic.BaseModel):
+    """
+    Result of classifying a single file.
+
+    Contains the classification outcome with confidence score and matched rule info.
+    """
+
+    file_id: str = pydantic.Field(description="The ID of the classified file")
+    type: str = pydantic.Field(description="The assigned document type ('unknown' if no rules matched)")
+    confidence: float = pydantic.Field(description="Confidence score of the classification (0.0-1.0)")
+    matched_rule: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/classify_response.py ADDED
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .classification_result import ClassificationResult
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ClassifyResponse(pydantic.BaseModel):
+    """
+    Response model for the classify endpoint following AIP-132 pagination standard.
+
+    Contains classification results with pagination support and summary statistics.
+    """
+
+    items: typing.List[ClassificationResult] = pydantic.Field(description="The list of items.")
+    next_page_token: typing.Optional[str]
+    total_size: typing.Optional[int]
+    unknown_count: int = pydantic.Field(description="Number of files that couldn't be classified")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
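
Together the two new models give the shape of a classification run: `ClassifyResponse.items` carries one `ClassificationResult` per file, plus AIP-132 pagination fields and an `unknown_count` summary. A sketch of consuming the hypothetical `response` from the classifier example above:

```python
# `response` is the ClassifyResponse from the earlier classifier sketch.
for result in response.items:
    # `type` falls back to "unknown" when no rule clears matching_threshold.
    print(
        f"{result.file_id}: {result.type} "
        f"(confidence={result.confidence:.2f}, rule={result.matched_rule})"
    )

print(f"{response.unknown_count} file(s) left unclassified")
if response.next_page_token is not None:
    # AIP-132 pagination: a further page of results exists.
    pass
```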
llama_cloud/types/pipeline_file_updater_config.py CHANGED
@@ -29,9 +29,6 @@ class PipelineFileUpdaterConfig(pydantic.BaseModel):
     data_source_project_file_changed: typing.Optional[bool] = pydantic.Field(
         description="Whether the data source project file has changed"
     )
-    should_migrate_pipeline_file_to_external_file_id: typing.Optional[bool] = pydantic.Field(
-        description="Whether to migrate the pipeline file to the external file id"
-    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud-0.1.32.dist-info/METADATA → llama_cloud-0.1.33.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: llama-cloud
-Version: 0.1.32
+Version: 0.1.33
 Summary:
 License: MIT
 Author: Logan Markewich
llama_cloud-0.1.32.dist-info/RECORD → llama_cloud-0.1.33.dist-info/RECORD RENAMED
@@ -1,5 +1,5 @@
-llama_cloud/__init__.py,sha256=T-HghZZ4yA4QPgXeEvHQsmp5o8o1K2amrf7SftKYwE4,25511
-llama_cloud/client.py,sha256=VNO5-JE1H0zWJudlDA9GJ2N6qEKQvxN5Q5QgVNTQPSI,5893
+llama_cloud/__init__.py,sha256=ox9Kw39O5VxO8WczDW0rXojbQwcL6v9bTT80745ME5I,25659
+llama_cloud/client.py,sha256=6kvyLEhvgy6TJfhm3VGvbtdQsjgpi51289Q13K9WDK0,6188
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
 llama_cloud/core/client_wrapper.py,sha256=xmj0jCdQ0ySzbSqHUWOkpRRy069y74I_HuXkWltcsVM,1507
@@ -9,13 +9,17 @@ llama_cloud/core/remove_none_from_dict.py,sha256=8m91FC3YuVem0Gm9_sXhJ2tGvP33owJ
 llama_cloud/environment.py,sha256=feTjOebeFZMrBdnHat4RE5aHlpt-sJm4NhK4ntV1htI,167
 llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6g,170
 llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
-llama_cloud/resources/__init__.py,sha256=cFMt4FZb8n6SMbRXYzYqIR-PlJbO7C-jX4iBeCym_8E,4179
+llama_cloud/resources/__init__.py,sha256=oa1g-G2rd9TLuef9tVaX3wycQgTQ05zIbfP3l8umG-k,4227
 llama_cloud/resources/admin/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/admin/client.py,sha256=mzA_ezCjugKNmvWCMWEF0Z0k86ErACWov1VtPV1J2tU,3678
+llama_cloud/resources/agent_deployments/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
+llama_cloud/resources/agent_deployments/client.py,sha256=3EOzOjmRs4KISgJ566enq3FCuN3YtskjO0OHqQGtkQ0,6122
 llama_cloud/resources/beta/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/beta/client.py,sha256=uJO08z4WF3I_tVyZEu0SiwfeSx3iQaTUPZkoh6Pevs8,39144
+llama_cloud/resources/beta/client.py,sha256=iDbMr3dQJd6ZMEOC_vbyZhm4m_s3VM5nwneuuMDgJeE,39028
 llama_cloud/resources/chat_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/chat_apps/client.py,sha256=orSI8rpQbUwVEToolEeiEi5Qe--suXFvfu6D9JDii5I,23595
+llama_cloud/resources/classifier/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
+llama_cloud/resources/classifier/client.py,sha256=EJyTdjuKhESP1Ew_kEOP_GUz2o1I_Zh2xnGyjJkA5iI,11804
 llama_cloud/resources/data_sinks/__init__.py,sha256=ZHUjn3HbKhq_7QS1q74r2m5RGKF5lxcvF2P6pGvpcis,147
 llama_cloud/resources/data_sinks/client.py,sha256=GpD6FhbGqkg2oUToyMG6J8hPxG_iG7W5ZJRo0qg3yzk,20639
 llama_cloud/resources/data_sinks/types/__init__.py,sha256=M1aTcufJwiEZo9B0KmYj9PfkSd6I1ooFt9tpIRGwgg8,168
@@ -39,8 +43,6 @@ llama_cloud/resources/files/types/file_create_permission_info_value.py,sha256=KP
 llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-CJf7fnbvIqE3xOI5XOrmPwLbVJLC7zpxMu8Zopk,201
 llama_cloud/resources/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/jobs/client.py,sha256=gv_N8e0lay7cjt6MCwx-Cj4FiCXKhbyCDaWbadaJpgY,6270
-llama_cloud/resources/llama_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/llama_apps/client.py,sha256=snJGm761NcuTTGUuERT3DBL8w3VQCMUyfcanIsWcnqM,6080
 llama_cloud/resources/llama_extract/__init__.py,sha256=jRUugj6XARMpKZi3e2RkfTdcOSuE-Zy0IfScRLlyYMs,819
 llama_cloud/resources/llama_extract/client.py,sha256=i6m2sDv540ZrLWYcxjAbkTWPYlNtNx7CY1AhX5ol1ps,71971
 llama_cloud/resources/llama_extract/types/__init__.py,sha256=ZRBD-jg1qdXyiJKTxgH7zaadoDzuof1TYpjK4P5z4zA,1216
@@ -71,7 +73,7 @@ llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwH
 llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
 llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/retrievers/client.py,sha256=z2LhmA-cZVFzr9P6loeCZYnJbvSIk0QitFeVFp-IyZk,32126
-llama_cloud/types/__init__.py,sha256=ZYnUvMdFPye-wlq-XeyWUmhtVeLpi8c0UR0vSemiHP4,30490
+llama_cloud/types/__init__.py,sha256=nmqOY9oVawaz9MfgPCLUGG27jC42tsgKXwji02Ig5kY,30646
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -100,6 +102,8 @@ llama_cloud/types/chat_app.py,sha256=fLuzYkXLq51C_Y23hoLwfmG-OiT7jlyHt2JGe6-f1IA
 llama_cloud/types/chat_app_response.py,sha256=WSKr1KI9_pGTSstr3I53kZ8qb3y87Q4ulh8fR0C7sSU,1784
 llama_cloud/types/chat_data.py,sha256=ZYqVtjXF6qPGajU4IWZu3InpU54TXJwBFiqxBepylP0,1197
 llama_cloud/types/chunk_mode.py,sha256=J4vqAQfQG6PWsIv1Fe_99nVsAfDbv_P81_KVsJ9AkU4,790
+llama_cloud/types/classification_result.py,sha256=aRuD2xfIQQUxGsW1jFA091b4SZFTnDFDrJxv3z0kP5E,1425
+llama_cloud/types/classify_response.py,sha256=qhw71pDfClb9karjfP2cmZHbRBZgm1i6pWUM7r7IF8o,1467
 llama_cloud/types/cloud_az_storage_blob_data_source.py,sha256=NT4cYsD1M868_bSJxKM9cvTMtjQtQxKloE4vRv8_lwg,1534
 llama_cloud/types/cloud_azure_ai_search_vector_store.py,sha256=9GTaft7BaKsR9RJQp5dlpbslXUlTMA1AcDdKV1ApfqI,1513
 llama_cloud/types/cloud_box_data_source.py,sha256=9bffCaKGvctSsk9OdTpzzP__O1NDpb9wdvKY2uwjpwY,1470
@@ -284,7 +288,7 @@ llama_cloud/types/pipeline_file_permission_info_value.py,sha256=a9yfg5n9po0-4ljG
 llama_cloud/types/pipeline_file_resource_info_value.py,sha256=s3uFGQNwlUEr-X4TJZkW_kMBvX3h1sXRJoYlJRvHSDc,209
 llama_cloud/types/pipeline_file_status.py,sha256=7AJOlwqZVcsk6aPF6Q-x7UzjdzdBj4FeXAZ4m35Bb5M,1003
 llama_cloud/types/pipeline_file_update_dispatcher_config.py,sha256=PiJ1brbKGyq07GmD2VouFfm_Y3KShiyhBXJkwFJsKXw,1222
-llama_cloud/types/pipeline_file_updater_config.py,sha256=KMHBYpH3fYDQaDVvxVgckosiWz0Dl3v5dC53Cgnmtb8,1761
+llama_cloud/types/pipeline_file_updater_config.py,sha256=TFVPzCeXDBIPBOdjCmTh7KZX9bqO1NiIT48_8pTELOE,1578
 llama_cloud/types/pipeline_managed_ingestion_job_params.py,sha256=ahliOe6YnLI-upIq1v5HZd9p8xH6pPdkh2M_n_zM9TA,1180
 llama_cloud/types/pipeline_metadata_config.py,sha256=yMnPu6FnhagjuJ_rQ756WbIvVG5dzyXT1fmCYUAmCS0,1291
 llama_cloud/types/pipeline_status.py,sha256=aC340nhfuPSrFVZOH_DhgYHWe985J3WNHrwvUtjXTRA,481
@@ -358,7 +362,7 @@ llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2B
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
 llama_cloud/types/webhook_configuration.py,sha256=_Xm15whrWoKNBuCoO5y_NunA-ByhCAYK87LnC4W-Pzg,1350
 llama_cloud/types/webhook_configuration_webhook_events_item.py,sha256=LTfOwphnoYUQYwsHGTlCxoVU_PseIRAbmQJRBdyXnbg,1519
-llama_cloud-0.1.32.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
-llama_cloud-0.1.32.dist-info/METADATA,sha256=1nAROO_4DqpEvwvY8WwhsVU5rRTnFx9K08C3-G6b7H0,1194
-llama_cloud-0.1.32.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-llama_cloud-0.1.32.dist-info/RECORD,,
+llama_cloud-0.1.33.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.33.dist-info/METADATA,sha256=dGeTMx4aPlxYbOoSOUe7DBl18ERrZjjtRhNTfqmllMg,1194
+llama_cloud-0.1.33.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+llama_cloud-0.1.33.dist-info/RECORD,,