llama-cloud 0.0.5__py3-none-any.whl → 0.0.6__py3-none-any.whl
This diff shows the content changes between publicly available versions of the package as released to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
- llama_cloud/__init__.py +4 -2
- llama_cloud/resources/pipelines/client.py +98 -22
- llama_cloud/types/__init__.py +4 -2
- llama_cloud/types/llama_parse_parameters.py +47 -0
- llama_cloud/types/{pipeline_file_status_response.py → managed_ingestion_status_response.py} +2 -8
- llama_cloud/types/pipeline.py +4 -0
- llama_cloud/types/pipeline_create.py +4 -0
- llama_cloud/types/project.py +0 -2
- {llama_cloud-0.0.5.dist-info → llama_cloud-0.0.6.dist-info}/METADATA +1 -1
- {llama_cloud-0.0.5.dist-info → llama_cloud-0.0.6.dist-info}/RECORD +12 -11
- {llama_cloud-0.0.5.dist-info → llama_cloud-0.0.6.dist-info}/WHEEL +1 -1
- {llama_cloud-0.0.5.dist-info → llama_cloud-0.0.6.dist-info}/LICENSE +0 -0
llama_cloud/__init__.py
CHANGED
@@ -63,12 +63,14 @@ from .types import (
     HuggingFaceInferenceApiEmbedding,
     HuggingFaceInferenceApiEmbeddingToken,
     JsonNodeParser,
+    LlamaParseParameters,
     LlamaParseSupportedFileExtensions,
     Llm,
     LocalEval,
     LocalEvalResults,
     LocalEvalSets,
     ManagedIngestionStatus,
+    ManagedIngestionStatusResponse,
     MarkdownElementNodeParser,
     MarkdownNodeParser,
     MessageRole,
@@ -100,7 +102,6 @@ from .types import (
     PipelineFileCreateCustomMetadataValue,
     PipelineFileCustomMetadataValue,
     PipelineFileResourceInfoValue,
-    PipelineFileStatusResponse,
     PipelineType,
     Pooling,
     PresetRetrievalParams,
@@ -215,12 +216,14 @@ __all__ = [
     "HuggingFaceInferenceApiEmbeddingToken",
     "JsonNodeParser",
     "LlamaCloudEnvironment",
+    "LlamaParseParameters",
     "LlamaParseSupportedFileExtensions",
     "Llm",
     "LocalEval",
     "LocalEvalResults",
     "LocalEvalSets",
     "ManagedIngestionStatus",
+    "ManagedIngestionStatusResponse",
     "MarkdownElementNodeParser",
     "MarkdownNodeParser",
     "MessageRole",
@@ -252,7 +255,6 @@ __all__ = [
     "PipelineFileCreateCustomMetadataValue",
     "PipelineFileCustomMetadataValue",
     "PipelineFileResourceInfoValue",
-    "PipelineFileStatusResponse",
     "PipelineFileUpdateCustomMetadataValue",
     "PipelineType",
     "Pooling",
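The net effect of these __init__.py changes is that the two new types are re-exported from the package root, while PipelineFileStatusResponse is no longer available. A minimal import sketch, assuming version 0.0.6 is installed:

    # Both names are exported from the top-level package in 0.0.6.
    from llama_cloud import LlamaParseParameters, ManagedIngestionStatusResponse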
llama_cloud/resources/pipelines/client.py
CHANGED
@@ -18,7 +18,8 @@ from ...types.eval_execution_params import EvalExecutionParams
 from ...types.eval_execution_params_override import EvalExecutionParamsOverride
 from ...types.eval_question_result import EvalQuestionResult
 from ...types.http_validation_error import HttpValidationError
-from ...types.
+from ...types.llama_parse_parameters import LlamaParseParameters
+from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
 from ...types.metadata_filters import MetadataFilters
 from ...types.pipeline import Pipeline
 from ...types.pipeline_create import PipelineCreate
@@ -27,7 +28,6 @@ from ...types.pipeline_data_source_create import PipelineDataSourceCreate
 from ...types.pipeline_deployment import PipelineDeployment
 from ...types.pipeline_file import PipelineFile
 from ...types.pipeline_file_create import PipelineFileCreate
-from ...types.pipeline_file_status_response import PipelineFileStatusResponse
 from ...types.pipeline_type import PipelineType
 from ...types.preset_retrieval_params import PresetRetrievalParams
 from ...types.retrieve_results import RetrieveResults
@@ -110,6 +110,7 @@ class PipelinesClient:
             DataSinkCreate,
             EvalExecutionParams,
             FilterCondition,
+            LlamaParseParameters,
             MetadataFilters,
             PipelineCreate,
             PipelineType,
@@ -136,6 +137,7 @@ class PipelinesClient:
             eval_parameters=EvalExecutionParams(
                 llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
             ),
+            llama_parse_parameters=LlamaParseParameters(),
             name="string",
             pipeline_type=PipelineType.PLAYGROUND,
         ),
@@ -174,6 +176,7 @@ class PipelinesClient:
             DataSinkCreate,
             EvalExecutionParams,
             FilterCondition,
+            LlamaParseParameters,
             MetadataFilters,
             PipelineCreate,
             PipelineType,
@@ -200,6 +203,7 @@ class PipelinesClient:
             eval_parameters=EvalExecutionParams(
                 llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
             ),
+            llama_parse_parameters=LlamaParseParameters(),
             name="string",
             pipeline_type=PipelineType.PLAYGROUND,
         ),
@@ -223,16 +227,12 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_pipeline(
-        self, pipeline_id: str, *, with_managed_ingestion_status: typing.Optional[bool] = None
-    ) -> Pipeline:
+    def get_pipeline(self, pipeline_id: str) -> Pipeline:
         """
         Get a pipeline by ID for a given project.

         Parameters:
             - pipeline_id: str.
-
-            - with_managed_ingestion_status: typing.Optional[bool].
         ---
         from llama_cloud.client import LlamaCloud

@@ -246,7 +246,6 @@ class PipelinesClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
-            params=remove_none_from_dict({"with_managed_ingestion_status": with_managed_ingestion_status}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -270,6 +269,7 @@ class PipelinesClient:
         preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
         eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
         llama_parse_enabled: typing.Optional[bool] = OMIT,
+        llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
        name: typing.Optional[str] = OMIT,
         managed_pipeline_id: typing.Optional[str] = OMIT,
     ) -> Pipeline:
@@ -291,6 +291,8 @@ class PipelinesClient:

             - llama_parse_enabled: typing.Optional[bool]. Whether to use LlamaParse during pipeline execution.

+            - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.
+
             - name: typing.Optional[str].

             - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
@@ -300,6 +302,7 @@ class PipelinesClient:
             DataSinkCreate,
             EvalExecutionParams,
             FilterCondition,
+            LlamaParseParameters,
             MetadataFilters,
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
@@ -324,6 +327,7 @@ class PipelinesClient:
             eval_parameters=EvalExecutionParams(
                 llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
             ),
+            llama_parse_parameters=LlamaParseParameters(),
         )
         """
         _request: typing.Dict[str, typing.Any] = {}
@@ -339,6 +343,8 @@ class PipelinesClient:
             _request["eval_parameters"] = eval_parameters
         if llama_parse_enabled is not OMIT:
             _request["llama_parse_enabled"] = llama_parse_enabled
+        if llama_parse_parameters is not OMIT:
+            _request["llama_parse_parameters"] = llama_parse_parameters
         if name is not OMIT:
             _request["name"] = name
         if managed_pipeline_id is not OMIT:
@@ -392,6 +398,38 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_pipeline_status(self, pipeline_id: str) -> ManagedIngestionStatusResponse:
+        """
+        Get the status of a pipeline by ID.
+
+        Parameters:
+            - pipeline_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.get_pipeline_status(
+            pipeline_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/status"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def sync_pipeline(self, pipeline_id: str) -> Pipeline:
         """
         Run ingestion for the pipeline by incrementally updating the data-sink with upstream changes from data-sources & files.
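Taken together, these changes move managed-ingestion status out of get_pipeline (the with_managed_ingestion_status flag is gone) and onto a dedicated /status endpoint. A minimal migration sketch, assuming an existing pipeline ID and token (both placeholders):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    # 0.0.6: get_pipeline no longer accepts a status flag; fetch status separately.
    pipeline = client.pipelines.get_pipeline(pipeline_id="my-pipeline-id")
    status = client.pipelines.get_pipeline_status(pipeline_id="my-pipeline-id")
    print(pipeline.name, status.status)  # status.status is a ManagedIngestionStatus value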
@@ -676,7 +714,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_pipeline_file_status(self, pipeline_id: str, file_id: str) ->
+    def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
         """
         Get status of a file for a pipeline.

@@ -704,7 +742,7 @@ class PipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -1290,7 +1328,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_pipeline_document_status(self, pipeline_id: str, document_id: str) ->
+    def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatusResponse:
         """
         Return a single document for a pipeline.

@@ -1319,7 +1357,7 @@ class PipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -1394,6 +1432,7 @@ class AsyncPipelinesClient:
             DataSinkCreate,
             EvalExecutionParams,
             FilterCondition,
+            LlamaParseParameters,
             MetadataFilters,
             PipelineCreate,
             PipelineType,
@@ -1420,6 +1459,7 @@ class AsyncPipelinesClient:
             eval_parameters=EvalExecutionParams(
                 llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
             ),
+            llama_parse_parameters=LlamaParseParameters(),
             name="string",
             pipeline_type=PipelineType.PLAYGROUND,
         ),
@@ -1458,6 +1498,7 @@ class AsyncPipelinesClient:
             DataSinkCreate,
             EvalExecutionParams,
             FilterCondition,
+            LlamaParseParameters,
             MetadataFilters,
             PipelineCreate,
             PipelineType,
@@ -1484,6 +1525,7 @@ class AsyncPipelinesClient:
             eval_parameters=EvalExecutionParams(
                 llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
             ),
+            llama_parse_parameters=LlamaParseParameters(),
             name="string",
             pipeline_type=PipelineType.PLAYGROUND,
         ),
@@ -1507,16 +1549,12 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_pipeline(
-        self, pipeline_id: str, *, with_managed_ingestion_status: typing.Optional[bool] = None
-    ) -> Pipeline:
+    async def get_pipeline(self, pipeline_id: str) -> Pipeline:
         """
         Get a pipeline by ID for a given project.

         Parameters:
             - pipeline_id: str.
-
-            - with_managed_ingestion_status: typing.Optional[bool].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -1530,7 +1568,6 @@ class AsyncPipelinesClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
-            params=remove_none_from_dict({"with_managed_ingestion_status": with_managed_ingestion_status}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1554,6 +1591,7 @@ class AsyncPipelinesClient:
         preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
         eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
         llama_parse_enabled: typing.Optional[bool] = OMIT,
+        llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
         name: typing.Optional[str] = OMIT,
         managed_pipeline_id: typing.Optional[str] = OMIT,
     ) -> Pipeline:
@@ -1575,6 +1613,8 @@ class AsyncPipelinesClient:

             - llama_parse_enabled: typing.Optional[bool]. Whether to use LlamaParse during pipeline execution.

+            - llama_parse_parameters: typing.Optional[LlamaParseParameters]. Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.
+
             - name: typing.Optional[str].

             - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
@@ -1584,6 +1624,7 @@ class AsyncPipelinesClient:
             DataSinkCreate,
             EvalExecutionParams,
             FilterCondition,
+            LlamaParseParameters,
             MetadataFilters,
             PresetRetrievalParams,
             SupportedEvalLlmModelNames,
@@ -1608,6 +1649,7 @@ class AsyncPipelinesClient:
             eval_parameters=EvalExecutionParams(
                 llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
             ),
+            llama_parse_parameters=LlamaParseParameters(),
         )
         """
         _request: typing.Dict[str, typing.Any] = {}
@@ -1623,6 +1665,8 @@ class AsyncPipelinesClient:
             _request["eval_parameters"] = eval_parameters
         if llama_parse_enabled is not OMIT:
             _request["llama_parse_enabled"] = llama_parse_enabled
+        if llama_parse_parameters is not OMIT:
+            _request["llama_parse_parameters"] = llama_parse_parameters
         if name is not OMIT:
             _request["name"] = name
         if managed_pipeline_id is not OMIT:
@@ -1676,6 +1720,38 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def get_pipeline_status(self, pipeline_id: str) -> ManagedIngestionStatusResponse:
+        """
+        Get the status of a pipeline by ID.
+
+        Parameters:
+            - pipeline_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.get_pipeline_status(
+            pipeline_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/status"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def sync_pipeline(self, pipeline_id: str) -> Pipeline:
         """
         Run ingestion for the pipeline by incrementally updating the data-sink with upstream changes from data-sources & files.
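The async client mirrors the same surface. A short sketch of triggering ingestion and then reading its status with AsyncLlamaCloud (token and pipeline ID are placeholders; the asyncio wrapper is only illustrative):

    import asyncio

    from llama_cloud.client import AsyncLlamaCloud


    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Kick off an incremental ingestion run, then read the managed ingestion status.
        await client.pipelines.sync_pipeline(pipeline_id="my-pipeline-id")
        status = await client.pipelines.get_pipeline_status(pipeline_id="my-pipeline-id")
        print(status.status)


    asyncio.run(main())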
@@ -1962,7 +2038,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_pipeline_file_status(self, pipeline_id: str, file_id: str) ->
+    async def get_pipeline_file_status(self, pipeline_id: str, file_id: str) -> ManagedIngestionStatusResponse:
         """
         Get status of a file for a pipeline.

@@ -1990,7 +2066,7 @@ class AsyncPipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -2576,7 +2652,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_pipeline_document_status(self, pipeline_id: str, document_id: str) ->
+    async def get_pipeline_document_status(self, pipeline_id: str, document_id: str) -> ManagedIngestionStatusResponse:
         """
         Return a single document for a pipeline.

@@ -2605,7 +2681,7 @@ class AsyncPipelinesClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
llama_cloud/types/__init__.py
CHANGED
@@ -62,12 +62,14 @@ from .http_validation_error import HttpValidationError
 from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
 from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
 from .json_node_parser import JsonNodeParser
+from .llama_parse_parameters import LlamaParseParameters
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
 from .llm import Llm
 from .local_eval import LocalEval
 from .local_eval_results import LocalEvalResults
 from .local_eval_sets import LocalEvalSets
 from .managed_ingestion_status import ManagedIngestionStatus
+from .managed_ingestion_status_response import ManagedIngestionStatusResponse
 from .markdown_element_node_parser import MarkdownElementNodeParser
 from .markdown_node_parser import MarkdownNodeParser
 from .message_role import MessageRole
@@ -99,7 +101,6 @@ from .pipeline_file_create import PipelineFileCreate
 from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustomMetadataValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
-from .pipeline_file_status_response import PipelineFileStatusResponse
 from .pipeline_type import PipelineType
 from .pooling import Pooling
 from .preset_retrieval_params import PresetRetrievalParams
@@ -187,12 +188,14 @@ __all__ = [
     "HuggingFaceInferenceApiEmbedding",
     "HuggingFaceInferenceApiEmbeddingToken",
     "JsonNodeParser",
+    "LlamaParseParameters",
     "LlamaParseSupportedFileExtensions",
     "Llm",
     "LocalEval",
     "LocalEvalResults",
     "LocalEvalSets",
     "ManagedIngestionStatus",
+    "ManagedIngestionStatusResponse",
     "MarkdownElementNodeParser",
     "MarkdownNodeParser",
     "MessageRole",
@@ -224,7 +227,6 @@ __all__ = [
     "PipelineFileCreateCustomMetadataValue",
     "PipelineFileCustomMetadataValue",
     "PipelineFileResourceInfoValue",
-    "PipelineFileStatusResponse",
     "PipelineType",
     "Pooling",
     "PresetRetrievalParams",
llama_cloud/types/llama_parse_parameters.py
ADDED
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .parser_languages import ParserLanguages
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class LlamaParseParameters(pydantic.BaseModel):
+    """
+    Settings that can be configured for how to use LlamaParse to parse files witin a LlamaCloud pipeline.
+    """
+
+    languages: typing.Optional[typing.List[ParserLanguages]]
+    parsing_instruction: typing.Optional[str]
+    disable_ocr: typing.Optional[bool]
+    invalidate_cache: typing.Optional[bool]
+    do_not_cache: typing.Optional[bool]
+    fast_mode: typing.Optional[bool]
+    skip_diagonal_text: typing.Optional[bool]
+    gpt_4_o_mode: typing.Optional[bool] = pydantic.Field(alias="gpt4o_mode")
+    gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
+    do_not_unroll_columns: typing.Optional[bool]
+    page_separator: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        json_encoders = {dt.datetime: serialize_datetime}
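Because LlamaParseParameters is a plain pydantic model with allow_population_by_field_name and by_alias/exclude_unset serialization, the gpt4o fields can be set via the Python field name while the wire format keeps the alias, and unset fields are dropped from the request body. A small illustrative sketch (the values shown are placeholders, not recommendations):

    from llama_cloud import LlamaParseParameters

    # Populate by Python field name; dict()/json() emit the wire alias (gpt4o_mode)
    # and omit fields that were never set.
    params = LlamaParseParameters(
        parsing_instruction="Extract tables as markdown.",
        fast_mode=False,
        gpt_4_o_mode=True,
    )
    print(params.dict())
    # -> {'parsing_instruction': 'Extract tables as markdown.', 'fast_mode': False, 'gpt4o_mode': True}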
llama_cloud/types/{pipeline_file_status_response.py → managed_ingestion_status_response.py}
RENAMED
@@ -15,14 +15,8 @@ except ImportError:
     import pydantic  # type: ignore


-class
-""
-    Schema for the status of a pipeline file.
-    """
-
-    file_id: str = pydantic.Field(description="The ID of the file")
-    pipeline_id: str = pydantic.Field(description="The ID of the pipeline")
-    status: ManagedIngestionStatus = pydantic.Field(description="The status of the pipeline file")
+class ManagedIngestionStatusResponse(pydantic.BaseModel):
+    status: ManagedIngestionStatus = pydantic.Field(description="Status of the ingestion.")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
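The practical consequence of this rename is that the per-file and per-document status endpoints no longer echo file_id or pipeline_id back; the response carries only the status field. A minimal sketch of reading a file's ingestion status in 0.0.6 (IDs and token are placeholders):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    resp = client.pipelines.get_pipeline_file_status(
        pipeline_id="my-pipeline-id",
        file_id="my-file-id",
    )
    # resp is a ManagedIngestionStatusResponse; the IDs are no longer on the response.
    print(resp.status)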
llama_cloud/types/pipeline.py
CHANGED
@@ -7,6 +7,7 @@ from ..core.datetime_utils import serialize_datetime
 from .configured_transformation_item import ConfiguredTransformationItem
 from .data_sink import DataSink
 from .eval_execution_params import EvalExecutionParams
+from .llama_parse_parameters import LlamaParseParameters
 from .managed_ingestion_status import ManagedIngestionStatus
 from .pipeline_type import PipelineType
 from .preset_retrieval_params import PresetRetrievalParams
@@ -46,6 +47,9 @@ class Pipeline(pydantic.BaseModel):
     llama_parse_enabled: typing.Optional[bool] = pydantic.Field(
         description="Whether to use LlamaParse during pipeline execution."
     )
+    llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
+        description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
+    )
     managed_ingestion_status: typing.Optional[ManagedIngestionStatus] = pydantic.Field(
         description="Status of Managed Ingestion."
     )
llama_cloud/types/pipeline_create.py
CHANGED
@@ -7,6 +7,7 @@ from ..core.datetime_utils import serialize_datetime
 from .configured_transformation_item import ConfiguredTransformationItem
 from .data_sink_create import DataSinkCreate
 from .eval_execution_params import EvalExecutionParams
+from .llama_parse_parameters import LlamaParseParameters
 from .pipeline_type import PipelineType
 from .preset_retrieval_params import PresetRetrievalParams

@@ -42,6 +43,9 @@ class PipelineCreate(pydantic.BaseModel):
     llama_parse_enabled: typing.Optional[bool] = pydantic.Field(
         description="Whether to use LlamaParse during pipeline execution."
     )
+    llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
+        description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
+    )
     name: str
     pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
         description="Type of pipeline. Either PLAYGROUND or MANAGED."
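Both the Pipeline response model and the PipelineCreate request model now carry the optional llama_parse_parameters field, so parse settings can be supplied when a pipeline is created or upserted. A brief sketch of passing them through upsert_pipeline (names and values are placeholders):

    from llama_cloud import LlamaParseParameters
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    pipeline = client.pipelines.upsert_pipeline(
        name="docs-pipeline",
        llama_parse_enabled=True,
        llama_parse_parameters=LlamaParseParameters(parsing_instruction="Preserve section headings."),
    )
    # The returned Pipeline exposes the same settings.
    print(pipeline.llama_parse_parameters)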
llama_cloud/types/project.py
CHANGED
@@ -4,7 +4,6 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .pipeline import Pipeline

 try:
     import pydantic
@@ -24,7 +23,6 @@ class Project(pydantic.BaseModel):
     id: str = pydantic.Field(description="Unique identifier")
     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
-    pipelines: typing.List[Pipeline]
     ad_hoc_eval_dataset_id: typing.Optional[str]
     user_id: str = pydantic.Field(description="The user ID of the project owner.")
     is_default: typing.Optional[bool] = pydantic.Field(
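This is a breaking change for callers that read project.pipelines: the Project model no longer embeds its pipelines, so they must be fetched through the pipelines resource instead. A rough migration sketch, assuming the relevant pipeline IDs are tracked by the caller (IDs and token are placeholders):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    # Project objects no longer carry a `pipelines` list in 0.0.6; look pipelines up
    # directly by ID through the pipelines resource instead.
    pipeline = client.pipelines.get_pipeline(pipeline_id="my-pipeline-id")
    print(pipeline.managed_ingestion_status)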
{llama_cloud-0.0.5.dist-info → llama_cloud-0.0.6.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-llama_cloud/__init__.py,sha256=
+llama_cloud/__init__.py,sha256=kw-9ebdu7WbO7zDdlEW2X1qhoAiPwb90AJtm-X849-I,7581
 llama_cloud/client.py,sha256=zteEQ5dmzOW5mgEqQ-i9PBh01Ocx0LIN6jxHPy9CBlI,3786
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -32,12 +32,12 @@ llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/parsing/client.py,sha256=lm02dcjE6U1BpMMIrLaJZ3Yzji-gRX4jPgZrxgN_t50,36084
 llama_cloud/resources/pipelines/__init__.py,sha256=H7yaFIN62vjuhU3TOKzzuf8qpxZRgw1xVa-eyig-2YU,175
-llama_cloud/resources/pipelines/client.py,sha256=
+llama_cloud/resources/pipelines/client.py,sha256=4_6o20jUBMfSDRUExN_LAtbc-w_qA7LdroLNMBqr49w,106966
 llama_cloud/resources/pipelines/types/__init__.py,sha256=xuT4OBPLrRfEe-E3UVdJvRjl9jTp7tNBK_YzZBb6Kj8,212
 llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
 llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/projects/client.py,sha256=gqjSRfpNK8rxECpHGluiTLxF8qGX2LvNIPJNujfNQ9E,46742
-llama_cloud/types/__init__.py,sha256=
+llama_cloud/types/__init__.py,sha256=UtfkKO9KZ03jpjMHHsW1y-VB8C3f0zKtXcLtbFZSqEI,10402
 llama_cloud/types/azure_open_ai_embedding.py,sha256=Ne7DkOTpdwGsH2DUVIGdT5T8Nmk6J61lHGbmgG90LuY,3438
 llama_cloud/types/base.py,sha256=cn_Zn61yLMDCMm1iZTPvKILSRlqRzOqZtSYyYBY5dIE,938
 llama_cloud/types/base_prompt_template.py,sha256=GO9k4EDVMf3gRQIA4bVfXqgIMKnKTXhi1JlGvhqXDRY,1576
@@ -100,12 +100,14 @@ llama_cloud/types/http_validation_error.py,sha256=iOSKYv0dJGjyIq8DAeLVKNJY-GiM1b
 llama_cloud/types/hugging_face_inference_api_embedding.py,sha256=_nXn3KkPnnQiuspEUsBASHJOjeGYHuDUq1eBfXr6xwg,3315
 llama_cloud/types/hugging_face_inference_api_embedding_token.py,sha256=A7-_YryBcsP4G5uRyJ9acao3XwX5-YC3NRndTeDAPj4,144
 llama_cloud/types/json_node_parser.py,sha256=w7U_HbyxIDTEyJCdrk4j_8IUaqVsqEkpOJ6cq-0xz0A,1577
+llama_cloud/types/llama_parse_parameters.py,sha256=KiLSFyCAgxV9Ebk6TGYamwCWlsizj_MtFgAjQcnZENs,1732
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=wapP4esgu0fSNjQ-Qs5NkZcuUwYEn4YFHKV_HH_yc_M,5519
 llama_cloud/types/llm.py,sha256=T-Uv5OO0E6Rscpn841302jx3c7G1uo9LJkdrGlNGk30,2238
 llama_cloud/types/local_eval.py,sha256=77NY_rq4zr0V3iB-PXE7Om6LcjRrytLbQ55f_ovAF-M,2050
 llama_cloud/types/local_eval_results.py,sha256=G1rLE6vO2lEziHQ6bAbZvpJMTrkSYWFvsS1iyZZ44Jw,1449
 llama_cloud/types/local_eval_sets.py,sha256=XJSSriwRvkma889pPiBQrpRakKejKOX3tWPu1TGb1ug,1181
 llama_cloud/types/managed_ingestion_status.py,sha256=IW5WpBSofGlJfypFrl3mp4yx9Lq4eHFs-1IOl1R33dI,1128
+llama_cloud/types/managed_ingestion_status_response.py,sha256=oqpY5Iw08offH-0xlEj-F4YN7BUJgP3gSw0EBWmjFGg,1118
 llama_cloud/types/markdown_element_node_parser.py,sha256=N3HKe8ZVVzJvley9UxATSbXhNkgVafhJgtnyMePjMBU,2121
 llama_cloud/types/markdown_node_parser.py,sha256=T4VNqkKmwQtItpdIC2uwfBnIGEfGQ8s6F9vR9ChW64M,1589
 llama_cloud/types/message_role.py,sha256=38ES71HMWfKesfFqSkTBxDcqdNqJHlNYQr1pPKlxSXs,1208
@@ -124,8 +126,8 @@ llama_cloud/types/parsing_job_json_result.py,sha256=vC0FNMklitCgcB0esthMfv_RbbyF
 llama_cloud/types/parsing_job_markdown_result.py,sha256=E3-CVNFH1IMyuGs_xzYfYdNgq9AdnDshA_CxOTXz_dQ,1094
 llama_cloud/types/parsing_job_text_result.py,sha256=1QZielAWXuzPFOgr_DWshXPjmbExAAgAHKAEYVQVtJ8,1082
 llama_cloud/types/parsing_usage.py,sha256=Wy_c-kAFADDBZgDwqNglsJv_t7vcjOm-8EY32oZEYzU,995
-llama_cloud/types/pipeline.py,sha256=
-llama_cloud/types/pipeline_create.py,sha256=
+llama_cloud/types/pipeline.py,sha256=wZ68MphMPSw_tNLEErphPGnkX3te8RsxR0YbfnulwcE,3013
+llama_cloud/types/pipeline_create.py,sha256=_8qO8PVbD6zHW4xsYEHD4TQ-LhD5YE0iWK2x8BIALs0,2833
 llama_cloud/types/pipeline_data_source.py,sha256=A3AlRzTD7zr1y-u5O5LFESqIupbbG-fqUndQgeYj77w,2062
 llama_cloud/types/pipeline_data_source_component.py,sha256=Pk_K0Gv7xSWe5BKCdxz82EFd6AQDvZGN-6t3zg9h8NY,265
 llama_cloud/types/pipeline_data_source_component_one.py,sha256=9j6n_Mhp3_nGg1O-V8Xeb46vLRTRH7hJzVPShFYOMfM,690
@@ -137,12 +139,11 @@ llama_cloud/types/pipeline_file_create.py,sha256=2h7EVJk2Hez8FJ5AVqynWUpWDOkLmTO
 llama_cloud/types/pipeline_file_create_custom_metadata_value.py,sha256=olVj5yhQFx1QqWO1Wv9d6AtL-YyYO9_OYtOfcD2ZeGY,217
 llama_cloud/types/pipeline_file_custom_metadata_value.py,sha256=ClFphYDNlHxeyLF5BWxIUhs2rooS0Xtqxr_Ae8dn8zE,211
 llama_cloud/types/pipeline_file_resource_info_value.py,sha256=s3uFGQNwlUEr-X4TJZkW_kMBvX3h1sXRJoYlJRvHSDc,209
-llama_cloud/types/pipeline_file_status_response.py,sha256=lVFL9CdsFo_TV-vsrhrrVwnPqJzVX5nCuG71aM2qVD8,1328
 llama_cloud/types/pipeline_type.py,sha256=tTqrhxHP5xd7W2dQGD0e5FOv886nwJssyaVlXpWrtRo,551
 llama_cloud/types/pooling.py,sha256=5Fr6c8rx9SDWwWzEvD78suob2d79ktodUtLUAUHMbP8,651
 llama_cloud/types/preset_retrieval_params.py,sha256=y63ynv_SUYSq2vfYHNvw7LhiUtuVkvRDVmu1Xn8RY90,1907
 llama_cloud/types/presigned_url.py,sha256=pUOIs2hFESZCuiqMsnn7pB6dgh_XO6w7vAV4OhKrq94,1345
-llama_cloud/types/project.py,sha256
+llama_cloud/types/project.py,sha256=-EWRwtaBs6rPeEVH8T-3eWvM3VrGNCL4zkr3-loMiik,1523
 llama_cloud/types/project_create.py,sha256=GxGmsXGJM-cHrvPFLktEkj9JtNsSdFae7-HPZFB4er0,1014
 llama_cloud/types/prompt_mixin_prompts.py,sha256=HRJlfFXFDOaGjqkB4prCDuz2fgwXhUi5I5roGykjRmU,1381
 llama_cloud/types/prompt_spec.py,sha256=dCJOp3Gn5Y7EmC3iDIH4mM_fBtCMCwCPwPRgzyDY-q0,1516
@@ -161,7 +162,7 @@ llama_cloud/types/token_text_splitter.py,sha256=Mv8xBCvMXyYuQq1KInPe65O0YYCLWxs6
 llama_cloud/types/transformation_category_names.py,sha256=0xjYe-mDW9OKbTGqL5fSbTvqsfrG4LDu_stW_ubVLl4,582
 llama_cloud/types/validation_error.py,sha256=yZDLtjUHDY5w82Ra6CW0H9sLAr18R0RY1UNgJKR72DQ,1084
 llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPXjdtN9EB7HrLVo6EP0,128
-llama_cloud-0.0.
-llama_cloud-0.0.
-llama_cloud-0.0.
-llama_cloud-0.0.
+llama_cloud-0.0.6.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.0.6.dist-info/METADATA,sha256=mgK5dEdoILzh7V9_WLp2E2Wbie57LovcqDNXpq3Dmis,750
+llama_cloud-0.0.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llama_cloud-0.0.6.dist-info/RECORD,,
{llama_cloud-0.0.5.dist-info → llama_cloud-0.0.6.dist-info}/LICENSE
File without changes