llama-cloud 0.1.17__py3-none-any.whl → 0.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +36 -18
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +20 -0
- llama_cloud/resources/beta/__init__.py +2 -0
- llama_cloud/resources/beta/client.py +371 -0
- llama_cloud/resources/chat_apps/client.py +4 -4
- llama_cloud/resources/embedding_model_configs/client.py +82 -22
- llama_cloud/resources/llama_extract/__init__.py +21 -0
- llama_cloud/resources/llama_extract/client.py +223 -114
- llama_cloud/resources/llama_extract/types/__init__.py +21 -0
- llama_cloud/resources/parsing/client.py +83 -29
- llama_cloud/resources/pipelines/client.py +107 -2
- llama_cloud/resources/projects/client.py +70 -0
- llama_cloud/types/__init__.py +26 -26
- llama_cloud/types/{parsing_usage.py → audio_block.py} +5 -3
- llama_cloud/types/batch.py +47 -0
- llama_cloud/types/batch_item.py +40 -0
- llama_cloud/types/{extract_agent_update.py → batch_paginated_list.py} +6 -9
- llama_cloud/types/{extract_schema_validate_request.py → batch_public_output.py} +7 -3
- llama_cloud/types/cloud_confluence_data_source.py +1 -0
- llama_cloud/types/cloud_postgres_vector_store.py +2 -0
- llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
- llama_cloud/types/extract_config.py +2 -0
- llama_cloud/types/fail_page_mode.py +29 -0
- llama_cloud/types/{extract_agent_create.py → file_count_by_status_response.py} +8 -10
- llama_cloud/types/file_parse_public.py +36 -0
- llama_cloud/types/job_names.py +8 -12
- llama_cloud/types/llama_extract_settings.py +2 -2
- llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +13 -1
- llama_cloud/types/llama_parse_parameters.py +10 -2
- llama_cloud/types/markdown_node_parser.py +4 -0
- llama_cloud/types/message_role.py +4 -0
- llama_cloud/types/pg_vector_distance_method.py +43 -0
- llama_cloud/types/{extract_job_create_batch.py → pg_vector_hnsw_settings.py} +12 -9
- llama_cloud/types/pg_vector_vector_type.py +35 -0
- llama_cloud/types/pipeline_create.py +1 -0
- llama_cloud/types/pipeline_data_source.py +3 -0
- llama_cloud/types/pipeline_data_source_status.py +33 -0
- llama_cloud/types/pipeline_file.py +1 -0
- llama_cloud/types/prompt_conf.py +3 -0
- llama_cloud/types/struct_parse_conf.py +4 -1
- llama_cloud/types/supported_llm_model_names.py +0 -12
- llama_cloud/types/token_text_splitter.py +3 -0
- {llama_cloud-0.1.17.dist-info → llama_cloud-0.1.19.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.17.dist-info → llama_cloud-0.1.19.dist-info}/RECORD +55 -45
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
- {llama_cloud-0.1.17.dist-info → llama_cloud-0.1.19.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.17.dist-info → llama_cloud-0.1.19.dist-info}/WHEEL +0 -0
llama_cloud/__init__.py
CHANGED
@@ -13,6 +13,7 @@ from .types import (
     AdvancedModeTransformConfigSegmentationConfig_None,
     AdvancedModeTransformConfigSegmentationConfig_Page,
     AppSchemaChatChatMessage,
+    AudioBlock,
     AutoTransformConfig,
     AzureOpenAiEmbedding,
     AzureOpenAiEmbeddingConfig,
@@ -21,6 +22,10 @@ from .types import (
     BasePlanName,
     BasePlanPlanFrequency,
     BasePromptTemplate,
+    Batch,
+    BatchItem,
+    BatchPaginatedList,
+    BatchPublicOutput,
     BedrockEmbedding,
     BedrockEmbeddingConfig,
     BillingPeriod,
@@ -98,19 +103,10 @@ from .types import (
     EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding,
     EvalExecutionParams,
     ExtractAgent,
-    ExtractAgentCreate,
-    ExtractAgentCreateDataSchema,
-    ExtractAgentCreateDataSchemaZeroValue,
     ExtractAgentDataSchemaValue,
-    ExtractAgentUpdate,
-    ExtractAgentUpdateDataSchema,
-    ExtractAgentUpdateDataSchemaZeroValue,
     ExtractConfig,
     ExtractJob,
     ExtractJobCreate,
-    ExtractJobCreateBatch,
-    ExtractJobCreateBatchDataSchemaOverride,
-    ExtractJobCreateBatchDataSchemaOverrideZeroValue,
     ExtractJobCreateDataSchemaOverride,
     ExtractJobCreateDataSchemaOverrideZeroValue,
     ExtractMode,
@@ -125,14 +121,14 @@ from .types import (
     ExtractRunDataSchemaValue,
     ExtractRunDataZeroValue,
     ExtractRunExtractionMetadataValue,
-    ExtractSchemaValidateRequest,
-    ExtractSchemaValidateRequestDataSchema,
-    ExtractSchemaValidateRequestDataSchemaZeroValue,
     ExtractSchemaValidateResponse,
     ExtractSchemaValidateResponseDataSchemaValue,
     ExtractState,
     ExtractTarget,
+    FailPageMode,
     File,
+    FileCountByStatusResponse,
+    FileParsePublic,
     FilePermissionInfoValue,
     FileResourceInfoValue,
     FilterCondition,
@@ -154,6 +150,7 @@ from .types import (
     LlamaExtractSettings,
     LlamaIndexCoreBaseLlmsTypesChatMessage,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
+    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
     LlamaParseParameters,
@@ -199,9 +196,11 @@ from .types import (
     ParsingJobStructuredResult,
     ParsingJobTextResult,
     ParsingMode,
-    ParsingUsage,
     PartitionNames,
     Permission,
+    PgVectorDistanceMethod,
+    PgVectorHnswSettings,
+    PgVectorVectorType,
     Pipeline,
     PipelineConfigurationHashes,
     PipelineCreate,
@@ -218,6 +217,7 @@ from .types import (
     PipelineDataSourceComponent,
     PipelineDataSourceCreate,
     PipelineDataSourceCustomMetadataValue,
+    PipelineDataSourceStatus,
     PipelineDeployment,
     PipelineEmbeddingConfig,
     PipelineEmbeddingConfig_AzureEmbedding,
@@ -324,6 +324,14 @@ from .resources import (
     EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding,
     EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding,
     EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
+    ExtractAgentCreateDataSchema,
+    ExtractAgentCreateDataSchemaZeroValue,
+    ExtractAgentUpdateDataSchema,
+    ExtractAgentUpdateDataSchemaZeroValue,
+    ExtractJobCreateBatchDataSchemaOverride,
+    ExtractJobCreateBatchDataSchemaOverrideZeroValue,
+    ExtractSchemaValidateRequestDataSchema,
+    ExtractSchemaValidateRequestDataSchemaZeroValue,
     FileCreateFromUrlResourceInfoValue,
     FileCreatePermissionInfoValue,
     FileCreateResourceInfoValue,
@@ -338,6 +346,7 @@ from .resources import (
    PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
    PipelineUpdateTransformConfig,
    UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction,
+   beta,
    chat_apps,
    component_definitions,
    data_sinks,
@@ -369,6 +378,7 @@ __all__ = [
     "AdvancedModeTransformConfigSegmentationConfig_None",
     "AdvancedModeTransformConfigSegmentationConfig_Page",
     "AppSchemaChatChatMessage",
+    "AudioBlock",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
     "AzureOpenAiEmbeddingConfig",
@@ -377,6 +387,10 @@ __all__ = [
     "BasePlanName",
     "BasePlanPlanFrequency",
     "BasePromptTemplate",
+    "Batch",
+    "BatchItem",
+    "BatchPaginatedList",
+    "BatchPublicOutput",
     "BedrockEmbedding",
     "BedrockEmbeddingConfig",
     "BillingPeriod",
@@ -465,17 +479,14 @@ __all__ = [
     "EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding",
     "EvalExecutionParams",
     "ExtractAgent",
-    "ExtractAgentCreate",
     "ExtractAgentCreateDataSchema",
     "ExtractAgentCreateDataSchemaZeroValue",
     "ExtractAgentDataSchemaValue",
-    "ExtractAgentUpdate",
     "ExtractAgentUpdateDataSchema",
     "ExtractAgentUpdateDataSchemaZeroValue",
     "ExtractConfig",
     "ExtractJob",
     "ExtractJobCreate",
-    "ExtractJobCreateBatch",
     "ExtractJobCreateBatchDataSchemaOverride",
     "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
     "ExtractJobCreateDataSchemaOverride",
@@ -492,17 +503,19 @@ __all__ = [
     "ExtractRunDataSchemaValue",
     "ExtractRunDataZeroValue",
     "ExtractRunExtractionMetadataValue",
-    "ExtractSchemaValidateRequest",
     "ExtractSchemaValidateRequestDataSchema",
     "ExtractSchemaValidateRequestDataSchemaZeroValue",
     "ExtractSchemaValidateResponse",
     "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",
     "ExtractTarget",
+    "FailPageMode",
     "File",
+    "FileCountByStatusResponse",
     "FileCreateFromUrlResourceInfoValue",
     "FileCreatePermissionInfoValue",
     "FileCreateResourceInfoValue",
+    "FileParsePublic",
     "FilePermissionInfoValue",
     "FileResourceInfoValue",
     "FilterCondition",
@@ -525,6 +538,7 @@ __all__ = [
     "LlamaExtractSettings",
     "LlamaIndexCoreBaseLlmsTypesChatMessage",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
+    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
     "LlamaParseParameters",
@@ -570,9 +584,11 @@ __all__ = [
     "ParsingJobStructuredResult",
     "ParsingJobTextResult",
     "ParsingMode",
-    "ParsingUsage",
     "PartitionNames",
     "Permission",
+    "PgVectorDistanceMethod",
+    "PgVectorHnswSettings",
+    "PgVectorVectorType",
     "Pipeline",
     "PipelineConfigurationHashes",
     "PipelineCreate",
@@ -589,6 +605,7 @@ __all__ = [
     "PipelineDataSourceComponent",
     "PipelineDataSourceCreate",
     "PipelineDataSourceCustomMetadataValue",
+    "PipelineDataSourceStatus",
     "PipelineDeployment",
     "PipelineEmbeddingConfig",
     "PipelineEmbeddingConfig_AzureEmbedding",
@@ -693,6 +710,7 @@ __all__ = [
     "VertexAiEmbeddingConfig",
     "VertexEmbeddingMode",
     "VertexTextEmbedding",
+    "beta",
    "chat_apps",
    "component_definitions",
    "data_sinks",
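Read together, these hunks change the package's top-level surface in three ways: new names are exported (AudioBlock, the Batch* family, FailPageMode, the PgVector* family, PipelineDataSourceStatus, and others), five names are dropped (ExtractAgentCreate, ExtractAgentUpdate, ExtractJobCreateBatch, ExtractSchemaValidateRequest, ParsingUsage), and the extract schema helper types are now re-exported from .resources instead of .types. A minimal sketch of what that means at import time, based only on the hunks above:

# Sketch based on the __init__.py hunks above; not part of the package.
# New in 0.1.19: importable from the package root.
from llama_cloud import AudioBlock, Batch, BatchItem, FailPageMode

# Removed in 0.1.19: these imports worked in 0.1.17 and now fail.
try:
    from llama_cloud import ParsingUsage  # dropped from the import list and __all__
except ImportError:
    ParsingUsage = None  # handle the removal however your code requires

# Unchanged for callers: still re-exported at the root, now sourced from
# .resources rather than .types (see the "from .resources import (" hunk).
from llama_cloud import ExtractAgentCreateDataSchema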
llama_cloud/client.py
CHANGED
@@ -6,6 +6,7 @@ import httpx
 
 from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from .environment import LlamaCloudEnvironment
+from .resources.beta.client import AsyncBetaClient, BetaClient
 from .resources.chat_apps.client import AsyncChatAppsClient, ChatAppsClient
 from .resources.component_definitions.client import AsyncComponentDefinitionsClient, ComponentDefinitionsClient
 from .resources.data_sinks.client import AsyncDataSinksClient, DataSinksClient
@@ -53,6 +54,7 @@ class LlamaCloud:
         self.chat_apps = ChatAppsClient(client_wrapper=self._client_wrapper)
         self.llama_extract = LlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = ReportsClient(client_wrapper=self._client_wrapper)
+        self.beta = BetaClient(client_wrapper=self._client_wrapper)
 
 
 class AsyncLlamaCloud:
@@ -85,6 +87,7 @@ class AsyncLlamaCloud:
         self.chat_apps = AsyncChatAppsClient(client_wrapper=self._client_wrapper)
         self.llama_extract = AsyncLlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = AsyncReportsClient(client_wrapper=self._client_wrapper)
+        self.beta = AsyncBetaClient(client_wrapper=self._client_wrapper)
 
 
 def _get_base_url(*, base_url: typing.Optional[str] = None, environment: LlamaCloudEnvironment) -> str:
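With these three hunks applied, the new beta namespace hangs off both client classes the same way the existing resource clients do. A short usage sketch (the token is a placeholder):

# Sketch: the beta namespace is now an attribute of both clients.
import asyncio

from llama_cloud.client import AsyncLlamaCloud, LlamaCloud

sync_client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token
batches = sync_client.beta.list_batches()     # returns a BatchPaginatedList


async def main() -> None:
    async_client = AsyncLlamaCloud(token="YOUR_TOKEN")
    batches = await async_client.beta.list_batches()


asyncio.run(main())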
llama_cloud/resources/__init__.py
CHANGED
@@ -1,6 +1,7 @@
 # This file was auto-generated by Fern from our API Definition.
 
 from . import (
+    beta,
     chat_apps,
     component_definitions,
     data_sinks,
@@ -30,6 +31,16 @@ from .embedding_model_configs import (
     EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
 )
 from .files import FileCreateFromUrlResourceInfoValue, FileCreatePermissionInfoValue, FileCreateResourceInfoValue
+from .llama_extract import (
+    ExtractAgentCreateDataSchema,
+    ExtractAgentCreateDataSchemaZeroValue,
+    ExtractAgentUpdateDataSchema,
+    ExtractAgentUpdateDataSchemaZeroValue,
+    ExtractJobCreateBatchDataSchemaOverride,
+    ExtractJobCreateBatchDataSchemaOverrideZeroValue,
+    ExtractSchemaValidateRequestDataSchema,
+    ExtractSchemaValidateRequestDataSchemaZeroValue,
+)
 from .pipelines import (
     PipelineFileUpdateCustomMetadataValue,
     PipelineUpdateEmbeddingConfig,
@@ -56,6 +67,14 @@ __all__ = [
     "EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding",
     "EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding",
     "EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding",
+    "ExtractAgentCreateDataSchema",
+    "ExtractAgentCreateDataSchemaZeroValue",
+    "ExtractAgentUpdateDataSchema",
+    "ExtractAgentUpdateDataSchemaZeroValue",
+    "ExtractJobCreateBatchDataSchemaOverride",
+    "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
+    "ExtractSchemaValidateRequestDataSchema",
+    "ExtractSchemaValidateRequestDataSchemaZeroValue",
     "FileCreateFromUrlResourceInfoValue",
     "FileCreatePermissionInfoValue",
     "FileCreateResourceInfoValue",
@@ -70,6 +89,7 @@ __all__ = [
     "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
     "PipelineUpdateTransformConfig",
     "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
+    "beta",
     "chat_apps",
     "component_definitions",
     "data_sinks",
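This mirrors the file moves in the listing at the top of this diff: the eight extract schema helper modules now live under llama_cloud/resources/llama_extract/types and are re-exported one level up. Either import path below should resolve in 0.1.19 (a sketch, based only on the hunks above):

# Sketch: the relocated extract schema helper types and their re-export.
from llama_cloud.resources.llama_extract import ExtractAgentCreateDataSchema
from llama_cloud.resources import ExtractSchemaValidateRequestDataSchema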
llama_cloud/resources/beta/client.py
ADDED
@@ -0,0 +1,371 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.batch import Batch
+from ...types.batch_paginated_list import BatchPaginatedList
+from ...types.batch_public_output import BatchPublicOutput
+from ...types.http_validation_error import HttpValidationError
+from ...types.llama_parse_parameters import LlamaParseParameters
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class BetaClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def list_batches(
+        self,
+        *,
+        limit: typing.Optional[int] = None,
+        offset: typing.Optional[int] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> BatchPaginatedList:
+        """
+        Parameters:
+            - limit: typing.Optional[int].
+
+            - offset: typing.Optional[int].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.list_batches()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
+            params=remove_none_from_dict(
+                {"limit": limit, "offset": offset, "project_id": project_id, "organization_id": organization_id}
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(BatchPaginatedList, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def create_batch(
+        self,
+        *,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
+        tool: str,
+        tool_data: typing.Optional[LlamaParseParameters] = OMIT,
+        input_type: str,
+        input_id: str,
+        output_type: typing.Optional[str] = OMIT,
+        output_id: typing.Optional[str] = OMIT,
+        batch_create_project_id: str,
+        external_id: str,
+        completion_window: typing.Optional[int] = OMIT,
+    ) -> Batch:
+        """
+        Parameters:
+            - organization_id: typing.Optional[str].
+
+            - project_id: typing.Optional[str].
+
+            - tool: str. The tool to be used for all requests in the batch.
+
+            - tool_data: typing.Optional[LlamaParseParameters].
+
+            - input_type: str. The type of input file. Currently only 'datasource' is supported.
+
+            - input_id: str. The ID of the input file for the batch.
+
+            - output_type: typing.Optional[str].
+
+            - output_id: typing.Optional[str].
+
+            - batch_create_project_id: str. The ID of the project to which the batch belongs
+
+            - external_id: str. A developer-provided ID for the batch. This ID will be returned in the response.
+
+            - completion_window: typing.Optional[int]. The time frame within which the batch should be processed. Currently only 24h is supported.
+        ---
+        from llama_cloud import FailPageMode, LlamaParseParameters, ParsingMode
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.create_batch(
+            tool="string",
+            tool_data=LlamaParseParameters(
+                parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
+                replace_failed_page_mode=FailPageMode.RAW_TEXT,
+            ),
+            input_type="string",
+            input_id="string",
+            batch_create_project_id="string",
+            external_id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {
+            "tool": tool,
+            "input_type": input_type,
+            "input_id": input_id,
+            "project_id": batch_create_project_id,
+            "external_id": external_id,
+        }
+        if tool_data is not OMIT:
+            _request["tool_data"] = tool_data
+        if output_type is not OMIT:
+            _request["output_type"] = output_type
+        if output_id is not OMIT:
+            _request["output_id"] = output_id
+        if completion_window is not OMIT:
+            _request["completion_window"] = completion_window
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Batch, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_batch(self, batch_id: str, *, organization_id: typing.Optional[str] = None) -> BatchPublicOutput:
+        """
+        Parameters:
+            - batch_id: str.
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.get_batch(
+            batch_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/batches/{batch_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(BatchPublicOutput, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncBetaClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def list_batches(
+        self,
+        *,
+        limit: typing.Optional[int] = None,
+        offset: typing.Optional[int] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> BatchPaginatedList:
+        """
+        Parameters:
+            - limit: typing.Optional[int].
+
+            - offset: typing.Optional[int].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.list_batches()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
+            params=remove_none_from_dict(
+                {"limit": limit, "offset": offset, "project_id": project_id, "organization_id": organization_id}
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(BatchPaginatedList, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def create_batch(
+        self,
+        *,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
+        tool: str,
+        tool_data: typing.Optional[LlamaParseParameters] = OMIT,
+        input_type: str,
+        input_id: str,
+        output_type: typing.Optional[str] = OMIT,
+        output_id: typing.Optional[str] = OMIT,
+        batch_create_project_id: str,
+        external_id: str,
+        completion_window: typing.Optional[int] = OMIT,
+    ) -> Batch:
+        """
+        Parameters:
+            - organization_id: typing.Optional[str].
+
+            - project_id: typing.Optional[str].
+
+            - tool: str. The tool to be used for all requests in the batch.
+
+            - tool_data: typing.Optional[LlamaParseParameters].
+
+            - input_type: str. The type of input file. Currently only 'datasource' is supported.
+
+            - input_id: str. The ID of the input file for the batch.
+
+            - output_type: typing.Optional[str].
+
+            - output_id: typing.Optional[str].
+
+            - batch_create_project_id: str. The ID of the project to which the batch belongs
+
+            - external_id: str. A developer-provided ID for the batch. This ID will be returned in the response.
+
+            - completion_window: typing.Optional[int]. The time frame within which the batch should be processed. Currently only 24h is supported.
+        ---
+        from llama_cloud import FailPageMode, LlamaParseParameters, ParsingMode
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.create_batch(
+            tool="string",
+            tool_data=LlamaParseParameters(
+                parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
+                replace_failed_page_mode=FailPageMode.RAW_TEXT,
+            ),
+            input_type="string",
+            input_id="string",
+            batch_create_project_id="string",
+            external_id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {
+            "tool": tool,
+            "input_type": input_type,
+            "input_id": input_id,
+            "project_id": batch_create_project_id,
+            "external_id": external_id,
+        }
+        if tool_data is not OMIT:
+            _request["tool_data"] = tool_data
+        if output_type is not OMIT:
+            _request["output_type"] = output_type
+        if output_id is not OMIT:
+            _request["output_id"] = output_id
+        if completion_window is not OMIT:
+            _request["completion_window"] = completion_window
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Batch, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_batch(self, batch_id: str, *, organization_id: typing.Optional[str] = None) -> BatchPublicOutput:
+        """
+        Parameters:
+            - batch_id: str.
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.get_batch(
+            batch_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/batches/{batch_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(BatchPublicOutput, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
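Taken together, the three methods support a submit-then-poll workflow. A hedged end-to-end sketch: the IDs are placeholders, the tool value "parse" is an assumption (the docstrings above do not enumerate valid tools), and only input_type='datasource' plus a 24-hour completion window are documented as supported:

# Sketch of the new batch workflow; placeholder IDs, and "parse" is an
# assumed tool name -- the generated docstrings do not list valid values.
from llama_cloud import FailPageMode, LlamaParseParameters, ParsingMode
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

batch = client.beta.create_batch(
    tool="parse",  # assumption: tool identifier for parse jobs
    tool_data=LlamaParseParameters(
        parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
        replace_failed_page_mode=FailPageMode.RAW_TEXT,
    ),
    input_type="datasource",  # the only documented input type
    input_id="ds-123",  # placeholder data source ID
    batch_create_project_id="proj-123",  # placeholder project ID
    external_id="my-batch-001",  # developer-provided ID, echoed back
    completion_window=24,  # only 24h is documented as supported
)

# Poll for status; assumes the Batch model exposes an `id` field.
status = client.beta.get_batch(batch_id=batch.id)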
llama_cloud/resources/chat_apps/client.py
CHANGED
@@ -108,7 +108,7 @@ class ChatAppsClient:
             name="string",
             retriever_id="string",
             llm_config=LlmParameters(
-                model_name=SupportedLlmModelNames.
+                model_name=SupportedLlmModelNames.GPT_4_O,
             ),
             retrieval_config=PresetCompositeRetrievalParams(
                 mode=CompositeRetrievalMode.ROUTING,
@@ -217,7 +217,7 @@ class ChatAppsClient:
         client.chat_apps.update_chat_app(
             id="string",
             llm_config=LlmParameters(
-                model_name=SupportedLlmModelNames.
+                model_name=SupportedLlmModelNames.GPT_4_O,
             ),
             retrieval_config=PresetCompositeRetrievalParams(
                 mode=CompositeRetrievalMode.ROUTING,
@@ -401,7 +401,7 @@ class AsyncChatAppsClient:
             name="string",
             retriever_id="string",
             llm_config=LlmParameters(
-                model_name=SupportedLlmModelNames.
+                model_name=SupportedLlmModelNames.GPT_4_O,
             ),
             retrieval_config=PresetCompositeRetrievalParams(
                 mode=CompositeRetrievalMode.ROUTING,
@@ -510,7 +510,7 @@ class AsyncChatAppsClient:
         await client.chat_apps.update_chat_app(
             id="string",
             llm_config=LlmParameters(
-                model_name=SupportedLlmModelNames.
+                model_name=SupportedLlmModelNames.GPT_4_O,
             ),
             retrieval_config=PresetCompositeRetrievalParams(
                 mode=CompositeRetrievalMode.ROUTING,
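All four hunks make the same one-line repair: the 0.1.17 generated examples ended mid-expression at SupportedLlmModelNames., which is not valid Python, and 0.1.19 pins a concrete enum member. The corrected fragment, assuming both names are importable from the package root as the generated examples elsewhere suggest:

# The repaired docstring example now names a concrete model.
from llama_cloud import LlmParameters, SupportedLlmModelNames

llm_config = LlmParameters(
    model_name=SupportedLlmModelNames.GPT_4_O,
)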