llama-cloud 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +64 -0
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +22 -1
- llama_cloud/resources/data_sinks/client.py +12 -6
- llama_cloud/resources/embedding_model_configs/__init__.py +23 -0
- llama_cloud/resources/embedding_model_configs/client.py +360 -0
- llama_cloud/resources/embedding_model_configs/types/__init__.py +23 -0
- llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py +89 -0
- llama_cloud/resources/files/__init__.py +2 -2
- llama_cloud/resources/files/client.py +265 -34
- llama_cloud/resources/files/types/__init__.py +2 -1
- llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +7 -0
- llama_cloud/resources/organizations/client.py +65 -0
- llama_cloud/resources/parsing/client.py +157 -0
- llama_cloud/resources/pipelines/client.py +177 -14
- llama_cloud/resources/projects/client.py +71 -0
- llama_cloud/types/__init__.py +48 -0
- llama_cloud/types/cloud_one_drive_data_source.py +1 -0
- llama_cloud/types/cloud_postgres_vector_store.py +1 -1
- llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
- llama_cloud/types/embedding_model_config.py +43 -0
- llama_cloud/types/embedding_model_config_embedding_config.py +89 -0
- llama_cloud/types/embedding_model_config_update.py +33 -0
- llama_cloud/types/embedding_model_config_update_embedding_config.py +89 -0
- llama_cloud/types/interval_usage_and_plan.py +36 -0
- llama_cloud/types/llama_parse_parameters.py +10 -0
- llama_cloud/types/markdown_node_parser.py +2 -1
- llama_cloud/types/paginated_list_pipeline_files_response.py +35 -0
- llama_cloud/types/pipeline.py +1 -0
- llama_cloud/types/pipeline_create.py +1 -0
- llama_cloud/types/pipeline_file.py +1 -0
- llama_cloud/types/plan.py +40 -0
- llama_cloud/types/usage.py +41 -0
- {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.5.dist-info}/METADATA +1 -2
- {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.5.dist-info}/RECORD +37 -24
- {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.5.dist-info}/WHEEL +1 -1
- {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.5.dist-info}/LICENSE +0 -0
llama_cloud/__init__.py
CHANGED
@@ -64,6 +64,24 @@ from .types import (
     DataSourceCustomMetadataValue,
     DataSourceDefinition,
     ElementSegmentationConfig,
+    EmbeddingModelConfig,
+    EmbeddingModelConfigEmbeddingConfig,
+    EmbeddingModelConfigEmbeddingConfig_AzureEmbedding,
+    EmbeddingModelConfigEmbeddingConfig_BedrockEmbedding,
+    EmbeddingModelConfigEmbeddingConfig_CohereEmbedding,
+    EmbeddingModelConfigEmbeddingConfig_GeminiEmbedding,
+    EmbeddingModelConfigEmbeddingConfig_HuggingfaceApiEmbedding,
+    EmbeddingModelConfigEmbeddingConfig_OpenaiEmbedding,
+    EmbeddingModelConfigEmbeddingConfig_VertexaiEmbedding,
+    EmbeddingModelConfigUpdate,
+    EmbeddingModelConfigUpdateEmbeddingConfig,
+    EmbeddingModelConfigUpdateEmbeddingConfig_AzureEmbedding,
+    EmbeddingModelConfigUpdateEmbeddingConfig_BedrockEmbedding,
+    EmbeddingModelConfigUpdateEmbeddingConfig_CohereEmbedding,
+    EmbeddingModelConfigUpdateEmbeddingConfig_GeminiEmbedding,
+    EmbeddingModelConfigUpdateEmbeddingConfig_HuggingfaceApiEmbedding,
+    EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding,
+    EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding,
     EvalDataset,
     EvalDatasetJobParams,
     EvalDatasetJobRecord,
@@ -90,6 +108,7 @@ from .types import (
     HuggingFaceInferenceApiEmbeddingToken,
     IngestionErrorResponse,
     InputMessage,
+    IntervalUsageAndPlan,
     JobNameMapping,
     LlamaParseParameters,
     LlamaParseSupportedFileExtensions,
@@ -123,6 +142,7 @@ from .types import (
     PageScreenshotNodeWithScore,
     PageSegmentationConfig,
     PageSplitterNodeParser,
+    PaginatedListPipelineFilesResponse,
     ParserLanguages,
     ParsingHistoryItem,
     ParsingJob,
@@ -167,6 +187,7 @@ from .types import (
     PipelineTransformConfig_Advanced,
     PipelineTransformConfig_Auto,
     PipelineType,
+    Plan,
     PlaygroundSession,
     Pooling,
     PresetRetrievalParams,
@@ -192,6 +213,7 @@ from .types import (
     TokenChunkingConfig,
     TokenTextSplitter,
     TransformationCategoryNames,
+    Usage,
     UserOrganization,
     UserOrganizationCreate,
     UserOrganizationDelete,
@@ -207,8 +229,17 @@ from .resources import (
     DataSinkUpdateComponent,
     DataSourceUpdateComponent,
     DataSourceUpdateCustomMetadataValue,
+    EmbeddingModelConfigCreateEmbeddingConfig,
+    EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
     ExtractionSchemaCreateDataSchemaValue,
     ExtractionSchemaUpdateDataSchemaValue,
+    FileCreateFromUrlResourceInfoValue,
     FileCreateResourceInfoValue,
     PipelineFileUpdateCustomMetadataValue,
     PipelineUpdateEmbeddingConfig,
@@ -223,6 +254,7 @@ from .resources import (
     component_definitions,
     data_sinks,
     data_sources,
+    embedding_model_configs,
     evals,
     extraction,
     files,
@@ -300,6 +332,32 @@ __all__ = [
     "DataSourceUpdateComponent",
     "DataSourceUpdateCustomMetadataValue",
     "ElementSegmentationConfig",
+    "EmbeddingModelConfig",
+    "EmbeddingModelConfigCreateEmbeddingConfig",
+    "EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding",
+    "EmbeddingModelConfigEmbeddingConfig",
+    "EmbeddingModelConfigEmbeddingConfig_AzureEmbedding",
+    "EmbeddingModelConfigEmbeddingConfig_BedrockEmbedding",
+    "EmbeddingModelConfigEmbeddingConfig_CohereEmbedding",
+    "EmbeddingModelConfigEmbeddingConfig_GeminiEmbedding",
+    "EmbeddingModelConfigEmbeddingConfig_HuggingfaceApiEmbedding",
+    "EmbeddingModelConfigEmbeddingConfig_OpenaiEmbedding",
+    "EmbeddingModelConfigEmbeddingConfig_VertexaiEmbedding",
+    "EmbeddingModelConfigUpdate",
+    "EmbeddingModelConfigUpdateEmbeddingConfig",
+    "EmbeddingModelConfigUpdateEmbeddingConfig_AzureEmbedding",
+    "EmbeddingModelConfigUpdateEmbeddingConfig_BedrockEmbedding",
+    "EmbeddingModelConfigUpdateEmbeddingConfig_CohereEmbedding",
+    "EmbeddingModelConfigUpdateEmbeddingConfig_GeminiEmbedding",
+    "EmbeddingModelConfigUpdateEmbeddingConfig_HuggingfaceApiEmbedding",
+    "EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding",
+    "EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding",
     "EvalDataset",
     "EvalDatasetJobParams",
     "EvalDatasetJobRecord",
@@ -317,6 +375,7 @@ __all__ = [
     "ExtractionSchemaDataSchemaValue",
     "ExtractionSchemaUpdateDataSchemaValue",
     "File",
+    "FileCreateFromUrlResourceInfoValue",
     "FileCreateResourceInfoValue",
     "FileResourceInfoValue",
     "FilterCondition",
@@ -329,6 +388,7 @@ __all__ = [
     "HuggingFaceInferenceApiEmbeddingToken",
     "IngestionErrorResponse",
     "InputMessage",
+    "IntervalUsageAndPlan",
     "JobNameMapping",
     "LlamaCloudEnvironment",
     "LlamaParseParameters",
@@ -363,6 +423,7 @@ __all__ = [
     "PageScreenshotNodeWithScore",
     "PageSegmentationConfig",
     "PageSplitterNodeParser",
+    "PaginatedListPipelineFilesResponse",
     "ParserLanguages",
     "ParsingHistoryItem",
     "ParsingJob",
@@ -417,6 +478,7 @@ __all__ = [
     "PipelineUpdateEmbeddingConfig_OpenaiEmbedding",
     "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
     "PipelineUpdateTransformConfig",
+    "Plan",
     "PlaygroundSession",
     "Pooling",
     "PresetRetrievalParams",
@@ -443,6 +505,7 @@ __all__ = [
     "TokenTextSplitter",
     "TransformationCategoryNames",
     "UnprocessableEntityError",
+    "Usage",
     "UserOrganization",
     "UserOrganizationCreate",
     "UserOrganizationDelete",
@@ -455,6 +518,7 @@ __all__ = [
     "component_definitions",
     "data_sinks",
     "data_sources",
+    "embedding_model_configs",
     "evals",
     "extraction",
     "files",
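The hunks above only add re-exports, so everything new in 0.1.5 becomes importable from the package root. A minimal sketch, assuming llama-cloud 0.1.5 is installed; the names are taken from the __all__ additions above and nothing else is assumed:

from llama_cloud import (
    EmbeddingModelConfig,
    EmbeddingModelConfigUpdate,
    IntervalUsageAndPlan,
    PaginatedListPipelineFilesResponse,
    Plan,
    Usage,
)

# Nothing is called here; this only confirms that the new 0.1.5 exports resolve at import time.
print(EmbeddingModelConfig, EmbeddingModelConfigUpdate, IntervalUsageAndPlan, Plan, Usage)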
llama_cloud/client.py
CHANGED
@@ -9,6 +9,7 @@ from .environment import LlamaCloudEnvironment
 from .resources.component_definitions.client import AsyncComponentDefinitionsClient, ComponentDefinitionsClient
 from .resources.data_sinks.client import AsyncDataSinksClient, DataSinksClient
 from .resources.data_sources.client import AsyncDataSourcesClient, DataSourcesClient
+from .resources.embedding_model_configs.client import AsyncEmbeddingModelConfigsClient, EmbeddingModelConfigsClient
 from .resources.evals.client import AsyncEvalsClient, EvalsClient
 from .resources.extraction.client import AsyncExtractionClient, ExtractionClient
 from .resources.files.client import AsyncFilesClient, FilesClient
@@ -35,6 +36,7 @@ class LlamaCloud:
         )
         self.data_sinks = DataSinksClient(client_wrapper=self._client_wrapper)
         self.data_sources = DataSourcesClient(client_wrapper=self._client_wrapper)
+        self.embedding_model_configs = EmbeddingModelConfigsClient(client_wrapper=self._client_wrapper)
         self.organizations = OrganizationsClient(client_wrapper=self._client_wrapper)
         self.projects = ProjectsClient(client_wrapper=self._client_wrapper)
         self.files = FilesClient(client_wrapper=self._client_wrapper)
@@ -62,6 +64,7 @@ class AsyncLlamaCloud:
         )
         self.data_sinks = AsyncDataSinksClient(client_wrapper=self._client_wrapper)
         self.data_sources = AsyncDataSourcesClient(client_wrapper=self._client_wrapper)
+        self.embedding_model_configs = AsyncEmbeddingModelConfigsClient(client_wrapper=self._client_wrapper)
         self.organizations = AsyncOrganizationsClient(client_wrapper=self._client_wrapper)
         self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper)
         self.files = AsyncFilesClient(client_wrapper=self._client_wrapper)
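With the new resource wired into both client classes, the endpoints are reachable as client.embedding_model_configs. A rough usage sketch, assuming a valid API key; the LlamaCloud(token=...) constructor form and the project_id parameter are the ones shown in the generated docstrings later in this diff:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token, as in the docstring examples

# The same attribute exists on AsyncLlamaCloud, backed by AsyncEmbeddingModelConfigsClient.
configs = client.embedding_model_configs.list_embedding_model_configs(project_id="YOUR_PROJECT_ID")
print(len(configs))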
llama_cloud/resources/__init__.py
CHANGED
@@ -4,6 +4,7 @@ from . import (
     component_definitions,
     data_sinks,
     data_sources,
+    embedding_model_configs,
     evals,
     extraction,
     files,
@@ -14,8 +15,18 @@ from . import (
 )
 from .data_sinks import DataSinkUpdateComponent
 from .data_sources import DataSourceUpdateComponent, DataSourceUpdateCustomMetadataValue
+from .embedding_model_configs import (
+    EmbeddingModelConfigCreateEmbeddingConfig,
+    EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
+)
 from .extraction import ExtractionSchemaCreateDataSchemaValue, ExtractionSchemaUpdateDataSchemaValue
-from .files import FileCreateResourceInfoValue
+from .files import FileCreateFromUrlResourceInfoValue, FileCreateResourceInfoValue
 from .pipelines import (
     PipelineFileUpdateCustomMetadataValue,
     PipelineUpdateEmbeddingConfig,
@@ -33,8 +44,17 @@ __all__ = [
     "DataSinkUpdateComponent",
     "DataSourceUpdateComponent",
     "DataSourceUpdateCustomMetadataValue",
+    "EmbeddingModelConfigCreateEmbeddingConfig",
+    "EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding",
     "ExtractionSchemaCreateDataSchemaValue",
     "ExtractionSchemaUpdateDataSchemaValue",
+    "FileCreateFromUrlResourceInfoValue",
     "FileCreateResourceInfoValue",
     "PipelineFileUpdateCustomMetadataValue",
     "PipelineUpdateEmbeddingConfig",
@@ -49,6 +69,7 @@ __all__ = [
     "component_definitions",
     "data_sinks",
     "data_sources",
+    "embedding_model_configs",
     "evals",
     "extraction",
     "files",
llama_cloud/resources/data_sinks/client.py
CHANGED
@@ -31,13 +31,16 @@ class DataSinksClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper

-    def list_data_sinks(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSink]:
+    def list_data_sinks(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[DataSink]:
         """
         List data sinks for a given project.
-        If project_id is not provided, uses the default project.

         Parameters:
         - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -49,7 +52,7 @@ class DataSinksClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
-            params=remove_none_from_dict({"project_id": project_id}),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -282,13 +285,16 @@ class AsyncDataSinksClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper

-    async def list_data_sinks(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSink]:
+    async def list_data_sinks(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[DataSink]:
         """
         List data sinks for a given project.
-        If project_id is not provided, uses the default project.

         Parameters:
         - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -300,7 +306,7 @@ class AsyncDataSinksClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/data-sinks"),
-            params=remove_none_from_dict({"project_id": project_id}),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
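The only functional change to data sinks is the extra organization_id filter on list_data_sinks; both parameters remain optional, and None values are stripped by remove_none_from_dict before the request is sent. A hedged sketch of the new call shape, with placeholder IDs:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# organization_id is new in 0.1.5; omit it (or pass None) to keep the 0.1.4 behavior.
sinks = client.data_sinks.list_data_sinks(
    project_id="YOUR_PROJECT_ID",
    organization_id="YOUR_ORGANIZATION_ID",
)
print([sink.name for sink in sinks])  # assumes DataSink exposes a name field, which this diff does not show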
llama_cloud/resources/embedding_model_configs/__init__.py
ADDED
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+    EmbeddingModelConfigCreateEmbeddingConfig,
+    EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding,
+    EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
+)
+
+__all__ = [
+    "EmbeddingModelConfigCreateEmbeddingConfig",
+    "EmbeddingModelConfigCreateEmbeddingConfig_AzureEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_BedrockEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_CohereEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_GeminiEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_HuggingfaceApiEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_OpenaiEmbedding",
+    "EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding",
+]
llama_cloud/resources/embedding_model_configs/client.py
ADDED
@@ -0,0 +1,360 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.embedding_model_config import EmbeddingModelConfig
+from ...types.embedding_model_config_update import EmbeddingModelConfigUpdate
+from ...types.http_validation_error import HttpValidationError
+from .types.embedding_model_config_create_embedding_config import EmbeddingModelConfigCreateEmbeddingConfig
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class EmbeddingModelConfigsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def list_embedding_model_configs(self, *, project_id: str) -> typing.List[EmbeddingModelConfig]:
+        """
+        Parameters:
+        - project_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.embedding_model_configs.list_embedding_model_configs(
+            project_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
+            params=remove_none_from_dict({"project_id": project_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[EmbeddingModelConfig], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def create_embedding_model_config(
+        self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
+    ) -> typing.Any:
+        """
+        Create a new embedding model configuration within a specified project.
+
+        Parameters:
+        - project_id: str.
+
+        - name: str. The name of the embedding model config.
+
+        - embedding_config: EmbeddingModelConfigCreateEmbeddingConfig. The embedding configuration for the embedding model config.
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
+            params=remove_none_from_dict({"project_id": project_id}),
+            json=jsonable_encoder({"name": name, "embedding_config": embedding_config}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def upsert_embedding_model_config(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        request: EmbeddingModelConfigUpdate,
+    ) -> EmbeddingModelConfig:
+        """
+        Upserts an embedding model config.
+        Updates if an embedding model config with the same name and project_id already exists. Otherwise, creates a new embedding model config.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - request: EmbeddingModelConfigUpdate.
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def update_embedding_model_config(
+        self, embedding_model_config_id: str, *, request: EmbeddingModelConfigUpdate
+    ) -> EmbeddingModelConfig:
+        """
+        Update an embedding model config by ID.
+
+        Parameters:
+        - embedding_model_config_id: str.
+
+        - request: EmbeddingModelConfigUpdate.
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
+            ),
+            json=jsonable_encoder(request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def delete_embedding_model_config(self, embedding_model_config_id: str) -> None:
+        """
+        Delete an embedding model config by ID.
+
+        Parameters:
+        - embedding_model_config_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.embedding_model_configs.delete_embedding_model_config(
+            embedding_model_config_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncEmbeddingModelConfigsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def list_embedding_model_configs(self, *, project_id: str) -> typing.List[EmbeddingModelConfig]:
+        """
+        Parameters:
+        - project_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.embedding_model_configs.list_embedding_model_configs(
+            project_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
+            params=remove_none_from_dict({"project_id": project_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[EmbeddingModelConfig], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def create_embedding_model_config(
+        self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
+    ) -> typing.Any:
+        """
+        Create a new embedding model configuration within a specified project.
+
+        Parameters:
+        - project_id: str.
+
+        - name: str. The name of the embedding model config.
+
+        - embedding_config: EmbeddingModelConfigCreateEmbeddingConfig. The embedding configuration for the embedding model config.
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
+            params=remove_none_from_dict({"project_id": project_id}),
+            json=jsonable_encoder({"name": name, "embedding_config": embedding_config}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def upsert_embedding_model_config(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        request: EmbeddingModelConfigUpdate,
+    ) -> EmbeddingModelConfig:
+        """
+        Upserts an embedding model config.
+        Updates if an embedding model config with the same name and project_id already exists. Otherwise, creates a new embedding model config.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - request: EmbeddingModelConfigUpdate.
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/embedding-model-configs"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def update_embedding_model_config(
+        self, embedding_model_config_id: str, *, request: EmbeddingModelConfigUpdate
+    ) -> EmbeddingModelConfig:
+        """
+        Update an embedding model config by ID.
+
+        Parameters:
+        - embedding_model_config_id: str.
+
+        - request: EmbeddingModelConfigUpdate.
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
+            ),
+            json=jsonable_encoder(request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def delete_embedding_model_config(self, embedding_model_config_id: str) -> None:
+        """
+        Delete an embedding model config by ID.
+
+        Parameters:
+        - embedding_model_config_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.embedding_model_configs.delete_embedding_model_config(
+            embedding_model_config_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/embedding-model-configs/{embedding_model_config_id}"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
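Taken together, the new client covers list (GET), create (POST), upsert (PUT on the collection), update (PUT on a specific ID), and delete. A rough end-to-end sketch of the async variant; the concrete fields of EmbeddingModelConfigUpdate and of the per-provider EmbeddingModelConfigCreateEmbeddingConfig_* variants live in the new types modules listed at the top of this diff and are not shown here, so both payloads are passed in as opaque placeholders:

from llama_cloud.client import AsyncLlamaCloud


async def manage_embedding_configs(embedding_config, update_request):
    # embedding_config: one of the EmbeddingModelConfigCreateEmbeddingConfig_* variants (fields not shown in this diff)
    # update_request: an EmbeddingModelConfigUpdate instance (fields not shown in this diff)
    client = AsyncLlamaCloud(token="YOUR_TOKEN")

    # POST /api/v1/embedding-model-configs?project_id=...
    await client.embedding_model_configs.create_embedding_model_config(
        project_id="YOUR_PROJECT_ID",
        name="my-embedding-config",
        embedding_config=embedding_config,
    )

    # PUT /api/v1/embedding-model-configs: updates when name + project_id already exist, otherwise creates.
    config = await client.embedding_model_configs.upsert_embedding_model_config(
        project_id="YOUR_PROJECT_ID",
        request=update_request,
    )

    # DELETE /api/v1/embedding-model-configs/{embedding_model_config_id}
    await client.embedding_model_configs.delete_embedding_model_config(
        embedding_model_config_id=config.id,  # assumes the response model exposes an id, not shown in this diff
    )

# Driving this with asyncio.run(manage_embedding_configs(...)) requires real payload objects built from the new types.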