llama-cloud 0.0.10__py3-none-any.whl → 0.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

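In short, 0.0.11 adds pipeline-level embedding and transformation configuration (the new AutoTransformConfig, EmbeddingConfig, EmbeddingConfigComponent, EmbeddingConfigType, TransformConfig, and TransformConfigMode types) and gives the extraction client an explicit create_schema method alongside the existing infer_schema and list_schemas.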

llama_cloud/__init__.py CHANGED
@@ -1,6 +1,7 @@
  # This file was auto-generated by Fern from our API Definition.

  from .types import (
+ AutoTransformConfig,
  AzureOpenAiEmbedding,
  Base,
  BasePromptTemplate,
@@ -48,6 +49,9 @@ from .types import (
  DataSourceCreateCustomMetadataValue,
  DataSourceCustomMetadataValue,
  DataSourceDefinition,
+ EmbeddingConfig,
+ EmbeddingConfigComponent,
+ EmbeddingConfigType,
  EvalDataset,
  EvalDatasetJobParams,
  EvalDatasetJobRecord,
@@ -134,6 +138,8 @@ from .types import (
  TextNodeRelationshipsValue,
  TextNodeWithScore,
  TokenTextSplitter,
+ TransformConfig,
+ TransformConfigMode,
  TransformationCategoryNames,
  UserOrganization,
  UserOrganizationCreate,
@@ -148,6 +154,7 @@ from .resources import (
  DataSourceUpdateComponent,
  DataSourceUpdateComponentOne,
  DataSourceUpdateCustomMetadataValue,
+ ExtractionSchemaCreateDataSchemaValue,
  ExtractionSchemaUpdateDataSchemaValue,
  FileCreateResourceInfoValue,
  PipelineFileUpdateCustomMetadataValue,
@@ -165,6 +172,7 @@ from .resources import (
  from .environment import LlamaCloudEnvironment

  __all__ = [
+ "AutoTransformConfig",
  "AzureOpenAiEmbedding",
  "Base",
  "BasePromptTemplate",
@@ -217,6 +225,9 @@ __all__ = [
  "DataSourceUpdateComponent",
  "DataSourceUpdateComponentOne",
  "DataSourceUpdateCustomMetadataValue",
+ "EmbeddingConfig",
+ "EmbeddingConfigComponent",
+ "EmbeddingConfigType",
  "EvalDataset",
  "EvalDatasetJobParams",
  "EvalDatasetJobRecord",
@@ -230,6 +241,7 @@ __all__ = [
  "ExtractionResult",
  "ExtractionResultDataValue",
  "ExtractionSchema",
+ "ExtractionSchemaCreateDataSchemaValue",
  "ExtractionSchemaDataSchemaValue",
  "ExtractionSchemaUpdateDataSchemaValue",
  "File",
@@ -307,6 +319,8 @@ __all__ = [
  "TextNodeRelationshipsValue",
  "TextNodeWithScore",
  "TokenTextSplitter",
+ "TransformConfig",
+ "TransformConfigMode",
  "TransformationCategoryNames",
  "UnprocessableEntityError",
  "UserOrganization",

llama_cloud/resources/__init__.py CHANGED
@@ -14,7 +14,7 @@ from . import (
  )
  from .data_sinks import DataSinkUpdateComponent, DataSinkUpdateComponentOne
  from .data_sources import DataSourceUpdateComponent, DataSourceUpdateComponentOne, DataSourceUpdateCustomMetadataValue
- from .extraction import ExtractionSchemaUpdateDataSchemaValue
+ from .extraction import ExtractionSchemaCreateDataSchemaValue, ExtractionSchemaUpdateDataSchemaValue
  from .files import FileCreateResourceInfoValue
  from .pipelines import PipelineFileUpdateCustomMetadataValue

@@ -24,6 +24,7 @@ __all__ = [
  "DataSourceUpdateComponent",
  "DataSourceUpdateComponentOne",
  "DataSourceUpdateCustomMetadataValue",
+ "ExtractionSchemaCreateDataSchemaValue",
  "ExtractionSchemaUpdateDataSchemaValue",
  "FileCreateResourceInfoValue",
  "PipelineFileUpdateCustomMetadataValue",

llama_cloud/resources/extraction/__init__.py CHANGED
@@ -1,5 +1,5 @@
  # This file was auto-generated by Fern from our API Definition.

- from .types import ExtractionSchemaUpdateDataSchemaValue
+ from .types import ExtractionSchemaCreateDataSchemaValue, ExtractionSchemaUpdateDataSchemaValue

- __all__ = ["ExtractionSchemaUpdateDataSchemaValue"]
+ __all__ = ["ExtractionSchemaCreateDataSchemaValue", "ExtractionSchemaUpdateDataSchemaValue"]

llama_cloud/resources/extraction/client.py CHANGED
@@ -13,6 +13,7 @@ from ...types.extraction_job import ExtractionJob
  from ...types.extraction_result import ExtractionResult
  from ...types.extraction_schema import ExtractionSchema
  from ...types.http_validation_error import HttpValidationError
+ from .types.extraction_schema_create_data_schema_value import ExtractionSchemaCreateDataSchemaValue
  from .types.extraction_schema_update_data_schema_value import ExtractionSchemaUpdateDataSchemaValue

  try:
@@ -31,47 +32,66 @@ class ExtractionClient:
  def __init__(self, *, client_wrapper: SyncClientWrapper):
  self._client_wrapper = client_wrapper

- def infer_schema(
+ def list_schemas(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractionSchema]:
+ """
+ Parameters:
+ - project_id: typing.Optional[str].
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.extraction.list_schemas()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas"),
+ params=remove_none_from_dict({"project_id": project_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[ExtractionSchema], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_schema(
  self,
  *,
- schema_id: typing.Optional[str] = OMIT,
  name: str,
  project_id: typing.Optional[str] = OMIT,
- file_ids: typing.List[str],
- stream: typing.Optional[bool] = OMIT,
+ data_schema: typing.Dict[str, ExtractionSchemaCreateDataSchemaValue],
  ) -> ExtractionSchema:
  """
  Parameters:
- - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema
-
  - name: str. The name of the extraction schema

  - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

- - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains
-
- - stream: typing.Optional[bool]. Whether to stream the results of the extraction schema
+ - data_schema: typing.Dict[str, ExtractionSchemaCreateDataSchemaValue]. The schema of the data
  ---
  from llama_cloud.client import LlamaCloud

  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.extraction.infer_schema(
+ client.extraction.create_schema(
  name="string",
- file_ids=[],
+ data_schema={},
  )
  """
- _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
- if schema_id is not OMIT:
- _request["schema_id"] = schema_id
+ _request: typing.Dict[str, typing.Any] = {"name": name, "data_schema": data_schema}
  if project_id is not OMIT:
  _request["project_id"] = project_id
- if stream is not OMIT:
- _request["stream"] = stream
  _response = self._client_wrapper.httpx_client.request(
  "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas"),
  json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -86,27 +106,53 @@ class ExtractionClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def list_schemas(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractionSchema]:
+ def infer_schema(
+ self,
+ *,
+ schema_id: typing.Optional[str] = OMIT,
+ name: str,
+ project_id: typing.Optional[str] = OMIT,
+ file_ids: typing.List[str],
+ stream: typing.Optional[bool] = OMIT,
+ ) -> ExtractionSchema:
  """
  Parameters:
- - project_id: typing.Optional[str].
+ - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema
+
+ - name: str. The name of the extraction schema
+
+ - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to
+
+ - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains
+
+ - stream: typing.Optional[bool]. Whether to stream the results of the extraction schema
  ---
  from llama_cloud.client import LlamaCloud

  client = LlamaCloud(
  token="YOUR_TOKEN",
  )
- client.extraction.list_schemas()
+ client.extraction.infer_schema(
+ name="string",
+ file_ids=[],
+ )
  """
+ _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
+ if schema_id is not OMIT:
+ _request["schema_id"] = schema_id
+ if project_id is not OMIT:
+ _request["project_id"] = project_id
+ if stream is not OMIT:
+ _request["stream"] = stream
  _response = self._client_wrapper.httpx_client.request(
- "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas"),
- params=remove_none_from_dict({"project_id": project_id}),
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
+ json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(typing.List[ExtractionSchema], _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ExtractionSchema, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
@@ -350,47 +396,66 @@ class AsyncExtractionClient:
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
  self._client_wrapper = client_wrapper

- async def infer_schema(
+ async def list_schemas(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractionSchema]:
+ """
+ Parameters:
+ - project_id: typing.Optional[str].
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.extraction.list_schemas()
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "GET",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas"),
+ params=remove_none_from_dict({"project_id": project_id}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.List[ExtractionSchema], _response.json()) # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_schema(
  self,
  *,
- schema_id: typing.Optional[str] = OMIT,
  name: str,
  project_id: typing.Optional[str] = OMIT,
- file_ids: typing.List[str],
- stream: typing.Optional[bool] = OMIT,
+ data_schema: typing.Dict[str, ExtractionSchemaCreateDataSchemaValue],
  ) -> ExtractionSchema:
  """
  Parameters:
- - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema
-
  - name: str. The name of the extraction schema

  - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

- - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains
-
- - stream: typing.Optional[bool]. Whether to stream the results of the extraction schema
+ - data_schema: typing.Dict[str, ExtractionSchemaCreateDataSchemaValue]. The schema of the data
  ---
  from llama_cloud.client import AsyncLlamaCloud

  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.extraction.infer_schema(
+ await client.extraction.create_schema(
  name="string",
- file_ids=[],
+ data_schema={},
  )
  """
- _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
- if schema_id is not OMIT:
- _request["schema_id"] = schema_id
+ _request: typing.Dict[str, typing.Any] = {"name": name, "data_schema": data_schema}
  if project_id is not OMIT:
  _request["project_id"] = project_id
- if stream is not OMIT:
- _request["stream"] = stream
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas"),
  json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
@@ -405,27 +470,53 @@ class AsyncExtractionClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def list_schemas(self, *, project_id: typing.Optional[str] = None) -> typing.List[ExtractionSchema]:
+ async def infer_schema(
+ self,
+ *,
+ schema_id: typing.Optional[str] = OMIT,
+ name: str,
+ project_id: typing.Optional[str] = OMIT,
+ file_ids: typing.List[str],
+ stream: typing.Optional[bool] = OMIT,
+ ) -> ExtractionSchema:
  """
  Parameters:
- - project_id: typing.Optional[str].
+ - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema
+
+ - name: str. The name of the extraction schema
+
+ - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to
+
+ - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains
+
+ - stream: typing.Optional[bool]. Whether to stream the results of the extraction schema
  ---
  from llama_cloud.client import AsyncLlamaCloud

  client = AsyncLlamaCloud(
  token="YOUR_TOKEN",
  )
- await client.extraction.list_schemas()
+ await client.extraction.infer_schema(
+ name="string",
+ file_ids=[],
+ )
  """
+ _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
+ if schema_id is not OMIT:
+ _request["schema_id"] = schema_id
+ if project_id is not OMIT:
+ _request["project_id"] = project_id
+ if stream is not OMIT:
+ _request["stream"] = stream
  _response = await self._client_wrapper.httpx_client.request(
- "GET",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas"),
- params=remove_none_from_dict({"project_id": project_id}),
+ "POST",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
+ json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
  if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(typing.List[ExtractionSchema], _response.json()) # type: ignore
+ return pydantic.parse_obj_as(ExtractionSchema, _response.json()) # type: ignore
  if _response.status_code == 422:
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
  try:
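
Net effect on the extraction client (sync and async alike): the methods are reordered, infer_schema keeps its POST to api/v1/extraction/schemas/infer, and the substantive addition is create_schema, which POSTs a caller-supplied data_schema to api/v1/extraction/schemas. A minimal sketch of the new call; the invoice fields in data_schema are illustrative, not part of the SDK:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# data_schema values may be any JSON-compatible value
# (ExtractionSchemaCreateDataSchemaValue); these keys are made up.
schema = client.extraction.create_schema(
    name="invoices",
    data_schema={
        "invoice_number": {"type": "string"},
        "total": {"type": "number"},
    },
)
print(schema)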

llama_cloud/resources/extraction/types/__init__.py CHANGED
@@ -1,5 +1,6 @@
  # This file was auto-generated by Fern from our API Definition.

+ from .extraction_schema_create_data_schema_value import ExtractionSchemaCreateDataSchemaValue
  from .extraction_schema_update_data_schema_value import ExtractionSchemaUpdateDataSchemaValue

- __all__ = ["ExtractionSchemaUpdateDataSchemaValue"]
+ __all__ = ["ExtractionSchemaCreateDataSchemaValue", "ExtractionSchemaUpdateDataSchemaValue"]

llama_cloud/resources/extraction/types/extraction_schema_create_data_schema_value.py ADDED
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ExtractionSchemaCreateDataSchemaValue = typing.Union[
+ typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+ ]
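
The alias admits any JSON-compatible value, so a data_schema mapping can nest objects and arrays freely. A typed illustration (the keys and values are made up):

import typing

from llama_cloud import ExtractionSchemaCreateDataSchemaValue

# Each value may be a dict, list, str, int, float, or bool.
data_schema: typing.Dict[str, ExtractionSchemaCreateDataSchemaValue] = {
    "type": "object",                              # str
    "required": ["vendor", "total"],               # list
    "properties": {"total": {"type": "number"}},   # nested dict
    "additionalProperties": False,                 # bool
}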

llama_cloud/resources/pipelines/client.py CHANGED
@@ -15,6 +15,7 @@ from ...types.cloud_document import CloudDocument
  from ...types.cloud_document_create import CloudDocumentCreate
  from ...types.configured_transformation_item import ConfiguredTransformationItem
  from ...types.data_sink_create import DataSinkCreate
+ from ...types.embedding_config import EmbeddingConfig
  from ...types.eval_dataset_job_record import EvalDatasetJobRecord
  from ...types.eval_execution_params import EvalExecutionParams
  from ...types.eval_execution_params_override import EvalExecutionParamsOverride
@@ -35,6 +36,7 @@ from ...types.preset_retrieval_params import PresetRetrievalParams
  from ...types.retrieval_mode import RetrievalMode
  from ...types.retrieve_results import RetrieveResults
  from ...types.text_node import TextNode
+ from ...types.transform_config import TransformConfig
  from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue

  try:
@@ -117,8 +119,11 @@ class PipelinesClient:
  - request: PipelineCreate.
  ---
  from llama_cloud import (
+ AutoTransformConfig,
  ConfigurableDataSinkNames,
  DataSinkCreate,
+ EmbeddingConfig,
+ EmbeddingConfigType,
  EvalExecutionParams,
  FilterCondition,
  LlamaParseParameters,
@@ -128,6 +133,8 @@ class PipelinesClient:
  PresetRetrievalParams,
  RetrievalMode,
  SupportedEvalLlmModelNames,
+ TransformConfig,
+ TransformConfigMode,
  )
  from llama_cloud.client import LlamaCloud

@@ -136,6 +143,13 @@ class PipelinesClient:
  )
  client.pipelines.create_pipeline(
  request=PipelineCreate(
+ embedding_config=EmbeddingConfig(
+ type=EmbeddingConfigType.OPENAI_EMBEDDING,
+ ),
+ transform_config=TransformConfig(
+ mode=TransformConfigMode.AUTO,
+ config=AutoTransformConfig(),
+ ),
  data_sink=DataSinkCreate(
  name="string",
  sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -185,8 +199,11 @@ class PipelinesClient:
  - request: PipelineCreate.
  ---
  from llama_cloud import (
+ AutoTransformConfig,
  ConfigurableDataSinkNames,
  DataSinkCreate,
+ EmbeddingConfig,
+ EmbeddingConfigType,
  EvalExecutionParams,
  FilterCondition,
  LlamaParseParameters,
@@ -196,6 +213,8 @@ class PipelinesClient:
  PresetRetrievalParams,
  RetrievalMode,
  SupportedEvalLlmModelNames,
+ TransformConfig,
+ TransformConfigMode,
  )
  from llama_cloud.client import LlamaCloud

@@ -204,6 +223,13 @@ class PipelinesClient:
  )
  client.pipelines.upsert_pipeline(
  request=PipelineCreate(
+ embedding_config=EmbeddingConfig(
+ type=EmbeddingConfigType.OPENAI_EMBEDDING,
+ ),
+ transform_config=TransformConfig(
+ mode=TransformConfigMode.AUTO,
+ config=AutoTransformConfig(),
+ ),
  data_sink=DataSinkCreate(
  name="string",
  sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -278,6 +304,8 @@ class PipelinesClient:
  self,
  pipeline_id: str,
  *,
+ embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
+ transform_config: typing.Optional[TransformConfig] = OMIT,
  configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
  data_sink_id: typing.Optional[str] = OMIT,
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
@@ -293,7 +321,11 @@ class PipelinesClient:
  Parameters:
  - pipeline_id: str.

- - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
+ - embedding_config: typing.Optional[EmbeddingConfig]. Configuration for the embedding model.
+
+ - transform_config: typing.Optional[TransformConfig]. Configuration for the transformation.
+
+ - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. List of configured transformations.

  - data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.

@@ -310,8 +342,11 @@ class PipelinesClient:
  - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
  ---
  from llama_cloud import (
+ AutoTransformConfig,
  ConfigurableDataSinkNames,
  DataSinkCreate,
+ EmbeddingConfig,
+ EmbeddingConfigType,
  EvalExecutionParams,
  FilterCondition,
  LlamaParseParameters,
@@ -319,6 +354,8 @@ class PipelinesClient:
  PresetRetrievalParams,
  RetrievalMode,
  SupportedEvalLlmModelNames,
+ TransformConfig,
+ TransformConfigMode,
  )
  from llama_cloud.client import LlamaCloud

@@ -327,6 +364,13 @@ class PipelinesClient:
  )
  client.pipelines.update_existing_pipeline(
  pipeline_id="string",
+ embedding_config=EmbeddingConfig(
+ type=EmbeddingConfigType.OPENAI_EMBEDDING,
+ ),
+ transform_config=TransformConfig(
+ mode=TransformConfigMode.AUTO,
+ config=AutoTransformConfig(),
+ ),
  data_sink=DataSinkCreate(
  name="string",
  sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -345,6 +389,10 @@ class PipelinesClient:
  )
  """
  _request: typing.Dict[str, typing.Any] = {}
+ if embedding_config is not OMIT:
+ _request["embedding_config"] = embedding_config
+ if transform_config is not OMIT:
+ _request["transform_config"] = transform_config
  if configured_transformations is not OMIT:
  _request["configured_transformations"] = configured_transformations
  if data_sink_id is not OMIT:
@@ -1676,8 +1724,11 @@ class AsyncPipelinesClient:
  - request: PipelineCreate.
  ---
  from llama_cloud import (
+ AutoTransformConfig,
  ConfigurableDataSinkNames,
  DataSinkCreate,
+ EmbeddingConfig,
+ EmbeddingConfigType,
  EvalExecutionParams,
  FilterCondition,
  LlamaParseParameters,
@@ -1687,6 +1738,8 @@ class AsyncPipelinesClient:
  PresetRetrievalParams,
  RetrievalMode,
  SupportedEvalLlmModelNames,
+ TransformConfig,
+ TransformConfigMode,
  )
  from llama_cloud.client import AsyncLlamaCloud

@@ -1695,6 +1748,13 @@ class AsyncPipelinesClient:
  )
  await client.pipelines.create_pipeline(
  request=PipelineCreate(
+ embedding_config=EmbeddingConfig(
+ type=EmbeddingConfigType.OPENAI_EMBEDDING,
+ ),
+ transform_config=TransformConfig(
+ mode=TransformConfigMode.AUTO,
+ config=AutoTransformConfig(),
+ ),
  data_sink=DataSinkCreate(
  name="string",
  sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -1744,8 +1804,11 @@ class AsyncPipelinesClient:
  - request: PipelineCreate.
  ---
  from llama_cloud import (
+ AutoTransformConfig,
  ConfigurableDataSinkNames,
  DataSinkCreate,
+ EmbeddingConfig,
+ EmbeddingConfigType,
  EvalExecutionParams,
  FilterCondition,
  LlamaParseParameters,
@@ -1755,6 +1818,8 @@ class AsyncPipelinesClient:
  PresetRetrievalParams,
  RetrievalMode,
  SupportedEvalLlmModelNames,
+ TransformConfig,
+ TransformConfigMode,
  )
  from llama_cloud.client import AsyncLlamaCloud

@@ -1763,6 +1828,13 @@ class AsyncPipelinesClient:
  )
  await client.pipelines.upsert_pipeline(
  request=PipelineCreate(
+ embedding_config=EmbeddingConfig(
+ type=EmbeddingConfigType.OPENAI_EMBEDDING,
+ ),
+ transform_config=TransformConfig(
+ mode=TransformConfigMode.AUTO,
+ config=AutoTransformConfig(),
+ ),
  data_sink=DataSinkCreate(
  name="string",
  sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -1837,6 +1909,8 @@ class AsyncPipelinesClient:
  self,
  pipeline_id: str,
  *,
+ embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
+ transform_config: typing.Optional[TransformConfig] = OMIT,
  configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = OMIT,
  data_sink_id: typing.Optional[str] = OMIT,
  data_sink: typing.Optional[DataSinkCreate] = OMIT,
@@ -1852,7 +1926,11 @@ class AsyncPipelinesClient:
  Parameters:
  - pipeline_id: str.

- - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]].
+ - embedding_config: typing.Optional[EmbeddingConfig]. Configuration for the embedding model.
+
+ - transform_config: typing.Optional[TransformConfig]. Configuration for the transformation.
+
+ - configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]. List of configured transformations.

  - data_sink_id: typing.Optional[str]. Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID.

@@ -1869,8 +1947,11 @@ class AsyncPipelinesClient:
  - managed_pipeline_id: typing.Optional[str]. The ID of the ManagedPipeline this playground pipeline is linked to.
  ---
  from llama_cloud import (
+ AutoTransformConfig,
  ConfigurableDataSinkNames,
  DataSinkCreate,
+ EmbeddingConfig,
+ EmbeddingConfigType,
  EvalExecutionParams,
  FilterCondition,
  LlamaParseParameters,
@@ -1878,6 +1959,8 @@ class AsyncPipelinesClient:
  PresetRetrievalParams,
  RetrievalMode,
  SupportedEvalLlmModelNames,
+ TransformConfig,
+ TransformConfigMode,
  )
  from llama_cloud.client import AsyncLlamaCloud

@@ -1886,6 +1969,13 @@ class AsyncPipelinesClient:
  )
  await client.pipelines.update_existing_pipeline(
  pipeline_id="string",
+ embedding_config=EmbeddingConfig(
+ type=EmbeddingConfigType.OPENAI_EMBEDDING,
+ ),
+ transform_config=TransformConfig(
+ mode=TransformConfigMode.AUTO,
+ config=AutoTransformConfig(),
+ ),
  data_sink=DataSinkCreate(
  name="string",
  sink_type=ConfigurableDataSinkNames.CHROMA,
@@ -1904,6 +1994,10 @@ class AsyncPipelinesClient:
  )
  """
  _request: typing.Dict[str, typing.Any] = {}
+ if embedding_config is not OMIT:
+ _request["embedding_config"] = embedding_config
+ if transform_config is not OMIT:
+ _request["transform_config"] = transform_config
  if configured_transformations is not OMIT:
  _request["configured_transformations"] = configured_transformations
  if data_sink_id is not OMIT:
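
Because the new parameters default to the OMIT sentinel, update_existing_pipeline serializes them only when callers pass them explicitly; a partial update does not send an embedding_config field at all. A sketch, with a placeholder pipeline ID and illustrative chunking values:

from llama_cloud import AutoTransformConfig, TransformConfig, TransformConfigMode
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Only transform_config enters the request body; embedding_config stays
# OMIT and is never written into _request.
client.pipelines.update_existing_pipeline(
    pipeline_id="YOUR_PIPELINE_ID",  # placeholder
    transform_config=TransformConfig(
        mode=TransformConfigMode.AUTO,
        config=AutoTransformConfig(chunk_size=1024, chunk_overlap=200),
    ),
)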

llama_cloud/types/__init__.py CHANGED
@@ -1,5 +1,6 @@
  # This file was auto-generated by Fern from our API Definition.

+ from .auto_transform_config import AutoTransformConfig
  from .azure_open_ai_embedding import AzureOpenAiEmbedding
  from .base import Base
  from .base_prompt_template import BasePromptTemplate
@@ -47,6 +48,9 @@ from .data_source_create_component_one import DataSourceCreateComponentOne
  from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
  from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
  from .data_source_definition import DataSourceDefinition
+ from .embedding_config import EmbeddingConfig
+ from .embedding_config_component import EmbeddingConfigComponent
+ from .embedding_config_type import EmbeddingConfigType
  from .eval_dataset import EvalDataset
  from .eval_dataset_job_params import EvalDatasetJobParams
  from .eval_dataset_job_record import EvalDatasetJobRecord
@@ -133,6 +137,8 @@ from .text_node import TextNode
  from .text_node_relationships_value import TextNodeRelationshipsValue
  from .text_node_with_score import TextNodeWithScore
  from .token_text_splitter import TokenTextSplitter
+ from .transform_config import TransformConfig
+ from .transform_config_mode import TransformConfigMode
  from .transformation_category_names import TransformationCategoryNames
  from .user_organization import UserOrganization
  from .user_organization_create import UserOrganizationCreate
@@ -141,6 +147,7 @@ from .validation_error import ValidationError
  from .validation_error_loc_item import ValidationErrorLocItem

  __all__ = [
+ "AutoTransformConfig",
  "AzureOpenAiEmbedding",
  "Base",
  "BasePromptTemplate",
@@ -188,6 +195,9 @@ __all__ = [
  "DataSourceCreateCustomMetadataValue",
  "DataSourceCustomMetadataValue",
  "DataSourceDefinition",
+ "EmbeddingConfig",
+ "EmbeddingConfigComponent",
+ "EmbeddingConfigType",
  "EvalDataset",
  "EvalDatasetJobParams",
  "EvalDatasetJobRecord",
@@ -274,6 +284,8 @@ __all__ = [
  "TextNodeRelationshipsValue",
  "TextNodeWithScore",
  "TokenTextSplitter",
+ "TransformConfig",
+ "TransformConfigMode",
  "TransformationCategoryNames",
  "UserOrganization",
  "UserOrganizationCreate",

llama_cloud/types/auto_transform_config.py ADDED
@@ -0,0 +1,32 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+ import pydantic
+ if pydantic.__version__.startswith("1."):
+ raise ImportError
+ import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+ import pydantic # type: ignore
+
+
+ class AutoTransformConfig(pydantic.BaseModel):
+ chunk_size: typing.Optional[int] = pydantic.Field(description="Chunk size for the transformation.")
+ chunk_overlap: typing.Optional[int] = pydantic.Field(description="Chunk overlap for the transformation.")
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
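
Both fields are optional, and the overridden json/dict pass by_alias=True and exclude_unset=True, so only explicitly-set fields are serialized. A small sketch:

from llama_cloud import AutoTransformConfig

# exclude_unset=True drops fields that were never assigned.
print(AutoTransformConfig().json())                # {}
print(AutoTransformConfig(chunk_size=512).json())  # {"chunk_size": 512}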

llama_cloud/types/embedding_config.py ADDED
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .embedding_config_component import EmbeddingConfigComponent
+ from .embedding_config_type import EmbeddingConfigType
+
+ try:
+ import pydantic
+ if pydantic.__version__.startswith("1."):
+ raise ImportError
+ import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+ import pydantic # type: ignore
+
+
+ class EmbeddingConfig(pydantic.BaseModel):
+ type: typing.Optional[EmbeddingConfigType] = pydantic.Field(description="Type of the embedding model.")
+ component: typing.Optional[EmbeddingConfigComponent] = pydantic.Field(
+ description="Configuration for the transformation."
+ )
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
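
An EmbeddingConfig pairs a type discriminator with an optional provider-specific component. The minimal form used throughout the docstring examples sets only the type:

from llama_cloud import EmbeddingConfig, EmbeddingConfigType

# Type-only config, as in the create_pipeline docstrings above; a provider
# model (e.g. an OpenAiEmbedding, defined elsewhere in the package) could
# additionally be passed as component.
config = EmbeddingConfig(type=EmbeddingConfigType.OPENAI_EMBEDDING)
print(config.json())  # {"type": "OPENAI_EMBEDDING"}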

llama_cloud/types/embedding_config_component.py ADDED
@@ -0,0 +1,19 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ from .azure_open_ai_embedding import AzureOpenAiEmbedding
+ from .bedrock_embedding import BedrockEmbedding
+ from .cohere_embedding import CohereEmbedding
+ from .gemini_embedding import GeminiEmbedding
+ from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
+ from .open_ai_embedding import OpenAiEmbedding
+
+ EmbeddingConfigComponent = typing.Union[
+ OpenAiEmbedding,
+ AzureOpenAiEmbedding,
+ BedrockEmbedding,
+ CohereEmbedding,
+ GeminiEmbedding,
+ HuggingFaceInferenceApiEmbedding,
+ ]
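
The union covers the six embedding integrations the API accepts, and since EmbeddingConfig sets smart_union = True, a component should keep its concrete model type rather than being coerced into the first matching member. A typed helper sketch:

from llama_cloud import EmbeddingConfig, EmbeddingConfigComponent, EmbeddingConfigType

def make_embedding_config(
    config_type: EmbeddingConfigType,
    component: EmbeddingConfigComponent,
) -> EmbeddingConfig:
    # component may be any of the six provider models in the union.
    return EmbeddingConfig(type=config_type, component=component)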

llama_cloud/types/embedding_config_type.py ADDED
@@ -0,0 +1,41 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class EmbeddingConfigType(str, enum.Enum):
+ """
+ An enumeration.
+ """
+
+ OPENAI_EMBEDDING = "OPENAI_EMBEDDING"
+ AZURE_EMBEDDING = "AZURE_EMBEDDING"
+ BEDROCK_EMBEDDING = "BEDROCK_EMBEDDING"
+ COHERE_EMBEDDING = "COHERE_EMBEDDING"
+ GEMINI_EMBEDDING = "GEMINI_EMBEDDING"
+ HUGGINGFACE_API_EMBEDDING = "HUGGINGFACE_API_EMBEDDING"
+
+ def visit(
+ self,
+ openai_embedding: typing.Callable[[], T_Result],
+ azure_embedding: typing.Callable[[], T_Result],
+ bedrock_embedding: typing.Callable[[], T_Result],
+ cohere_embedding: typing.Callable[[], T_Result],
+ gemini_embedding: typing.Callable[[], T_Result],
+ huggingface_api_embedding: typing.Callable[[], T_Result],
+ ) -> T_Result:
+ if self is EmbeddingConfigType.OPENAI_EMBEDDING:
+ return openai_embedding()
+ if self is EmbeddingConfigType.AZURE_EMBEDDING:
+ return azure_embedding()
+ if self is EmbeddingConfigType.BEDROCK_EMBEDDING:
+ return bedrock_embedding()
+ if self is EmbeddingConfigType.COHERE_EMBEDDING:
+ return cohere_embedding()
+ if self is EmbeddingConfigType.GEMINI_EMBEDDING:
+ return gemini_embedding()
+ if self is EmbeddingConfigType.HUGGINGFACE_API_EMBEDDING:
+ return huggingface_api_embedding()
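
visit is an exhaustive visitor over the enum: one callable per member, so a future provider addition surfaces as a missing-argument error at every call site. For instance, mapping members to display labels (the labels are made up for illustration):

from llama_cloud import EmbeddingConfigType

def provider_label(config_type: EmbeddingConfigType) -> str:
    # One branch per enum member; labels here are illustrative only.
    return config_type.visit(
        openai_embedding=lambda: "OpenAI",
        azure_embedding=lambda: "Azure OpenAI",
        bedrock_embedding=lambda: "AWS Bedrock",
        cohere_embedding=lambda: "Cohere",
        gemini_embedding=lambda: "Gemini",
        huggingface_api_embedding=lambda: "Hugging Face Inference API",
    )

print(provider_label(EmbeddingConfigType.COHERE_EMBEDDING))  # Cohere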

llama_cloud/types/pipeline_create.py CHANGED
@@ -6,10 +6,12 @@ import typing
  from ..core.datetime_utils import serialize_datetime
  from .configured_transformation_item import ConfiguredTransformationItem
  from .data_sink_create import DataSinkCreate
+ from .embedding_config import EmbeddingConfig
  from .eval_execution_params import EvalExecutionParams
  from .llama_parse_parameters import LlamaParseParameters
  from .pipeline_type import PipelineType
  from .preset_retrieval_params import PresetRetrievalParams
+ from .transform_config import TransformConfig

  try:
  import pydantic
@@ -25,6 +27,12 @@ class PipelineCreate(pydantic.BaseModel):
  Schema for creating a pipeline.
  """

+ embedding_config: typing.Optional[EmbeddingConfig] = pydantic.Field(
+ description="Configuration for the embedding model."
+ )
+ transform_config: typing.Optional[TransformConfig] = pydantic.Field(
+ description="Configuration for the transformation."
+ )
  configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
  description="List of configured transformations."
  )

llama_cloud/types/transform_config.py ADDED
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .auto_transform_config import AutoTransformConfig
+ from .transform_config_mode import TransformConfigMode
+
+ try:
+ import pydantic
+ if pydantic.__version__.startswith("1."):
+ raise ImportError
+ import pydantic.v1 as pydantic # type: ignore
+ except ImportError:
+ import pydantic # type: ignore
+
+
+ class TransformConfig(pydantic.BaseModel):
+ mode: typing.Optional[TransformConfigMode] = pydantic.Field(
+ description="Mode for the transformation configuration."
+ )
+ config: typing.Optional[AutoTransformConfig] = pydantic.Field(description="Configuration for the transformation.")
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().dict(**kwargs_with_defaults)
+
+ class Config:
+ frozen = True
+ smart_union = True
+ json_encoders = {dt.datetime: serialize_datetime}
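
In AUTO mode the nested config carries only the AutoTransformConfig chunking knobs, and the same exclude_unset serialization applies recursively. A sketch with an illustrative chunk size:

from llama_cloud import AutoTransformConfig, TransformConfig, TransformConfigMode

config = TransformConfig(
    mode=TransformConfigMode.AUTO,
    config=AutoTransformConfig(chunk_size=1024),  # value illustrative
)
# Serializes to {"mode": "AUTO", "config": {"chunk_size": 1024}}.
print(config.json())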

llama_cloud/types/transform_config_mode.py ADDED
@@ -0,0 +1,21 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class TransformConfigMode(str, enum.Enum):
+ """
+ An enumeration.
+ """
+
+ AUTO = "AUTO"
+ ADVANCED = "ADVANCED"
+
+ def visit(self, auto: typing.Callable[[], T_Result], advanced: typing.Callable[[], T_Result]) -> T_Result:
+ if self is TransformConfigMode.AUTO:
+ return auto()
+ if self is TransformConfigMode.ADVANCED:
+ return advanced()

llama_cloud-0.0.11.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: llama-cloud
- Version: 0.0.10
+ Version: 0.0.11
  Summary:
  Author: Logan Markewich
  Author-email: logan@runllama.ai
llama_cloud-0.0.11.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- llama_cloud/__init__.py,sha256=iwA_KYHvKDW9C5OqOs1DW4UOBpV2sG2Jqfgf2ihV8wc,8525
+ llama_cloud/__init__.py,sha256=xHcoQ60V5FpNig3XYhARv0-xupmVjF12yLEVk_Ov9X8,8919
  llama_cloud/client.py,sha256=bhZPiYd1TQSn3PRgHZ66MgMnBneG4Skc9g6UsT0wQnE,4299
  llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
  llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -9,7 +9,7 @@ llama_cloud/core/remove_none_from_dict.py,sha256=8m91FC3YuVem0Gm9_sXhJ2tGvP33owJ
  llama_cloud/environment.py,sha256=q4q-uY5WgcSlzfHwEANOqFQPu0lstqvMnVOsSfifMKo,168
  llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6g,170
  llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
- llama_cloud/resources/__init__.py,sha256=DCmiYR81K2DZKpNUZDPG5Tb2DcDNNZf2NbOcoB1_Ndw,1105
+ llama_cloud/resources/__init__.py,sha256=uHsQ6hgIBRAp4ozjPDH1Cd8CqkiNnoYBcO2-LBAvtNg,1189
  llama_cloud/resources/component_definitions/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/component_definitions/client.py,sha256=YYfoXNa1qim2OdD5y4N5mvoBZKtrCuXS560mtqH_-1c,7569
  llama_cloud/resources/data_sinks/__init__.py,sha256=nsMEyxkVilxvQGSdJi0Z0yKZoTaTWewZIGJNoMwNDsw,205
@@ -25,9 +25,10 @@ llama_cloud/resources/data_sources/types/data_source_update_component_one.py,sha
  llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py,sha256=3aFC-p8MSxjhOu2nFtqk0pixj6RqNqcFnbOYngUdZUk,215
  llama_cloud/resources/evals/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/evals/client.py,sha256=P0NmQPRu606DZ2U-RKZRgh25BMriWyKGB77X0Dfe4q0,27603
- llama_cloud/resources/extraction/__init__.py,sha256=Q4-qd3ywXqa_GOSxstVJJccHnJGAfKxz2FYLXYeiyWs,175
- llama_cloud/resources/extraction/client.py,sha256=VQXFKLI5NalLpYui8REgqhN0_xLezrKbrTSjaCAxZ3A,27212
- llama_cloud/resources/extraction/types/__init__.py,sha256=GgKhbek1WvvnoXgiB0XeSOAX3W94honf5HzL3gvtAEc,212
+ llama_cloud/resources/extraction/__init__.py,sha256=trseRsayeGiyGThI4s_Folw5AHmdTSEP3KPrlvNhfVw,255
+ llama_cloud/resources/extraction/client.py,sha256=iYYBliMeMtExllJXV0FLeGz46Gc88Ksmk_8vk1TrAzg,31012
+ llama_cloud/resources/extraction/types/__init__.py,sha256=ePJKSJ6hGIsPnfpe0Sp5w4mBZgnZes4cdtZ8Gfw81Gc,347
+ llama_cloud/resources/extraction/types/extraction_schema_create_data_schema_value.py,sha256=igTdUjMeB-PI5xKrloRKHY-EvL6_V8OLshABu6Dyx4A,217
  llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py,sha256=z_4tkLkWnHnd3Xa9uUctk9hG9Mo7GKU4dK4s2pm8qow,217
  llama_cloud/resources/files/__init__.py,sha256=aZpyTj6KpZvA5XVwmuo1sIlRs7ba98btxVBZ6j5vIsI,155
  llama_cloud/resources/files/client.py,sha256=pU7ugpqW4dAXJycVg3KxUI82ixiH6vZtcwAaHyPdsDA,22186
@@ -38,12 +39,13 @@ llama_cloud/resources/organizations/client.py,sha256=akn_3sytJW_VhuLVBbP0TKiKKbB
  llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/parsing/client.py,sha256=fLNVNa945yP76sTf6NdSm5ikifSFFrjxxHXy0vwNBUc,40450
  llama_cloud/resources/pipelines/__init__.py,sha256=H7yaFIN62vjuhU3TOKzzuf8qpxZRgw1xVa-eyig-2YU,175
- llama_cloud/resources/pipelines/client.py,sha256=HlWhqcZUKosmkhBuDgj-ocRKGpsN4tJRHa3KnKbaZZY,125040
+ llama_cloud/resources/pipelines/client.py,sha256=V_j8f77_i0IyqjVBWRXIlHBJaymFRRDdSSKU6oR1SIs,129012
  llama_cloud/resources/pipelines/types/__init__.py,sha256=xuT4OBPLrRfEe-E3UVdJvRjl9jTp7tNBK_YzZBb6Kj8,212
  llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
  llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/projects/client.py,sha256=nK81HdhGbWY1rh8rSEsKzRuvyvCQ-IkhLHIPDqEqVFU,47754
- llama_cloud/types/__init__.py,sha256=LXj40Rz1FVHxAyWgl2Wp7FcleeQq_sHeaz1vYlto4AQ,11634
+ llama_cloud/types/__init__.py,sha256=Jdcar7UNhEXHZ7JB4Wanq6C388JZQbrge2Mr85IhVa8,12115
+ llama_cloud/types/auto_transform_config.py,sha256=HVeHZM75DMRznScqLTfrMwcZwIdyWPuaEYbPewnHqwc,1168
  llama_cloud/types/azure_open_ai_embedding.py,sha256=-9LzRDNcxhRvEshA8SaI9zFMTpHLXJ34iMnpIVk88Cc,3590
  llama_cloud/types/base.py,sha256=cn_Zn61yLMDCMm1iZTPvKILSRlqRzOqZtSYyYBY5dIE,938
  llama_cloud/types/base_prompt_template.py,sha256=GO9k4EDVMf3gRQIA4bVfXqgIMKnKTXhi1JlGvhqXDRY,1576
@@ -91,6 +93,9 @@ llama_cloud/types/data_source_create_component_one.py,sha256=rkVjFBxh1wA1BcsDWsJ
  llama_cloud/types/data_source_create_custom_metadata_value.py,sha256=ejSsQNbszYQaUWFh9r9kQpHf88qbhuRv1SI9J_MOSC0,215
  llama_cloud/types/data_source_custom_metadata_value.py,sha256=pTZn5yjZYmuOhsLABFJOKZblZUkRqo1CqLAuP5tKji4,209
  llama_cloud/types/data_source_definition.py,sha256=HlSlTxzYcQJOSo_2OSroAE8vAr-otDvTNBSEkA54vL8,1575
+ llama_cloud/types/embedding_config.py,sha256=eqW7xg1IHZcXIEsqVzAn1thxcWGTeBwhpDuqpS6EYKw,1319
+ llama_cloud/types/embedding_config_component.py,sha256=gi6-TKmeV2z72KSfJof6diEW4z7VXuNOyhJhDK7ZrPQ,601
+ llama_cloud/types/embedding_config_type.py,sha256=iLK2npXhXa0E1FjsZvCC1JTzwJ1ItHYkNgDJvTkofyc,1470
  llama_cloud/types/eval_dataset.py,sha256=Uav-YJqAvyzCp1j2XavzzVLV975uki71beIBLkCt8LY,1408
  llama_cloud/types/eval_dataset_job_params.py,sha256=vcXLJWO581uigNvGAurPDgMeEFtQURWucLF5pemdeS0,1343
  llama_cloud/types/eval_dataset_job_record.py,sha256=mUBpT2CI1IYYxwvC7S3mKu5GhBKXzVR5btDLoGcXqSg,2763
@@ -144,7 +149,7 @@ llama_cloud/types/parsing_job_markdown_result.py,sha256=E3-CVNFH1IMyuGs_xzYfYdNg
  llama_cloud/types/parsing_job_text_result.py,sha256=1QZielAWXuzPFOgr_DWshXPjmbExAAgAHKAEYVQVtJ8,1082
  llama_cloud/types/parsing_usage.py,sha256=Wy_c-kAFADDBZgDwqNglsJv_t7vcjOm-8EY32oZEYzU,995
  llama_cloud/types/pipeline.py,sha256=h-Xo7HirFCvgiu7NaqSrUTM2wJKd9WXzcqnZ_j_kRkU,2661
- llama_cloud/types/pipeline_create.py,sha256=PIa51SI_kvS5V0OGlumz_fabktiAC0z7L1qmRNH-sbE,2687
+ llama_cloud/types/pipeline_create.py,sha256=oXSclyv8UNW58c6mfUkMavEf7T1xrZJWjbCxIP61r7k,3058
  llama_cloud/types/pipeline_data_source.py,sha256=A3AlRzTD7zr1y-u5O5LFESqIupbbG-fqUndQgeYj77w,2062
  llama_cloud/types/pipeline_data_source_component.py,sha256=Pk_K0Gv7xSWe5BKCdxz82EFd6AQDvZGN-6t3zg9h8NY,265
  llama_cloud/types/pipeline_data_source_component_one.py,sha256=sYaNaVl2gk-Clq2BCOKT2fUOGa_B7kcsw1P7aVdn-jA,873
@@ -177,13 +182,15 @@ llama_cloud/types/text_node.py,sha256=ANT9oPqBs9IJFPhtq-6PC4l44FA3ZYjz_9nOE8h0RA
  llama_cloud/types/text_node_relationships_value.py,sha256=qmXURTk1Xg7ZDzRSSV1uDEel0AXRLohND5ioezibHY0,217
  llama_cloud/types/text_node_with_score.py,sha256=k-KYWO_mgJBvO6xUfOD5W6v1Ku9E586_HsvDoQbLfuQ,1229
  llama_cloud/types/token_text_splitter.py,sha256=Mv8xBCvMXyYuQq1KInPe65O0YYCLWxs61pIbkBRfxG0,1883
+ llama_cloud/types/transform_config.py,sha256=Xci_UUMz_xzx_OzePxLNk-6NvXO0H2PZtgEOApoF2lk,1315
+ llama_cloud/types/transform_config_mode.py,sha256=4jH-_MnlkM758y0lzlMh9JwGtHrdgAHdm_V8ikk7CbY,518
  llama_cloud/types/transformation_category_names.py,sha256=0xjYe-mDW9OKbTGqL5fSbTvqsfrG4LDu_stW_ubVLl4,582
  llama_cloud/types/user_organization.py,sha256=fLgTKr1phJ4EdhTXmr5086bRy9RTAUy4km6mQz_jgRI,1964
  llama_cloud/types/user_organization_create.py,sha256=YESlfcI64710OFdQzgGD4a7aItgBwcIKdM1xFPs1Szw,1209
  llama_cloud/types/user_organization_delete.py,sha256=Z8RSRXc0AGAuGxv6eQPC2S1XIdRfNCXBggfEefgPseM,1209
  llama_cloud/types/validation_error.py,sha256=yZDLtjUHDY5w82Ra6CW0H9sLAr18R0RY1UNgJKR72DQ,1084
  llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPXjdtN9EB7HrLVo6EP0,128
- llama_cloud-0.0.10.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
- llama_cloud-0.0.10.dist-info/METADATA,sha256=Lir2q2uYKl_qOx9F3F66PJT2dUOpNz1T_QKT4-MOC_g,751
- llama_cloud-0.0.10.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- llama_cloud-0.0.10.dist-info/RECORD,,
+ llama_cloud-0.0.11.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+ llama_cloud-0.0.11.dist-info/METADATA,sha256=ZsJt0biJi2jFJ-AGbm6bQlTmY5UOC2BA0o5hjWLZKbI,751
+ llama_cloud-0.0.11.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ llama_cloud-0.0.11.dist-info/RECORD,,