llama-cloud 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of llama-cloud might be problematic. Click here for more details.

Files changed (105) hide show
  1. llama_cloud/__init__.py +12 -10
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/__init__.py +2 -1
  4. llama_cloud/resources/data_sinks/client.py +14 -14
  5. llama_cloud/resources/data_sources/client.py +16 -16
  6. llama_cloud/resources/embedding_model_configs/client.py +80 -24
  7. llama_cloud/resources/evals/client.py +36 -26
  8. llama_cloud/resources/extraction/client.py +32 -32
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +53 -28
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/organizations/client.py +60 -56
  14. llama_cloud/resources/parsing/client.py +555 -324
  15. llama_cloud/resources/pipelines/client.py +446 -302
  16. llama_cloud/resources/projects/client.py +270 -136
  17. llama_cloud/types/__init__.py +10 -10
  18. llama_cloud/types/azure_open_ai_embedding.py +12 -6
  19. llama_cloud/types/base_prompt_template.py +6 -2
  20. llama_cloud/types/bedrock_embedding.py +12 -6
  21. llama_cloud/types/character_splitter.py +4 -2
  22. llama_cloud/types/chat_message.py +1 -1
  23. llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
  24. llama_cloud/types/cloud_box_data_source.py +13 -6
  25. llama_cloud/types/cloud_confluence_data_source.py +7 -6
  26. llama_cloud/types/cloud_document.py +3 -1
  27. llama_cloud/types/cloud_document_create.py +3 -1
  28. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  29. llama_cloud/types/cloud_jira_data_source.py +7 -4
  30. llama_cloud/types/cloud_notion_page_data_source.py +3 -2
  31. llama_cloud/types/cloud_one_drive_data_source.py +6 -3
  32. llama_cloud/types/cloud_s_3_data_source.py +9 -4
  33. llama_cloud/types/cloud_sharepoint_data_source.py +9 -6
  34. llama_cloud/types/cloud_slack_data_source.py +7 -6
  35. llama_cloud/types/code_splitter.py +1 -1
  36. llama_cloud/types/cohere_embedding.py +7 -3
  37. llama_cloud/types/data_sink.py +4 -4
  38. llama_cloud/types/data_sink_create.py +1 -1
  39. llama_cloud/types/data_source.py +7 -5
  40. llama_cloud/types/data_source_create.py +4 -2
  41. llama_cloud/types/embedding_model_config.py +2 -2
  42. llama_cloud/types/embedding_model_config_update.py +4 -2
  43. llama_cloud/types/eval_dataset.py +2 -2
  44. llama_cloud/types/eval_dataset_job_record.py +13 -7
  45. llama_cloud/types/eval_execution_params_override.py +6 -2
  46. llama_cloud/types/eval_question.py +2 -2
  47. llama_cloud/types/extraction_result.py +2 -2
  48. llama_cloud/types/extraction_schema.py +5 -3
  49. llama_cloud/types/file.py +15 -7
  50. llama_cloud/types/file_permission_info_value.py +5 -0
  51. llama_cloud/types/filter_operator.py +2 -2
  52. llama_cloud/types/gemini_embedding.py +10 -6
  53. llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
  54. llama_cloud/types/input_message.py +3 -1
  55. llama_cloud/types/job_name_mapping.py +4 -0
  56. llama_cloud/types/llama_parse_parameters.py +11 -0
  57. llama_cloud/types/llm.py +4 -2
  58. llama_cloud/types/llm_parameters.py +5 -2
  59. llama_cloud/types/local_eval.py +10 -8
  60. llama_cloud/types/local_eval_results.py +1 -1
  61. llama_cloud/types/managed_ingestion_status_response.py +5 -3
  62. llama_cloud/types/markdown_element_node_parser.py +5 -3
  63. llama_cloud/types/markdown_node_parser.py +1 -1
  64. llama_cloud/types/metadata_filter.py +2 -2
  65. llama_cloud/types/metric_result.py +3 -3
  66. llama_cloud/types/node_parser.py +1 -1
  67. llama_cloud/types/open_ai_embedding.py +12 -6
  68. llama_cloud/types/organization.py +2 -2
  69. llama_cloud/types/page_splitter_node_parser.py +2 -2
  70. llama_cloud/types/parsing_job_structured_result.py +32 -0
  71. llama_cloud/types/permission.py +3 -3
  72. llama_cloud/types/pipeline.py +17 -7
  73. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  74. llama_cloud/types/pipeline_create.py +15 -5
  75. llama_cloud/types/pipeline_data_source.py +13 -7
  76. llama_cloud/types/pipeline_data_source_create.py +3 -1
  77. llama_cloud/types/pipeline_deployment.py +4 -4
  78. llama_cloud/types/pipeline_file.py +25 -11
  79. llama_cloud/types/pipeline_file_create.py +3 -1
  80. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  81. llama_cloud/types/playground_session.py +2 -2
  82. llama_cloud/types/preset_retrieval_params.py +14 -7
  83. llama_cloud/types/presigned_url.py +3 -1
  84. llama_cloud/types/project.py +2 -2
  85. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  86. llama_cloud/types/prompt_spec.py +4 -2
  87. llama_cloud/types/role.py +3 -3
  88. llama_cloud/types/sentence_splitter.py +4 -2
  89. llama_cloud/types/text_node.py +3 -3
  90. llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
  91. llama_cloud/types/token_text_splitter.py +1 -1
  92. llama_cloud/types/user_organization.py +9 -5
  93. llama_cloud/types/user_organization_create.py +4 -4
  94. llama_cloud/types/user_organization_delete.py +2 -2
  95. llama_cloud/types/user_organization_role.py +2 -2
  96. llama_cloud/types/value.py +5 -0
  97. llama_cloud/types/vertex_text_embedding.py +9 -5
  98. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +2 -1
  99. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +101 -100
  100. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +1 -1
  101. llama_cloud/types/data_sink_component.py +0 -20
  102. llama_cloud/types/data_source_component.py +0 -28
  103. llama_cloud/types/metadata_filter_value.py +0 -5
  104. llama_cloud/types/pipeline_data_source_component.py +0 -28
  105. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
llama_cloud/__init__.py CHANGED
@@ -52,12 +52,10 @@ from .types import (
52
52
  ConfiguredTransformationItem,
53
53
  ConfiguredTransformationItemComponent,
54
54
  DataSink,
55
- DataSinkComponent,
56
55
  DataSinkCreate,
57
56
  DataSinkCreateComponent,
58
57
  DataSinkDefinition,
59
58
  DataSource,
60
- DataSourceComponent,
61
59
  DataSourceCreate,
62
60
  DataSourceCreateComponent,
63
61
  DataSourceCreateCustomMetadataValue,
@@ -97,6 +95,7 @@ from .types import (
97
95
  ExtractionSchema,
98
96
  ExtractionSchemaDataSchemaValue,
99
97
  File,
98
+ FilePermissionInfoValue,
100
99
  FileResourceInfoValue,
101
100
  FilterCondition,
102
101
  FilterOperator,
@@ -105,7 +104,6 @@ from .types import (
105
104
  HttpValidationError,
106
105
  HuggingFaceInferenceApiEmbedding,
107
106
  HuggingFaceInferenceApiEmbeddingConfig,
108
- HuggingFaceInferenceApiEmbeddingToken,
109
107
  IngestionErrorResponse,
110
108
  InputMessage,
111
109
  IntervalUsageAndPlan,
@@ -125,7 +123,6 @@ from .types import (
125
123
  MessageAnnotation,
126
124
  MessageRole,
127
125
  MetadataFilter,
128
- MetadataFilterValue,
129
126
  MetadataFilters,
130
127
  MetadataFiltersFiltersItem,
131
128
  MetricResult,
@@ -148,6 +145,7 @@ from .types import (
148
145
  ParsingJob,
149
146
  ParsingJobJsonResult,
150
147
  ParsingJobMarkdownResult,
148
+ ParsingJobStructuredResult,
151
149
  ParsingJobTextResult,
152
150
  ParsingUsage,
153
151
  PartitionNames,
@@ -165,7 +163,6 @@ from .types import (
165
163
  PipelineCreateEmbeddingConfig_VertexaiEmbedding,
166
164
  PipelineCreateTransformConfig,
167
165
  PipelineDataSource,
168
- PipelineDataSourceComponent,
169
166
  PipelineDataSourceCreate,
170
167
  PipelineDataSourceCustomMetadataValue,
171
168
  PipelineDeployment,
@@ -182,6 +179,7 @@ from .types import (
182
179
  PipelineFileCreate,
183
180
  PipelineFileCreateCustomMetadataValue,
184
181
  PipelineFileCustomMetadataValue,
182
+ PipelineFilePermissionInfoValue,
185
183
  PipelineFileResourceInfoValue,
186
184
  PipelineTransformConfig,
187
185
  PipelineTransformConfig_Advanced,
@@ -210,6 +208,7 @@ from .types import (
210
208
  TextNode,
211
209
  TextNodeRelationshipsValue,
212
210
  TextNodeWithScore,
211
+ Token,
213
212
  TokenChunkingConfig,
214
213
  TokenTextSplitter,
215
214
  TransformationCategoryNames,
@@ -220,6 +219,7 @@ from .types import (
220
219
  UserOrganizationRole,
221
220
  ValidationError,
222
221
  ValidationErrorLocItem,
222
+ Value,
223
223
  VertexAiEmbeddingConfig,
224
224
  VertexEmbeddingMode,
225
225
  VertexTextEmbedding,
@@ -240,6 +240,7 @@ from .resources import (
240
240
  ExtractionSchemaCreateDataSchemaValue,
241
241
  ExtractionSchemaUpdateDataSchemaValue,
242
242
  FileCreateFromUrlResourceInfoValue,
243
+ FileCreatePermissionInfoValue,
243
244
  FileCreateResourceInfoValue,
244
245
  PipelineFileUpdateCustomMetadataValue,
245
246
  PipelineUpdateEmbeddingConfig,
@@ -317,13 +318,11 @@ __all__ = [
317
318
  "ConfiguredTransformationItem",
318
319
  "ConfiguredTransformationItemComponent",
319
320
  "DataSink",
320
- "DataSinkComponent",
321
321
  "DataSinkCreate",
322
322
  "DataSinkCreateComponent",
323
323
  "DataSinkDefinition",
324
324
  "DataSinkUpdateComponent",
325
325
  "DataSource",
326
- "DataSourceComponent",
327
326
  "DataSourceCreate",
328
327
  "DataSourceCreateComponent",
329
328
  "DataSourceCreateCustomMetadataValue",
@@ -376,7 +375,9 @@ __all__ = [
376
375
  "ExtractionSchemaUpdateDataSchemaValue",
377
376
  "File",
378
377
  "FileCreateFromUrlResourceInfoValue",
378
+ "FileCreatePermissionInfoValue",
379
379
  "FileCreateResourceInfoValue",
380
+ "FilePermissionInfoValue",
380
381
  "FileResourceInfoValue",
381
382
  "FilterCondition",
382
383
  "FilterOperator",
@@ -385,7 +386,6 @@ __all__ = [
385
386
  "HttpValidationError",
386
387
  "HuggingFaceInferenceApiEmbedding",
387
388
  "HuggingFaceInferenceApiEmbeddingConfig",
388
- "HuggingFaceInferenceApiEmbeddingToken",
389
389
  "IngestionErrorResponse",
390
390
  "InputMessage",
391
391
  "IntervalUsageAndPlan",
@@ -406,7 +406,6 @@ __all__ = [
406
406
  "MessageAnnotation",
407
407
  "MessageRole",
408
408
  "MetadataFilter",
409
- "MetadataFilterValue",
410
409
  "MetadataFilters",
411
410
  "MetadataFiltersFiltersItem",
412
411
  "MetricResult",
@@ -429,6 +428,7 @@ __all__ = [
429
428
  "ParsingJob",
430
429
  "ParsingJobJsonResult",
431
430
  "ParsingJobMarkdownResult",
431
+ "ParsingJobStructuredResult",
432
432
  "ParsingJobTextResult",
433
433
  "ParsingUsage",
434
434
  "PartitionNames",
@@ -446,7 +446,6 @@ __all__ = [
446
446
  "PipelineCreateEmbeddingConfig_VertexaiEmbedding",
447
447
  "PipelineCreateTransformConfig",
448
448
  "PipelineDataSource",
449
- "PipelineDataSourceComponent",
450
449
  "PipelineDataSourceCreate",
451
450
  "PipelineDataSourceCustomMetadataValue",
452
451
  "PipelineDeployment",
@@ -463,6 +462,7 @@ __all__ = [
463
462
  "PipelineFileCreate",
464
463
  "PipelineFileCreateCustomMetadataValue",
465
464
  "PipelineFileCustomMetadataValue",
465
+ "PipelineFilePermissionInfoValue",
466
466
  "PipelineFileResourceInfoValue",
467
467
  "PipelineFileUpdateCustomMetadataValue",
468
468
  "PipelineTransformConfig",
@@ -501,6 +501,7 @@ __all__ = [
501
501
  "TextNode",
502
502
  "TextNodeRelationshipsValue",
503
503
  "TextNodeWithScore",
504
+ "Token",
504
505
  "TokenChunkingConfig",
505
506
  "TokenTextSplitter",
506
507
  "TransformationCategoryNames",
@@ -512,6 +513,7 @@ __all__ = [
512
513
  "UserOrganizationRole",
513
514
  "ValidationError",
514
515
  "ValidationErrorLocItem",
516
+ "Value",
515
517
  "VertexAiEmbeddingConfig",
516
518
  "VertexEmbeddingMode",
517
519
  "VertexTextEmbedding",
@@ -4,4 +4,4 @@ import enum
4
4
 
5
5
 
6
6
  class LlamaCloudEnvironment(enum.Enum):
7
- DEFAULT = "https://api.cloud.llamaindex.ai/"
7
+ DEFAULT = "https://api.cloud.llamaindex.ai"
@@ -26,7 +26,7 @@ from .embedding_model_configs import (
26
26
  EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
27
27
  )
28
28
  from .extraction import ExtractionSchemaCreateDataSchemaValue, ExtractionSchemaUpdateDataSchemaValue
29
- from .files import FileCreateFromUrlResourceInfoValue, FileCreateResourceInfoValue
29
+ from .files import FileCreateFromUrlResourceInfoValue, FileCreatePermissionInfoValue, FileCreateResourceInfoValue
30
30
  from .pipelines import (
31
31
  PipelineFileUpdateCustomMetadataValue,
32
32
  PipelineUpdateEmbeddingConfig,
@@ -55,6 +55,7 @@ __all__ = [
55
55
  "ExtractionSchemaCreateDataSchemaValue",
56
56
  "ExtractionSchemaUpdateDataSchemaValue",
57
57
  "FileCreateFromUrlResourceInfoValue",
58
+ "FileCreatePermissionInfoValue",
58
59
  "FileCreateResourceInfoValue",
59
60
  "PipelineFileUpdateCustomMetadataValue",
60
61
  "PipelineUpdateEmbeddingConfig",
@@ -91,7 +91,7 @@ class DataSinksClient:
91
91
  )
92
92
  client.data_sinks.create_data_sink(
93
93
  request=DataSinkCreate(
94
- name="string",
94
+ name="name",
95
95
  sink_type=ConfigurableDataSinkNames.PINECONE,
96
96
  ),
97
97
  )
@@ -140,7 +140,7 @@ class DataSinksClient:
140
140
  )
141
141
  client.data_sinks.upsert_data_sink(
142
142
  request=DataSinkCreate(
143
- name="string",
143
+ name="name",
144
144
  sink_type=ConfigurableDataSinkNames.PINECONE,
145
145
  ),
146
146
  )
@@ -176,7 +176,7 @@ class DataSinksClient:
176
176
  token="YOUR_TOKEN",
177
177
  )
178
178
  client.data_sinks.get_data_sink(
179
- data_sink_id="string",
179
+ data_sink_id="data_sink_id",
180
180
  )
181
181
  """
182
182
  _response = self._client_wrapper.httpx_client.request(
@@ -209,11 +209,11 @@ class DataSinksClient:
209
209
  Parameters:
210
210
  - data_sink_id: str.
211
211
 
212
- - name: typing.Optional[str].
212
+ - name: typing.Optional[str]. The name of the data sink.
213
213
 
214
214
  - sink_type: ConfigurableDataSinkNames.
215
215
 
216
- - component: typing.Optional[DataSinkUpdateComponent].
216
+ - component: typing.Optional[DataSinkUpdateComponent]. Component that implements the data sink
217
217
  ---
218
218
  from llama_cloud import ConfigurableDataSinkNames
219
219
  from llama_cloud.client import LlamaCloud
@@ -222,7 +222,7 @@ class DataSinksClient:
222
222
  token="YOUR_TOKEN",
223
223
  )
224
224
  client.data_sinks.update_data_sink(
225
- data_sink_id="string",
225
+ data_sink_id="data_sink_id",
226
226
  sink_type=ConfigurableDataSinkNames.PINECONE,
227
227
  )
228
228
  """
@@ -261,7 +261,7 @@ class DataSinksClient:
261
261
  token="YOUR_TOKEN",
262
262
  )
263
263
  client.data_sinks.delete_data_sink(
264
- data_sink_id="string",
264
+ data_sink_id="data_sink_id",
265
265
  )
266
266
  """
267
267
  _response = self._client_wrapper.httpx_client.request(
@@ -345,7 +345,7 @@ class AsyncDataSinksClient:
345
345
  )
346
346
  await client.data_sinks.create_data_sink(
347
347
  request=DataSinkCreate(
348
- name="string",
348
+ name="name",
349
349
  sink_type=ConfigurableDataSinkNames.PINECONE,
350
350
  ),
351
351
  )
@@ -394,7 +394,7 @@ class AsyncDataSinksClient:
394
394
  )
395
395
  await client.data_sinks.upsert_data_sink(
396
396
  request=DataSinkCreate(
397
- name="string",
397
+ name="name",
398
398
  sink_type=ConfigurableDataSinkNames.PINECONE,
399
399
  ),
400
400
  )
@@ -430,7 +430,7 @@ class AsyncDataSinksClient:
430
430
  token="YOUR_TOKEN",
431
431
  )
432
432
  await client.data_sinks.get_data_sink(
433
- data_sink_id="string",
433
+ data_sink_id="data_sink_id",
434
434
  )
435
435
  """
436
436
  _response = await self._client_wrapper.httpx_client.request(
@@ -463,11 +463,11 @@ class AsyncDataSinksClient:
463
463
  Parameters:
464
464
  - data_sink_id: str.
465
465
 
466
- - name: typing.Optional[str].
466
+ - name: typing.Optional[str]. The name of the data sink.
467
467
 
468
468
  - sink_type: ConfigurableDataSinkNames.
469
469
 
470
- - component: typing.Optional[DataSinkUpdateComponent].
470
+ - component: typing.Optional[DataSinkUpdateComponent]. Component that implements the data sink
471
471
  ---
472
472
  from llama_cloud import ConfigurableDataSinkNames
473
473
  from llama_cloud.client import AsyncLlamaCloud
@@ -476,7 +476,7 @@ class AsyncDataSinksClient:
476
476
  token="YOUR_TOKEN",
477
477
  )
478
478
  await client.data_sinks.update_data_sink(
479
- data_sink_id="string",
479
+ data_sink_id="data_sink_id",
480
480
  sink_type=ConfigurableDataSinkNames.PINECONE,
481
481
  )
482
482
  """
@@ -515,7 +515,7 @@ class AsyncDataSinksClient:
515
515
  token="YOUR_TOKEN",
516
516
  )
517
517
  await client.data_sinks.delete_data_sink(
518
- data_sink_id="string",
518
+ data_sink_id="data_sink_id",
519
519
  )
520
520
  """
521
521
  _response = await self._client_wrapper.httpx_client.request(
@@ -93,7 +93,7 @@ class DataSourcesClient:
93
93
  )
94
94
  client.data_sources.create_data_source(
95
95
  request=DataSourceCreate(
96
- name="string",
96
+ name="name",
97
97
  source_type=ConfigurableDataSourceNames.S_3,
98
98
  ),
99
99
  )
@@ -142,7 +142,7 @@ class DataSourcesClient:
142
142
  )
143
143
  client.data_sources.upsert_data_source(
144
144
  request=DataSourceCreate(
145
- name="string",
145
+ name="name",
146
146
  source_type=ConfigurableDataSourceNames.S_3,
147
147
  ),
148
148
  )
@@ -178,7 +178,7 @@ class DataSourcesClient:
178
178
  token="YOUR_TOKEN",
179
179
  )
180
180
  client.data_sources.get_data_source(
181
- data_source_id="string",
181
+ data_source_id="data_source_id",
182
182
  )
183
183
  """
184
184
  _response = self._client_wrapper.httpx_client.request(
@@ -212,13 +212,13 @@ class DataSourcesClient:
212
212
  Parameters:
213
213
  - data_source_id: str.
214
214
 
215
- - name: typing.Optional[str].
215
+ - name: typing.Optional[str]. The name of the data source.
216
216
 
217
217
  - source_type: ConfigurableDataSourceNames.
218
218
 
219
- - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceUpdateCustomMetadataValue]]].
219
+ - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceUpdateCustomMetadataValue]]]. Custom metadata that will be present on all data loaded from the data source
220
220
 
221
- - component: typing.Optional[DataSourceUpdateComponent].
221
+ - component: typing.Optional[DataSourceUpdateComponent]. Component that implements the data source
222
222
  ---
223
223
  from llama_cloud import ConfigurableDataSourceNames
224
224
  from llama_cloud.client import LlamaCloud
@@ -227,7 +227,7 @@ class DataSourcesClient:
227
227
  token="YOUR_TOKEN",
228
228
  )
229
229
  client.data_sources.update_data_source(
230
- data_source_id="string",
230
+ data_source_id="data_source_id",
231
231
  source_type=ConfigurableDataSourceNames.S_3,
232
232
  )
233
233
  """
@@ -268,7 +268,7 @@ class DataSourcesClient:
268
268
  token="YOUR_TOKEN",
269
269
  )
270
270
  client.data_sources.delete_data_source(
271
- data_source_id="string",
271
+ data_source_id="data_source_id",
272
272
  )
273
273
  """
274
274
  _response = self._client_wrapper.httpx_client.request(
@@ -353,7 +353,7 @@ class AsyncDataSourcesClient:
353
353
  )
354
354
  await client.data_sources.create_data_source(
355
355
  request=DataSourceCreate(
356
- name="string",
356
+ name="name",
357
357
  source_type=ConfigurableDataSourceNames.S_3,
358
358
  ),
359
359
  )
@@ -402,7 +402,7 @@ class AsyncDataSourcesClient:
402
402
  )
403
403
  await client.data_sources.upsert_data_source(
404
404
  request=DataSourceCreate(
405
- name="string",
405
+ name="name",
406
406
  source_type=ConfigurableDataSourceNames.S_3,
407
407
  ),
408
408
  )
@@ -438,7 +438,7 @@ class AsyncDataSourcesClient:
438
438
  token="YOUR_TOKEN",
439
439
  )
440
440
  await client.data_sources.get_data_source(
441
- data_source_id="string",
441
+ data_source_id="data_source_id",
442
442
  )
443
443
  """
444
444
  _response = await self._client_wrapper.httpx_client.request(
@@ -472,13 +472,13 @@ class AsyncDataSourcesClient:
472
472
  Parameters:
473
473
  - data_source_id: str.
474
474
 
475
- - name: typing.Optional[str].
475
+ - name: typing.Optional[str]. The name of the data source.
476
476
 
477
477
  - source_type: ConfigurableDataSourceNames.
478
478
 
479
- - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceUpdateCustomMetadataValue]]].
479
+ - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceUpdateCustomMetadataValue]]]. Custom metadata that will be present on all data loaded from the data source
480
480
 
481
- - component: typing.Optional[DataSourceUpdateComponent].
481
+ - component: typing.Optional[DataSourceUpdateComponent]. Component that implements the data source
482
482
  ---
483
483
  from llama_cloud import ConfigurableDataSourceNames
484
484
  from llama_cloud.client import AsyncLlamaCloud
@@ -487,7 +487,7 @@ class AsyncDataSourcesClient:
487
487
  token="YOUR_TOKEN",
488
488
  )
489
489
  await client.data_sources.update_data_source(
490
- data_source_id="string",
490
+ data_source_id="data_source_id",
491
491
  source_type=ConfigurableDataSourceNames.S_3,
492
492
  )
493
493
  """
@@ -528,7 +528,7 @@ class AsyncDataSourcesClient:
528
528
  token="YOUR_TOKEN",
529
529
  )
530
530
  await client.data_sources.delete_data_source(
531
- data_source_id="string",
531
+ data_source_id="data_source_id",
532
532
  )
533
533
  """
534
534
  _response = await self._client_wrapper.httpx_client.request(
@@ -34,15 +34,6 @@ class EmbeddingModelConfigsClient:
34
34
  """
35
35
  Parameters:
36
36
  - project_id: str.
37
- ---
38
- from llama_cloud.client import LlamaCloud
39
-
40
- client = LlamaCloud(
41
- token="YOUR_TOKEN",
42
- )
43
- client.embedding_model_configs.list_embedding_model_configs(
44
- project_id="string",
45
- )
46
37
  """
47
38
  _response = self._client_wrapper.httpx_client.request(
48
39
  "GET",
@@ -63,7 +54,7 @@ class EmbeddingModelConfigsClient:
63
54
 
64
55
  def create_embedding_model_config(
65
56
  self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
66
- ) -> typing.Any:
57
+ ) -> EmbeddingModelConfig:
67
58
  """
68
59
  Create a new embedding model configuration within a specified project.
69
60
 
@@ -73,6 +64,22 @@ class EmbeddingModelConfigsClient:
73
64
  - name: str. The name of the embedding model config.
74
65
 
75
66
  - embedding_config: EmbeddingModelConfigCreateEmbeddingConfig. The embedding configuration for the embedding model config.
67
+ ---
68
+ from llama_cloud import (
69
+ EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
70
+ )
71
+ from llama_cloud.client import LlamaCloud
72
+
73
+ client = LlamaCloud(
74
+ token="YOUR_TOKEN",
75
+ )
76
+ client.embedding_model_configs.create_embedding_model_config(
77
+ project_id="project_id",
78
+ name="name",
79
+ embedding_config=EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding(
80
+ type="VERTEXAI_EMBEDDING",
81
+ ),
82
+ )
76
83
  """
77
84
  _response = self._client_wrapper.httpx_client.request(
78
85
  "POST",
@@ -83,7 +90,7 @@ class EmbeddingModelConfigsClient:
83
90
  timeout=60,
84
91
  )
85
92
  if 200 <= _response.status_code < 300:
86
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
93
+ return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json()) # type: ignore
87
94
  if _response.status_code == 422:
88
95
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
89
96
  try:
@@ -109,6 +116,16 @@ class EmbeddingModelConfigsClient:
109
116
  - organization_id: typing.Optional[str].
110
117
 
111
118
  - request: EmbeddingModelConfigUpdate.
119
+ ---
120
+ from llama_cloud import EmbeddingModelConfigUpdate
121
+ from llama_cloud.client import LlamaCloud
122
+
123
+ client = LlamaCloud(
124
+ token="YOUR_TOKEN",
125
+ )
126
+ client.embedding_model_configs.upsert_embedding_model_config(
127
+ request=EmbeddingModelConfigUpdate(),
128
+ )
112
129
  """
113
130
  _response = self._client_wrapper.httpx_client.request(
114
131
  "PUT",
@@ -138,6 +155,17 @@ class EmbeddingModelConfigsClient:
138
155
  - embedding_model_config_id: str.
139
156
 
140
157
  - request: EmbeddingModelConfigUpdate.
158
+ ---
159
+ from llama_cloud import EmbeddingModelConfigUpdate
160
+ from llama_cloud.client import LlamaCloud
161
+
162
+ client = LlamaCloud(
163
+ token="YOUR_TOKEN",
164
+ )
165
+ client.embedding_model_configs.update_embedding_model_config(
166
+ embedding_model_config_id="embedding_model_config_id",
167
+ request=EmbeddingModelConfigUpdate(),
168
+ )
141
169
  """
142
170
  _response = self._client_wrapper.httpx_client.request(
143
171
  "PUT",
@@ -171,7 +199,7 @@ class EmbeddingModelConfigsClient:
171
199
  token="YOUR_TOKEN",
172
200
  )
173
201
  client.embedding_model_configs.delete_embedding_model_config(
174
- embedding_model_config_id="string",
202
+ embedding_model_config_id="embedding_model_config_id",
175
203
  )
176
204
  """
177
205
  _response = self._client_wrapper.httpx_client.request(
@@ -201,15 +229,6 @@ class AsyncEmbeddingModelConfigsClient:
201
229
  """
202
230
  Parameters:
203
231
  - project_id: str.
204
- ---
205
- from llama_cloud.client import AsyncLlamaCloud
206
-
207
- client = AsyncLlamaCloud(
208
- token="YOUR_TOKEN",
209
- )
210
- await client.embedding_model_configs.list_embedding_model_configs(
211
- project_id="string",
212
- )
213
232
  """
214
233
  _response = await self._client_wrapper.httpx_client.request(
215
234
  "GET",
@@ -230,7 +249,7 @@ class AsyncEmbeddingModelConfigsClient:
230
249
 
231
250
  async def create_embedding_model_config(
232
251
  self, *, project_id: str, name: str, embedding_config: EmbeddingModelConfigCreateEmbeddingConfig
233
- ) -> typing.Any:
252
+ ) -> EmbeddingModelConfig:
234
253
  """
235
254
  Create a new embedding model configuration within a specified project.
236
255
 
@@ -240,6 +259,22 @@ class AsyncEmbeddingModelConfigsClient:
240
259
  - name: str. The name of the embedding model config.
241
260
 
242
261
  - embedding_config: EmbeddingModelConfigCreateEmbeddingConfig. The embedding configuration for the embedding model config.
262
+ ---
263
+ from llama_cloud import (
264
+ EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding,
265
+ )
266
+ from llama_cloud.client import AsyncLlamaCloud
267
+
268
+ client = AsyncLlamaCloud(
269
+ token="YOUR_TOKEN",
270
+ )
271
+ await client.embedding_model_configs.create_embedding_model_config(
272
+ project_id="project_id",
273
+ name="name",
274
+ embedding_config=EmbeddingModelConfigCreateEmbeddingConfig_VertexaiEmbedding(
275
+ type="VERTEXAI_EMBEDDING",
276
+ ),
277
+ )
243
278
  """
244
279
  _response = await self._client_wrapper.httpx_client.request(
245
280
  "POST",
@@ -250,7 +285,7 @@ class AsyncEmbeddingModelConfigsClient:
250
285
  timeout=60,
251
286
  )
252
287
  if 200 <= _response.status_code < 300:
253
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
288
+ return pydantic.parse_obj_as(EmbeddingModelConfig, _response.json()) # type: ignore
254
289
  if _response.status_code == 422:
255
290
  raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
256
291
  try:
@@ -276,6 +311,16 @@ class AsyncEmbeddingModelConfigsClient:
276
311
  - organization_id: typing.Optional[str].
277
312
 
278
313
  - request: EmbeddingModelConfigUpdate.
314
+ ---
315
+ from llama_cloud import EmbeddingModelConfigUpdate
316
+ from llama_cloud.client import AsyncLlamaCloud
317
+
318
+ client = AsyncLlamaCloud(
319
+ token="YOUR_TOKEN",
320
+ )
321
+ await client.embedding_model_configs.upsert_embedding_model_config(
322
+ request=EmbeddingModelConfigUpdate(),
323
+ )
279
324
  """
280
325
  _response = await self._client_wrapper.httpx_client.request(
281
326
  "PUT",
@@ -305,6 +350,17 @@ class AsyncEmbeddingModelConfigsClient:
305
350
  - embedding_model_config_id: str.
306
351
 
307
352
  - request: EmbeddingModelConfigUpdate.
353
+ ---
354
+ from llama_cloud import EmbeddingModelConfigUpdate
355
+ from llama_cloud.client import AsyncLlamaCloud
356
+
357
+ client = AsyncLlamaCloud(
358
+ token="YOUR_TOKEN",
359
+ )
360
+ await client.embedding_model_configs.update_embedding_model_config(
361
+ embedding_model_config_id="embedding_model_config_id",
362
+ request=EmbeddingModelConfigUpdate(),
363
+ )
308
364
  """
309
365
  _response = await self._client_wrapper.httpx_client.request(
310
366
  "PUT",
@@ -338,7 +394,7 @@ class AsyncEmbeddingModelConfigsClient:
338
394
  token="YOUR_TOKEN",
339
395
  )
340
396
  await client.embedding_model_configs.delete_embedding_model_config(
341
- embedding_model_config_id="string",
397
+ embedding_model_config_id="embedding_model_config_id",
342
398
  )
343
399
  """
344
400
  _response = await self._client_wrapper.httpx_client.request(