llama-cloud 0.1.18__py3-none-any.whl → 0.1.20__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.


Files changed (92)
  1. llama_cloud/__init__.py +202 -42
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/resources/__init__.py +61 -2
  4. llama_cloud/resources/beta/__init__.py +2 -0
  5. llama_cloud/resources/beta/client.py +371 -0
  6. llama_cloud/resources/data_sinks/__init__.py +18 -2
  7. llama_cloud/resources/data_sinks/client.py +2 -94
  8. llama_cloud/resources/data_sinks/types/__init__.py +18 -2
  9. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +65 -7
  10. llama_cloud/resources/data_sources/__init__.py +30 -2
  11. llama_cloud/resources/data_sources/types/__init__.py +28 -1
  12. llama_cloud/resources/data_sources/types/data_source_update_component.py +2 -23
  13. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +122 -0
  14. llama_cloud/resources/embedding_model_configs/client.py +82 -22
  15. llama_cloud/resources/files/client.py +18 -4
  16. llama_cloud/resources/llama_extract/__init__.py +21 -0
  17. llama_cloud/resources/llama_extract/client.py +227 -114
  18. llama_cloud/resources/llama_extract/types/__init__.py +21 -0
  19. llama_cloud/resources/parsing/client.py +123 -4
  20. llama_cloud/resources/pipelines/client.py +116 -11
  21. llama_cloud/types/__init__.py +172 -52
  22. llama_cloud/types/{extract_schema_validate_request.py → audio_block.py} +5 -3
  23. llama_cloud/types/batch.py +47 -0
  24. llama_cloud/types/batch_item.py +40 -0
  25. llama_cloud/types/batch_paginated_list.py +35 -0
  26. llama_cloud/types/{base_prompt_template.py → batch_public_output.py} +7 -7
  27. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  28. llama_cloud/types/cloud_jira_data_source.py +0 -4
  29. llama_cloud/types/cloud_postgres_vector_store.py +2 -0
  30. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  31. llama_cloud/types/data_sink_component.py +65 -7
  32. llama_cloud/types/data_sink_create_component.py +65 -7
  33. llama_cloud/types/data_source_component.py +2 -23
  34. llama_cloud/types/data_source_component_one.py +122 -0
  35. llama_cloud/types/data_source_create_component.py +2 -23
  36. llama_cloud/types/data_source_create_component_one.py +122 -0
  37. llama_cloud/types/{extract_agent_update.py → data_source_update_dispatcher_config.py} +6 -6
  38. llama_cloud/types/{node_parser.py → delete_params.py} +7 -9
  39. llama_cloud/types/{extract_agent_create.py → document_ingestion_job_params.py} +11 -7
  40. llama_cloud/types/extract_config.py +2 -0
  41. llama_cloud/types/extract_job_create.py +1 -2
  42. llama_cloud/types/fail_page_mode.py +29 -0
  43. llama_cloud/types/file_count_by_status_response.py +37 -0
  44. llama_cloud/types/file_parse_public.py +36 -0
  45. llama_cloud/types/job_names.py +8 -12
  46. llama_cloud/types/job_record.py +2 -2
  47. llama_cloud/types/job_record_parameters.py +111 -0
  48. llama_cloud/types/l_lama_parse_transform_config.py +37 -0
  49. llama_cloud/types/legacy_parse_job_config.py +189 -0
  50. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +13 -1
  51. llama_cloud/types/llama_parse_parameters.py +8 -0
  52. llama_cloud/types/load_files_job_config.py +35 -0
  53. llama_cloud/types/message_role.py +4 -0
  54. llama_cloud/types/parse_job_config.py +134 -0
  55. llama_cloud/types/pg_vector_distance_method.py +43 -0
  56. llama_cloud/types/{extract_job_create_batch.py → pg_vector_hnsw_settings.py} +12 -9
  57. llama_cloud/types/pg_vector_vector_type.py +35 -0
  58. llama_cloud/types/pipeline.py +2 -4
  59. llama_cloud/types/pipeline_create.py +3 -2
  60. llama_cloud/types/pipeline_data_source.py +3 -0
  61. llama_cloud/types/pipeline_data_source_component.py +2 -23
  62. llama_cloud/types/pipeline_data_source_component_one.py +122 -0
  63. llama_cloud/types/pipeline_data_source_status.py +33 -0
  64. llama_cloud/types/pipeline_file.py +1 -0
  65. llama_cloud/types/pipeline_file_update_dispatcher_config.py +38 -0
  66. llama_cloud/types/{markdown_node_parser.py → pipeline_file_updater_config.py} +14 -15
  67. llama_cloud/types/pipeline_managed_ingestion_job_params.py +37 -0
  68. llama_cloud/types/pipeline_metadata_config.py +36 -0
  69. llama_cloud/types/prompt_conf.py +3 -0
  70. llama_cloud/types/struct_parse_conf.py +4 -1
  71. {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/METADATA +4 -2
  72. {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/RECORD +82 -68
  73. {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/WHEEL +1 -1
  74. llama_cloud/types/character_splitter.py +0 -46
  75. llama_cloud/types/code_splitter.py +0 -50
  76. llama_cloud/types/configured_transformation_item.py +0 -46
  77. llama_cloud/types/configured_transformation_item_component.py +0 -22
  78. llama_cloud/types/llm.py +0 -60
  79. llama_cloud/types/markdown_element_node_parser.py +0 -51
  80. llama_cloud/types/page_splitter_node_parser.py +0 -42
  81. llama_cloud/types/pydantic_program_mode.py +0 -41
  82. llama_cloud/types/sentence_splitter.py +0 -50
  83. llama_cloud/types/token_text_splitter.py +0 -47
  84. /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema.py +0 -0
  85. /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema_zero_value.py +0 -0
  86. /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema.py +0 -0
  87. /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema_zero_value.py +0 -0
  88. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override.py +0 -0
  89. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override_zero_value.py +0 -0
  90. /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema.py +0 -0
  91. /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
  92. {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/LICENSE +0 -0
llama_cloud/resources/beta/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
llama_cloud/resources/beta/client.py
@@ -0,0 +1,371 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.batch import Batch
+from ...types.batch_paginated_list import BatchPaginatedList
+from ...types.batch_public_output import BatchPublicOutput
+from ...types.http_validation_error import HttpValidationError
+from ...types.llama_parse_parameters import LlamaParseParameters
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class BetaClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def list_batches(
+        self,
+        *,
+        limit: typing.Optional[int] = None,
+        offset: typing.Optional[int] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> BatchPaginatedList:
+        """
+        Parameters:
+            - limit: typing.Optional[int].
+
+            - offset: typing.Optional[int].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.list_batches()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
+            params=remove_none_from_dict(
+                {"limit": limit, "offset": offset, "project_id": project_id, "organization_id": organization_id}
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(BatchPaginatedList, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def create_batch(
+        self,
+        *,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
+        tool: str,
+        tool_data: typing.Optional[LlamaParseParameters] = OMIT,
+        input_type: str,
+        input_id: str,
+        output_type: typing.Optional[str] = OMIT,
+        output_id: typing.Optional[str] = OMIT,
+        batch_create_project_id: str,
+        external_id: str,
+        completion_window: typing.Optional[int] = OMIT,
+    ) -> Batch:
+        """
+        Parameters:
+            - organization_id: typing.Optional[str].
+
+            - project_id: typing.Optional[str].
+
+            - tool: str. The tool to be used for all requests in the batch.
+
+            - tool_data: typing.Optional[LlamaParseParameters].
+
+            - input_type: str. The type of input file. Currently only 'datasource' is supported.
+
+            - input_id: str. The ID of the input file for the batch.
+
+            - output_type: typing.Optional[str].
+
+            - output_id: typing.Optional[str].
+
+            - batch_create_project_id: str. The ID of the project to which the batch belongs
+
+            - external_id: str. A developer-provided ID for the batch. This ID will be returned in the response.
+
+            - completion_window: typing.Optional[int]. The time frame within which the batch should be processed. Currently only 24h is supported.
+        ---
+        from llama_cloud import FailPageMode, LlamaParseParameters, ParsingMode
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.create_batch(
+            tool="string",
+            tool_data=LlamaParseParameters(
+                parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
+                replace_failed_page_mode=FailPageMode.RAW_TEXT,
+            ),
+            input_type="string",
+            input_id="string",
+            batch_create_project_id="string",
+            external_id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {
+            "tool": tool,
+            "input_type": input_type,
+            "input_id": input_id,
+            "project_id": batch_create_project_id,
+            "external_id": external_id,
+        }
+        if tool_data is not OMIT:
+            _request["tool_data"] = tool_data
+        if output_type is not OMIT:
+            _request["output_type"] = output_type
+        if output_id is not OMIT:
+            _request["output_id"] = output_id
+        if completion_window is not OMIT:
+            _request["completion_window"] = completion_window
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Batch, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_batch(self, batch_id: str, *, organization_id: typing.Optional[str] = None) -> BatchPublicOutput:
+        """
+        Parameters:
+            - batch_id: str.
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.get_batch(
+            batch_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/batches/{batch_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(BatchPublicOutput, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncBetaClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def list_batches(
+        self,
+        *,
+        limit: typing.Optional[int] = None,
+        offset: typing.Optional[int] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> BatchPaginatedList:
+        """
+        Parameters:
+            - limit: typing.Optional[int].
+
+            - offset: typing.Optional[int].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.list_batches()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
+            params=remove_none_from_dict(
+                {"limit": limit, "offset": offset, "project_id": project_id, "organization_id": organization_id}
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(BatchPaginatedList, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def create_batch(
+        self,
+        *,
+        organization_id: typing.Optional[str] = None,
+        project_id: typing.Optional[str] = None,
+        tool: str,
+        tool_data: typing.Optional[LlamaParseParameters] = OMIT,
+        input_type: str,
+        input_id: str,
+        output_type: typing.Optional[str] = OMIT,
+        output_id: typing.Optional[str] = OMIT,
+        batch_create_project_id: str,
+        external_id: str,
+        completion_window: typing.Optional[int] = OMIT,
+    ) -> Batch:
+        """
+        Parameters:
+            - organization_id: typing.Optional[str].
+
+            - project_id: typing.Optional[str].
+
+            - tool: str. The tool to be used for all requests in the batch.
+
+            - tool_data: typing.Optional[LlamaParseParameters].
+
+            - input_type: str. The type of input file. Currently only 'datasource' is supported.
+
+            - input_id: str. The ID of the input file for the batch.
+
+            - output_type: typing.Optional[str].
+
+            - output_id: typing.Optional[str].
+
+            - batch_create_project_id: str. The ID of the project to which the batch belongs
+
+            - external_id: str. A developer-provided ID for the batch. This ID will be returned in the response.
+
+            - completion_window: typing.Optional[int]. The time frame within which the batch should be processed. Currently only 24h is supported.
+        ---
+        from llama_cloud import FailPageMode, LlamaParseParameters, ParsingMode
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.create_batch(
+            tool="string",
+            tool_data=LlamaParseParameters(
+                parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
+                replace_failed_page_mode=FailPageMode.RAW_TEXT,
+            ),
+            input_type="string",
+            input_id="string",
+            batch_create_project_id="string",
+            external_id="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {
+            "tool": tool,
+            "input_type": input_type,
+            "input_id": input_id,
+            "project_id": batch_create_project_id,
+            "external_id": external_id,
+        }
+        if tool_data is not OMIT:
+            _request["tool_data"] = tool_data
+        if output_type is not OMIT:
+            _request["output_type"] = output_type
+        if output_id is not OMIT:
+            _request["output_id"] = output_id
+        if completion_window is not OMIT:
+            _request["completion_window"] = completion_window
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Batch, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_batch(self, batch_id: str, *, organization_id: typing.Optional[str] = None) -> BatchPublicOutput:
+        """
+        Parameters:
+            - batch_id: str.
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.get_batch(
+            batch_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/batches/{batch_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(BatchPublicOutput, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
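
Taken together, the new beta surface is a batch-processing API: client.beta.create_batch submits one tool run over every file in a data source, client.beta.list_batches pages through existing batches, and client.beta.get_batch polls a single batch. A minimal end-to-end sketch, adapted from the generated docstrings above; the token, IDs, and the "parse" tool name are placeholders, and the id attribute on the returned Batch is an assumption based on the GET-by-ID endpoint:

from llama_cloud import LlamaParseParameters
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Submit one batch covering every file in a data source.
batch = client.beta.create_batch(
    tool="parse",  # placeholder tool name; the API only documents that a tool string is required
    tool_data=LlamaParseParameters(),  # optional per-tool settings
    input_type="datasource",  # per the docstring, currently the only supported input type
    input_id="YOUR_DATA_SOURCE_ID",
    batch_create_project_id="YOUR_PROJECT_ID",
    external_id="nightly-ingest-001",  # your own tracking key, echoed back in the response
    completion_window=24,  # per the docstring, only a 24h window is currently supported
)

# Poll the batch by ID (assumes Batch exposes an `id` field).
status = client.beta.get_batch(batch_id=batch.id)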
llama_cloud/resources/data_sinks/__init__.py
@@ -1,5 +1,21 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from .types import DataSinkUpdateComponent
+from .types import (
+    DataSinkUpdateComponent,
+    DataSinkUpdateComponent_AzureAiSearch,
+    DataSinkUpdateComponent_Milvus,
+    DataSinkUpdateComponent_MongodbAtlas,
+    DataSinkUpdateComponent_Pinecone,
+    DataSinkUpdateComponent_Postgres,
+    DataSinkUpdateComponent_Qdrant,
+)
 
-__all__ = ["DataSinkUpdateComponent"]
+__all__ = [
+    "DataSinkUpdateComponent",
+    "DataSinkUpdateComponent_AzureAiSearch",
+    "DataSinkUpdateComponent_Milvus",
+    "DataSinkUpdateComponent_MongodbAtlas",
+    "DataSinkUpdateComponent_Pinecone",
+    "DataSinkUpdateComponent_Postgres",
+    "DataSinkUpdateComponent_Qdrant",
+]
llama_cloud/resources/data_sinks/client.py
@@ -82,19 +82,6 @@ class DataSinksClient:
             - organization_id: typing.Optional[str].
 
             - request: DataSinkCreate.
-        ---
-        from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.data_sinks.create_data_sink(
-            request=DataSinkCreate(
-                name="string",
-                sink_type=ConfigurableDataSinkNames.PINECONE,
-            ),
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
@@ -131,19 +118,6 @@ class DataSinksClient:
             - organization_id: typing.Optional[str].
 
             - request: DataSinkCreate.
-        ---
-        from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.data_sinks.upsert_data_sink(
-            request=DataSinkCreate(
-                name="string",
-                sink_type=ConfigurableDataSinkNames.PINECONE,
-            ),
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
@@ -169,15 +143,6 @@
 
         Parameters:
            - data_sink_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.data_sinks.get_data_sink(
-            data_sink_id="string",
-        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -213,18 +178,7 @@
 
             - sink_type: ConfigurableDataSinkNames.
 
-            - component: typing.Optional[DataSinkUpdateComponent]. Component that implements the data sink
-        ---
-        from llama_cloud import ConfigurableDataSinkNames
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.data_sinks.update_data_sink(
-            data_sink_id="string",
-            sink_type=ConfigurableDataSinkNames.PINECONE,
-        )
+            - component: typing.Optional[DataSinkUpdateComponent].
         """
         _request: typing.Dict[str, typing.Any] = {"sink_type": sink_type}
         if name is not OMIT:
@@ -336,19 +290,6 @@ class AsyncDataSinksClient:
             - organization_id: typing.Optional[str].
 
             - request: DataSinkCreate.
-        ---
-        from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.data_sinks.create_data_sink(
-            request=DataSinkCreate(
-                name="string",
-                sink_type=ConfigurableDataSinkNames.PINECONE,
-            ),
-        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
@@ -385,19 +326,6 @@ class AsyncDataSinksClient:
             - organization_id: typing.Optional[str].
 
             - request: DataSinkCreate.
-        ---
-        from llama_cloud import ConfigurableDataSinkNames, DataSinkCreate
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.data_sinks.upsert_data_sink(
-            request=DataSinkCreate(
-                name="string",
-                sink_type=ConfigurableDataSinkNames.PINECONE,
-            ),
-        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
@@ -423,15 +351,6 @@
 
         Parameters:
            - data_sink_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.data_sinks.get_data_sink(
-            data_sink_id="string",
-        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -467,18 +386,7 @@
 
             - sink_type: ConfigurableDataSinkNames.
 
-            - component: typing.Optional[DataSinkUpdateComponent]. Component that implements the data sink
-        ---
-        from llama_cloud import ConfigurableDataSinkNames
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.data_sinks.update_data_sink(
-            data_sink_id="string",
-            sink_type=ConfigurableDataSinkNames.PINECONE,
-        )
+            - component: typing.Optional[DataSinkUpdateComponent].
         """
         _request: typing.Dict[str, typing.Any] = {"sink_type": sink_type}
         if name is not OMIT:
llama_cloud/resources/data_sinks/types/__init__.py
@@ -1,5 +1,21 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from .data_sink_update_component import DataSinkUpdateComponent
+from .data_sink_update_component import (
+    DataSinkUpdateComponent,
+    DataSinkUpdateComponent_AzureAiSearch,
+    DataSinkUpdateComponent_Milvus,
+    DataSinkUpdateComponent_MongodbAtlas,
+    DataSinkUpdateComponent_Pinecone,
+    DataSinkUpdateComponent_Postgres,
+    DataSinkUpdateComponent_Qdrant,
+)
 
-__all__ = ["DataSinkUpdateComponent"]
+__all__ = [
+    "DataSinkUpdateComponent",
+    "DataSinkUpdateComponent_AzureAiSearch",
+    "DataSinkUpdateComponent_Milvus",
+    "DataSinkUpdateComponent_MongodbAtlas",
+    "DataSinkUpdateComponent_Pinecone",
+    "DataSinkUpdateComponent_Postgres",
+    "DataSinkUpdateComponent_Qdrant",
+]
llama_cloud/resources/data_sinks/types/data_sink_update_component.py
@@ -1,7 +1,11 @@
 # This file was auto-generated by Fern from our API Definition.
 
+from __future__ import annotations
+
 import typing
 
+import typing_extensions
+
 from ....types.cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from ....types.cloud_milvus_vector_store import CloudMilvusVectorStore
 from ....types.cloud_mongo_db_atlas_vector_search import CloudMongoDbAtlasVectorSearch
@@ -9,12 +13,66 @@ from ....types.cloud_pinecone_vector_store import CloudPineconeVectorStore
 from ....types.cloud_postgres_vector_store import CloudPostgresVectorStore
 from ....types.cloud_qdrant_vector_store import CloudQdrantVectorStore
 
+
+class DataSinkUpdateComponent_AzureAiSearch(CloudAzureAiSearchVectorStore):
+    type: typing_extensions.Literal["azure_ai_search"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class DataSinkUpdateComponent_Milvus(CloudMilvusVectorStore):
+    type: typing_extensions.Literal["milvus"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class DataSinkUpdateComponent_MongodbAtlas(CloudMongoDbAtlasVectorSearch):
+    type: typing_extensions.Literal["mongodb_atlas"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class DataSinkUpdateComponent_Pinecone(CloudPineconeVectorStore):
+    type: typing_extensions.Literal["pinecone"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class DataSinkUpdateComponent_Postgres(CloudPostgresVectorStore):
+    type: typing_extensions.Literal["postgres"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class DataSinkUpdateComponent_Qdrant(CloudQdrantVectorStore):
+    type: typing_extensions.Literal["qdrant"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
 DataSinkUpdateComponent = typing.Union[
-    typing.Dict[str, typing.Any],
-    CloudPineconeVectorStore,
-    CloudPostgresVectorStore,
-    CloudQdrantVectorStore,
-    CloudAzureAiSearchVectorStore,
-    CloudMongoDbAtlasVectorSearch,
-    CloudMilvusVectorStore,
+    DataSinkUpdateComponent_AzureAiSearch,
+    DataSinkUpdateComponent_Milvus,
+    DataSinkUpdateComponent_MongodbAtlas,
+    DataSinkUpdateComponent_Pinecone,
+    DataSinkUpdateComponent_Postgres,
+    DataSinkUpdateComponent_Qdrant,
 ]
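
The practical effect of this change: DataSinkUpdateComponent is no longer a loose union over bare vector-store models and raw dicts; each variant is now a tagged subclass whose Literal type field drives pydantic's smart_union dispatch. A hedged sketch of constructing one variant for update_data_sink; the CloudPineconeVectorStore field names (api_key, index_name) are assumptions, only the type tag, exports, and method signature come from this diff:

from llama_cloud import ConfigurableDataSinkNames
from llama_cloud.client import LlamaCloud
from llama_cloud.resources.data_sinks import DataSinkUpdateComponent_Pinecone

client = LlamaCloud(token="YOUR_TOKEN")

# The Literal "type" field pins the variant, so serialization and
# re-parsing round-trip to DataSinkUpdateComponent_Pinecone instead of
# whichever union member happens to validate first.
component = DataSinkUpdateComponent_Pinecone(
    type="pinecone",
    api_key="YOUR_PINECONE_API_KEY",  # assumed field; check CloudPineconeVectorStore for the real schema
    index_name="my-index",  # assumed field
)
client.data_sinks.update_data_sink(
    data_sink_id="YOUR_DATA_SINK_ID",
    sink_type=ConfigurableDataSinkNames.PINECONE,
    component=component,
)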
llama_cloud/resources/data_sources/__init__.py
@@ -1,5 +1,33 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from .types import DataSourceUpdateComponent, DataSourceUpdateCustomMetadataValue
+from .types import (
+    DataSourceUpdateComponent,
+    DataSourceUpdateComponentOne,
+    DataSourceUpdateComponentOne_AzureStorageBlob,
+    DataSourceUpdateComponentOne_Box,
+    DataSourceUpdateComponentOne_Confluence,
+    DataSourceUpdateComponentOne_GoogleDrive,
+    DataSourceUpdateComponentOne_Jira,
+    DataSourceUpdateComponentOne_MicrosoftOnedrive,
+    DataSourceUpdateComponentOne_MicrosoftSharepoint,
+    DataSourceUpdateComponentOne_NotionPage,
+    DataSourceUpdateComponentOne_S3,
+    DataSourceUpdateComponentOne_Slack,
+    DataSourceUpdateCustomMetadataValue,
+)
 
-__all__ = ["DataSourceUpdateComponent", "DataSourceUpdateCustomMetadataValue"]
+__all__ = [
+    "DataSourceUpdateComponent",
+    "DataSourceUpdateComponentOne",
+    "DataSourceUpdateComponentOne_AzureStorageBlob",
+    "DataSourceUpdateComponentOne_Box",
+    "DataSourceUpdateComponentOne_Confluence",
+    "DataSourceUpdateComponentOne_GoogleDrive",
+    "DataSourceUpdateComponentOne_Jira",
+    "DataSourceUpdateComponentOne_MicrosoftOnedrive",
+    "DataSourceUpdateComponentOne_MicrosoftSharepoint",
+    "DataSourceUpdateComponentOne_NotionPage",
+    "DataSourceUpdateComponentOne_S3",
+    "DataSourceUpdateComponentOne_Slack",
+    "DataSourceUpdateCustomMetadataValue",
+]
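
The data source side gets the same tagged-union treatment, with one DataSourceUpdateComponentOne_* variant per connector, so raw payloads can be routed to the right connector model by their discriminator alone. A sketch under the same caveats; the "s3" tag is inferred from the variant naming pattern and the bucket field is an assumption about the S3 data source schema — only the exported names come from this diff:

import typing

import pydantic.v1 as pydantic  # the generated models are pydantic-v1 style; drop ".v1" on a pydantic 1.x install

from llama_cloud.resources.data_sources import DataSourceUpdateComponentOne

payload: typing.Dict[str, typing.Any] = {
    "type": "s3",
    "bucket": "my-bucket",  # assumed field; check the S3 data source model for the real schema
}

# parse_obj_as tries the union members; the Literal "type" field rejects
# every variant except DataSourceUpdateComponentOne_S3.
component = pydantic.parse_obj_as(DataSourceUpdateComponentOne, payload)
print(type(component).__name__)  # -> DataSourceUpdateComponentOne_S3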