llama-cloud 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry; it is provided for informational purposes only.


llama_cloud/__init__.py CHANGED
@@ -63,12 +63,14 @@ from .types import (
     HuggingFaceInferenceApiEmbedding,
     HuggingFaceInferenceApiEmbeddingToken,
     JsonNodeParser,
+    LlamaParseParameters,
     LlamaParseSupportedFileExtensions,
     Llm,
     LocalEval,
     LocalEvalResults,
     LocalEvalSets,
     ManagedIngestionStatus,
+    ManagedIngestionStatusResponse,
     MarkdownElementNodeParser,
     MarkdownNodeParser,
     MessageRole,
@@ -100,7 +102,6 @@ from .types import (
     PipelineFileCreateCustomMetadataValue,
     PipelineFileCustomMetadataValue,
     PipelineFileResourceInfoValue,
-    PipelineFileStatusResponse,
     PipelineType,
     Pooling,
     PresetRetrievalParams,
@@ -215,12 +216,14 @@ __all__ = [
     "HuggingFaceInferenceApiEmbeddingToken",
     "JsonNodeParser",
     "LlamaCloudEnvironment",
+    "LlamaParseParameters",
     "LlamaParseSupportedFileExtensions",
     "Llm",
     "LocalEval",
     "LocalEvalResults",
     "LocalEvalSets",
     "ManagedIngestionStatus",
+    "ManagedIngestionStatusResponse",
     "MarkdownElementNodeParser",
     "MarkdownNodeParser",
     "MessageRole",
@@ -252,7 +255,6 @@ __all__ = [
     "PipelineFileCreateCustomMetadataValue",
     "PipelineFileCustomMetadataValue",
     "PipelineFileResourceInfoValue",
-    "PipelineFileStatusResponse",
     "PipelineFileUpdateCustomMetadataValue",
     "PipelineType",
     "Pooling",
@@ -23,9 +23,9 @@ class ComponentDefinitionsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    def get_all_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
+    def list_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
         """
-        Get all configurable transformation component definitions.
+        List transformation component definitions.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -33,7 +33,7 @@ class ComponentDefinitionsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.component_definitions.get_all_transformation_definitions()
+        client.component_definitions.list_transformation_definitions()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -51,9 +51,9 @@ class ComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_all_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
+    def list_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
         """
-        Get all data source component definitions.
+        List data source component definitions.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -61,7 +61,7 @@ class ComponentDefinitionsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.component_definitions.get_all_data_source_definitions()
+        client.component_definitions.list_data_source_definitions()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -77,9 +77,9 @@ class ComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_all_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
+    def list_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
         """
-        Get all data sink component definitions.
+        List data sink component definitions.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -87,7 +87,7 @@ class ComponentDefinitionsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.component_definitions.get_all_data_sink_definitions()
+        client.component_definitions.list_data_sink_definitions()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -108,9 +108,9 @@ class AsyncComponentDefinitionsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    async def get_all_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
+    async def list_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
         """
-        Get all configurable transformation component definitions.
+        List transformation component definitions.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -118,7 +118,7 @@ class AsyncComponentDefinitionsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.component_definitions.get_all_transformation_definitions()
+        await client.component_definitions.list_transformation_definitions()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -136,9 +136,9 @@ class AsyncComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_all_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
+    async def list_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
         """
-        Get all data source component definitions.
+        List data source component definitions.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -146,7 +146,7 @@ class AsyncComponentDefinitionsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.component_definitions.get_all_data_source_definitions()
+        await client.component_definitions.list_data_source_definitions()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -162,9 +162,9 @@ class AsyncComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_all_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
+    async def list_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
         """
-        Get all data sink component definitions.
+        List data sink component definitions.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -172,7 +172,7 @@ class AsyncComponentDefinitionsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.component_definitions.get_all_data_sink_definitions()
+        await client.component_definitions.list_data_sink_definitions()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -33,7 +33,7 @@ class DataSinksClient:
 
     def list_data_sinks(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSink]:
         """
-        Get all data sinks for a given project.
+        List data sinks for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
@@ -268,7 +268,7 @@ class AsyncDataSinksClient:
 
     async def list_data_sinks(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSink]:
         """
-        Get all data sinks for a given project.
+        List data sinks for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
@@ -34,7 +34,7 @@ class DataSourcesClient:
 
     def list_data_sources(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSource]:
         """
-        Get all data sources for a given project.
+        List data sources for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
@@ -274,7 +274,7 @@ class AsyncDataSourcesClient:
 
     async def list_data_sources(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSource]:
         """
-        Get all data sources for a given project.
+        List data sources for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
@@ -130,9 +130,9 @@ class EvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
+    def list_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
         """
-        Get all questions for a dataset.
+        List questions for a dataset.
 
         Parameters:
             - dataset_id: str.
@@ -142,7 +142,7 @@ class EvalsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.evals.get_questions(
+        client.evals.list_questions(
             dataset_id="string",
         )
         """
@@ -348,9 +348,9 @@ class EvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
+    def list_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
         """
-        Get all supported models.
+        List supported models.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -358,7 +358,7 @@ class EvalsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.evals.get_supported_models()
+        client.evals.list_supported_models()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -481,9 +481,9 @@ class AsyncEvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
+    async def list_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
         """
-        Get all questions for a dataset.
+        List questions for a dataset.
 
         Parameters:
             - dataset_id: str.
@@ -493,7 +493,7 @@ class AsyncEvalsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.evals.get_questions(
+        await client.evals.list_questions(
             dataset_id="string",
         )
         """
@@ -699,9 +699,9 @@ class AsyncEvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
+    async def list_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
         """
-        Get all supported models.
+        List supported models.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -709,7 +709,7 @@ class AsyncEvalsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.evals.get_supported_models()
+        await client.evals.list_supported_models()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -31,7 +31,7 @@ class FilesClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    def read_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
+    def get_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
         """
         Read File metadata objects.
 
@@ -45,7 +45,7 @@ class FilesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.files.read_file(
+        client.files.get_file(
            id="string",
        )
        """
@@ -101,7 +101,7 @@ class FilesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def read_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
+    def list_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
         """
         Read File metadata objects.
 
@@ -113,7 +113,7 @@ class FilesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.files.read_files()
+        client.files.list_files()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -293,7 +293,7 @@ class AsyncFilesClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    async def read_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
+    async def get_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
         """
         Read File metadata objects.
 
@@ -307,7 +307,7 @@ class AsyncFilesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.files.read_file(
+        await client.files.get_file(
            id="string",
        )
        """
@@ -363,7 +363,7 @@ class AsyncFilesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def read_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
+    async def list_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
         """
         Read File metadata objects.
 
@@ -375,7 +375,7 @@ class AsyncFilesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.files.read_files()
+        await client.files.list_files()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -111,6 +111,8 @@ class ParsingClient:
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
+        bounding_box: str,
+        target_pages: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -137,6 +139,10 @@ class ParsingClient:
 
             - page_separator: str.
 
+            - bounding_box: str.
+
+            - target_pages: str.
+
             - file: typing.IO.
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -154,6 +160,8 @@ class ParsingClient:
                     "gpt4o_api_key": gpt_4_o_api_key,
                     "do_not_unroll_columns": do_not_unroll_columns,
                     "page_separator": page_separator,
+                    "bounding_box": bounding_box,
+                    "target_pages": target_pages,
                 }
             ),
             files={"file": file},
@@ -576,6 +584,8 @@ class AsyncParsingClient:
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
+        bounding_box: str,
+        target_pages: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -602,6 +612,10 @@ class AsyncParsingClient:
 
             - page_separator: str.
 
+            - bounding_box: str.
+
+            - target_pages: str.
+
             - file: typing.IO.
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -619,6 +633,8 @@ class AsyncParsingClient:
                     "gpt4o_api_key": gpt_4_o_api_key,
                     "do_not_unroll_columns": do_not_unroll_columns,
                     "page_separator": page_separator,
+                    "bounding_box": bounding_box,
+                    "target_pages": target_pages,
                 }
             ),
             files={"file": file},