llama-cloud 0.0.6__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the registry.

Note: this version of llama-cloud has been flagged as potentially problematic.

llama_cloud/resources/component_definitions/client.py

@@ -23,9 +23,9 @@ class ComponentDefinitionsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    def get_all_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
+    def list_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
         """
-        Get all configurable transformation component definitions.
+        List transformation component definitions.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -33,7 +33,7 @@ class ComponentDefinitionsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.component_definitions.get_all_transformation_definitions()
+        client.component_definitions.list_transformation_definitions()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -51,9 +51,9 @@ class ComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_all_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
+    def list_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
         """
-        Get all data source component definitions.
+        List data source component definitions.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -61,7 +61,7 @@ class ComponentDefinitionsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.component_definitions.get_all_data_source_definitions()
+        client.component_definitions.list_data_source_definitions()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -77,9 +77,9 @@ class ComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_all_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
+    def list_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
         """
-        Get all data sink component definitions.
+        List data sink component definitions.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -87,7 +87,7 @@ class ComponentDefinitionsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.component_definitions.get_all_data_sink_definitions()
+        client.component_definitions.list_data_sink_definitions()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -108,9 +108,9 @@ class AsyncComponentDefinitionsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    async def get_all_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
+    async def list_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
         """
-        Get all configurable transformation component definitions.
+        List transformation component definitions.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -118,7 +118,7 @@ class AsyncComponentDefinitionsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.component_definitions.get_all_transformation_definitions()
+        await client.component_definitions.list_transformation_definitions()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -136,9 +136,9 @@ class AsyncComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_all_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
+    async def list_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
         """
-        Get all data source component definitions.
+        List data source component definitions.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -146,7 +146,7 @@ class AsyncComponentDefinitionsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.component_definitions.get_all_data_source_definitions()
+        await client.component_definitions.list_data_source_definitions()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -162,9 +162,9 @@ class AsyncComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_all_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
+    async def list_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
         """
-        Get all data sink component definitions.
+        List data sink component definitions.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -172,7 +172,7 @@ class AsyncComponentDefinitionsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
        )
-        await client.component_definitions.get_all_data_sink_definitions()
+        await client.component_definitions.list_data_sink_definitions()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
llama_cloud/resources/data_sinks/client.py

@@ -33,7 +33,7 @@ class DataSinksClient:
 
     def list_data_sinks(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSink]:
         """
-        Get all data sinks for a given project.
+        List data sinks for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
@@ -268,7 +268,7 @@ class AsyncDataSinksClient:
 
     async def list_data_sinks(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSink]:
         """
-        Get all data sinks for a given project.
+        List data sinks for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
llama_cloud/resources/data_sources/client.py

@@ -34,7 +34,7 @@ class DataSourcesClient:
 
     def list_data_sources(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSource]:
         """
-        Get all data sources for a given project.
+        List data sources for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
@@ -274,7 +274,7 @@ class AsyncDataSourcesClient:
 
     async def list_data_sources(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSource]:
         """
-        Get all data sources for a given project.
+        List data sources for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
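
For data_sinks and data_sources only the docstrings change; list_data_sinks and list_data_sources keep their names and their optional project_id keyword. For reference, a short usage sketch (token and project ID are placeholders):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # project_id is optional; the default project is used when it is omitted.
    sinks = client.data_sinks.list_data_sinks()
    sources = client.data_sources.list_data_sources(project_id="PROJECT_ID")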
llama_cloud/resources/evals/client.py

@@ -130,9 +130,9 @@ class EvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
+    def list_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
         """
-        Get all questions for a dataset.
+        List questions for a dataset.
 
         Parameters:
         - dataset_id: str.
@@ -142,7 +142,7 @@ class EvalsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.evals.get_questions(
+        client.evals.list_questions(
             dataset_id="string",
         )
         """
@@ -348,9 +348,9 @@ class EvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
+    def list_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
         """
-        Get all supported models.
+        List supported models.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -358,7 +358,7 @@ class EvalsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.evals.get_supported_models()
+        client.evals.list_supported_models()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -481,9 +481,9 @@ class AsyncEvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
+    async def list_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
         """
-        Get all questions for a dataset.
+        List questions for a dataset.
 
         Parameters:
         - dataset_id: str.
@@ -493,7 +493,7 @@ class AsyncEvalsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.evals.get_questions(
+        await client.evals.list_questions(
             dataset_id="string",
         )
         """
@@ -699,9 +699,9 @@ class AsyncEvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
+    async def list_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
         """
-        Get all supported models.
+        List supported models.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -709,7 +709,7 @@ class AsyncEvalsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.evals.get_supported_models()
+        await client.evals.list_supported_models()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
llama_cloud/resources/files/client.py

@@ -31,7 +31,7 @@ class FilesClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    def read_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
+    def get_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
         """
         Read File metadata objects.
 
@@ -45,7 +45,7 @@ class FilesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.files.read_file(
+        client.files.get_file(
             id="string",
         )
         """
@@ -101,7 +101,7 @@ class FilesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def read_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
+    def list_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
         """
         Read File metadata objects.
 
@@ -113,7 +113,7 @@ class FilesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.files.read_files()
+        client.files.list_files()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -293,7 +293,7 @@ class AsyncFilesClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    async def read_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
+    async def get_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
         """
         Read File metadata objects.
 
@@ -307,7 +307,7 @@ class AsyncFilesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.files.read_file(
+        await client.files.get_file(
             id="string",
         )
         """
@@ -363,7 +363,7 @@ class AsyncFilesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def read_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
+    async def list_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
         """
         Read File metadata objects.
 
@@ -375,7 +375,7 @@ class AsyncFilesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
        )
-        await client.files.read_files()
+        await client.files.list_files()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
llama_cloud/resources/parsing/client.py

@@ -111,6 +111,8 @@ class ParsingClient:
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
+        bounding_box: str,
+        target_pages: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -137,6 +139,10 @@ class ParsingClient:
 
         - page_separator: str.
 
+        - bounding_box: str.
+
+        - target_pages: str.
+
         - file: typing.IO.
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -154,6 +160,8 @@ class ParsingClient:
                     "gpt4o_api_key": gpt_4_o_api_key,
                     "do_not_unroll_columns": do_not_unroll_columns,
                     "page_separator": page_separator,
+                    "bounding_box": bounding_box,
+                    "target_pages": target_pages,
                 }
             ),
             files={"file": file},
@@ -576,6 +584,8 @@ class AsyncParsingClient:
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
+        bounding_box: str,
+        target_pages: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -602,6 +612,10 @@ class AsyncParsingClient:
 
         - page_separator: str.
 
+        - bounding_box: str.
+
+        - target_pages: str.
+
         - file: typing.IO.
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -619,6 +633,8 @@ class AsyncParsingClient:
                     "gpt4o_api_key": gpt_4_o_api_key,
                     "do_not_unroll_columns": do_not_unroll_columns,
                     "page_separator": page_separator,
+                    "bounding_box": bounding_box,
+                    "target_pages": target_pages,
                 }
             ),
             files={"file": file},
llama_cloud/resources/pipelines/client.py

@@ -644,7 +644,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_files_for_pipeline(self, pipeline_id: str) -> typing.List[PipelineFile]:
+    def list_pipeline_files(self, pipeline_id: str) -> typing.List[PipelineFile]:
         """
         Get files for a pipeline.
 
@@ -656,7 +656,7 @@ class PipelinesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.pipelines.get_files_for_pipeline(
+        client.pipelines.list_pipeline_files(
             pipeline_id="string",
         )
         """
@@ -837,7 +837,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
+    def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
         """
         Get data sources for a pipeline.
 
@@ -849,7 +849,7 @@ class PipelinesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.pipelines.get_pipeline_data_sources(
+        client.pipelines.list_pipeline_data_sources(
             pipeline_id="string",
         )
         """
@@ -1064,7 +1064,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
+    def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
         """
         Get jobs for a pipeline.
 
@@ -1076,7 +1076,7 @@ class PipelinesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.pipelines.get_pipeline_jobs(
+        client.pipelines.list_pipeline_jobs(
             pipeline_id="string",
         )
         """
@@ -1968,7 +1968,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_files_for_pipeline(self, pipeline_id: str) -> typing.List[PipelineFile]:
+    async def list_pipeline_files(self, pipeline_id: str) -> typing.List[PipelineFile]:
         """
         Get files for a pipeline.
 
@@ -1980,7 +1980,7 @@ class AsyncPipelinesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.pipelines.get_files_for_pipeline(
+        await client.pipelines.list_pipeline_files(
             pipeline_id="string",
         )
         """
@@ -2161,7 +2161,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
+    async def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
         """
         Get data sources for a pipeline.
 
@@ -2173,7 +2173,7 @@ class AsyncPipelinesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.pipelines.get_pipeline_data_sources(
+        await client.pipelines.list_pipeline_data_sources(
             pipeline_id="string",
         )
         """
@@ -2388,7 +2388,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
+    async def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
         """
         Get jobs for a pipeline.
 
@@ -2400,7 +2400,7 @@ class AsyncPipelinesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
        )
-        await client.pipelines.get_pipeline_jobs(
+        await client.pipelines.list_pipeline_jobs(
             pipeline_id="string",
         )
         """
llama_cloud/resources/projects/client.py

@@ -238,9 +238,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
+    def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
         """
-        Get all eval datasets for a project.
+        List eval datasets for a project.
 
         Parameters:
         - project_id: str.
@@ -250,7 +250,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.get_datasets_for_project(
+        client.projects.list_datasets_for_project(
             project_id="string",
         )
         """
@@ -353,9 +353,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
+    def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
         """
-        Get all local eval results for a project.
+        List local eval results for a project.
 
         Parameters:
         - project_id: str.
@@ -365,7 +365,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.get_local_evals_for_project(
+        client.projects.list_local_evals_for_project(
             project_id="string",
         )
         """
@@ -385,9 +385,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
+    def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
         """
-        Get all local eval sets for a project.
+        List local eval sets for a project.
 
         Parameters:
         - project_id: str.
@@ -397,7 +397,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.get_local_eval_sets_for_project(
+        client.projects.list_local_eval_sets_for_project(
             project_id="string",
         )
         """
@@ -457,9 +457,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
+    def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
         """
-        Get all PromptMixin prompt sets for a project.
+        List PromptMixin prompt sets for a project.
 
         Parameters:
         - project_id: str.
@@ -469,7 +469,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.get_promptmixin_prompts(
+        client.projects.list_promptmixin_prompts(
             project_id="string",
         )
         """
@@ -824,9 +824,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
+    async def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
         """
-        Get all eval datasets for a project.
+        List eval datasets for a project.
 
         Parameters:
         - project_id: str.
@@ -836,7 +836,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.get_datasets_for_project(
+        await client.projects.list_datasets_for_project(
             project_id="string",
         )
         """
@@ -939,9 +939,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
+    async def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
         """
-        Get all local eval results for a project.
+        List local eval results for a project.
 
         Parameters:
         - project_id: str.
@@ -951,7 +951,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.get_local_evals_for_project(
+        await client.projects.list_local_evals_for_project(
             project_id="string",
         )
         """
@@ -971,9 +971,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
+    async def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
         """
-        Get all local eval sets for a project.
+        List local eval sets for a project.
 
         Parameters:
         - project_id: str.
@@ -983,7 +983,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.get_local_eval_sets_for_project(
+        await client.projects.list_local_eval_sets_for_project(
             project_id="string",
         )
         """
@@ -1043,9 +1043,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
+    async def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
         """
-        Get all PromptMixin prompt sets for a project.
+        List PromptMixin prompt sets for a project.
 
         Parameters:
         - project_id: str.
@@ -1055,7 +1055,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.get_promptmixin_prompts(
+        await client.projects.list_promptmixin_prompts(
             project_id="string",
         )
         """
llama_cloud/types/eval_dataset_job_record.py

@@ -38,13 +38,13 @@ class EvalDatasetJobRecord(pydantic.BaseModel):
         description="The correlation ID for this job. Used for tracking the job across services."
     )
     parent_job_execution_id: typing.Optional[str] = pydantic.Field(description="The ID of the parent job execution.")
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
     id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
     status: StatusEnum
     error_message: typing.Optional[str]
     attempts: typing.Optional[int] = pydantic.Field(description="The number of times this job has been attempted")
     started_at: typing.Optional[dt.datetime]
     ended_at: typing.Optional[dt.datetime]
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     data: typing.Optional[Base] = pydantic.Field(description="Additional metadata for the job execution.")
 
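
Moving created_at up in EvalDatasetJobRecord changes only the declaration (and therefore schema property) order; pydantic models are constructed by keyword and parsed by field name, so the move is behavior-neutral for callers. A quick check, with the status literal assumed for illustration:

    import datetime as dt

    from llama_cloud.types import EvalDatasetJobRecord  # import path assumed from the types package

    # Keyword construction is order-independent, so the moved field parses the same:
    record = EvalDatasetJobRecord(
        status="PENDING",  # StatusEnum member value; the exact literal is an assumption
        created_at=dt.datetime.now(dt.timezone.utc),
    )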
llama_cloud/types/llama_parse_parameters.py

@@ -31,6 +31,8 @@ class LlamaParseParameters(pydantic.BaseModel):
     gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
     do_not_unroll_columns: typing.Optional[bool]
     page_separator: typing.Optional[str]
+    bounding_box: typing.Optional[str]
+    target_pages: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
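
The same two optional string fields land on LlamaParseParameters, so pipeline-level parse settings can carry them as well. A sketch (values illustrative; the import path is assumed from the types package):

    from llama_cloud.types import LlamaParseParameters

    params = LlamaParseParameters(
        page_separator="\n---\n",
        bounding_box="0.1,0,0.1,0",  # assumed format; defined by the LlamaParse service
        target_pages="0,2,4",        # assumed format; comma-separated page indices
    )
    # The model's json() override applies by_alias=True and exclude_unset=True, so unset
    # fields are omitted and gpt_4_o_api_key would serialize under the key "gpt4o_api_key".
    print(params.json())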
llama_cloud/types/llama_parse_supported_file_extensions.py

@@ -47,6 +47,37 @@ class LlamaParseSupportedFileExtensions(str, enum.Enum):
     EPUB = ".epub"
     HTML = ".html"
     HTM = ".htm"
+    XLS = ".xls"
+    XLSX = ".xlsx"
+    XLSM = ".xlsm"
+    XLSB = ".xlsb"
+    XLW = ".xlw"
+    CSV = ".csv"
+    DIF = ".dif"
+    SYLK = ".sylk"
+    SLK = ".slk"
+    PRN = ".prn"
+    NUMBERS = ".numbers"
+    ET = ".et"
+    ODS = ".ods"
+    FODS = ".fods"
+    UOS_1 = ".uos1"
+    UOS_2 = ".uos2"
+    DBF = ".dbf"
+    WK_1 = ".wk1"
+    WK_2 = ".wk2"
+    WK_3 = ".wk3"
+    WK_4 = ".wk4"
+    WKS = ".wks"
+    WQ_1 = ".wq1"
+    WQ_2 = ".wq2"
+    WB_1 = ".wb1"
+    WB_2 = ".wb2"
+    WB_3 = ".wb3"
+    QPW = ".qpw"
+    XLR = ".xlr"
+    ETH = ".eth"
+    TSV = ".tsv"
 
     def visit(
         self,
@@ -86,6 +117,37 @@ class LlamaParseSupportedFileExtensions(str, enum.Enum):
         epub: typing.Callable[[], T_Result],
         html: typing.Callable[[], T_Result],
         htm: typing.Callable[[], T_Result],
+        xls: typing.Callable[[], T_Result],
+        xlsx: typing.Callable[[], T_Result],
+        xlsm: typing.Callable[[], T_Result],
+        xlsb: typing.Callable[[], T_Result],
+        xlw: typing.Callable[[], T_Result],
+        csv: typing.Callable[[], T_Result],
+        dif: typing.Callable[[], T_Result],
+        sylk: typing.Callable[[], T_Result],
+        slk: typing.Callable[[], T_Result],
+        prn: typing.Callable[[], T_Result],
+        numbers: typing.Callable[[], T_Result],
+        et: typing.Callable[[], T_Result],
+        ods: typing.Callable[[], T_Result],
+        fods: typing.Callable[[], T_Result],
+        uos_1: typing.Callable[[], T_Result],
+        uos_2: typing.Callable[[], T_Result],
+        dbf: typing.Callable[[], T_Result],
+        wk_1: typing.Callable[[], T_Result],
+        wk_2: typing.Callable[[], T_Result],
+        wk_3: typing.Callable[[], T_Result],
+        wk_4: typing.Callable[[], T_Result],
+        wks: typing.Callable[[], T_Result],
+        wq_1: typing.Callable[[], T_Result],
+        wq_2: typing.Callable[[], T_Result],
+        wb_1: typing.Callable[[], T_Result],
+        wb_2: typing.Callable[[], T_Result],
+        wb_3: typing.Callable[[], T_Result],
+        qpw: typing.Callable[[], T_Result],
+        xlr: typing.Callable[[], T_Result],
+        eth: typing.Callable[[], T_Result],
+        tsv: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is LlamaParseSupportedFileExtensions.PDF:
             return pdf()
@@ -159,3 +221,65 @@ class LlamaParseSupportedFileExtensions(str, enum.Enum):
             return html()
         if self is LlamaParseSupportedFileExtensions.HTM:
             return htm()
+        if self is LlamaParseSupportedFileExtensions.XLS:
+            return xls()
+        if self is LlamaParseSupportedFileExtensions.XLSX:
+            return xlsx()
+        if self is LlamaParseSupportedFileExtensions.XLSM:
+            return xlsm()
+        if self is LlamaParseSupportedFileExtensions.XLSB:
+            return xlsb()
+        if self is LlamaParseSupportedFileExtensions.XLW:
+            return xlw()
+        if self is LlamaParseSupportedFileExtensions.CSV:
+            return csv()
+        if self is LlamaParseSupportedFileExtensions.DIF:
+            return dif()
+        if self is LlamaParseSupportedFileExtensions.SYLK:
+            return sylk()
+        if self is LlamaParseSupportedFileExtensions.SLK:
+            return slk()
+        if self is LlamaParseSupportedFileExtensions.PRN:
+            return prn()
+        if self is LlamaParseSupportedFileExtensions.NUMBERS:
+            return numbers()
+        if self is LlamaParseSupportedFileExtensions.ET:
+            return et()
+        if self is LlamaParseSupportedFileExtensions.ODS:
+            return ods()
+        if self is LlamaParseSupportedFileExtensions.FODS:
+            return fods()
+        if self is LlamaParseSupportedFileExtensions.UOS_1:
+            return uos_1()
+        if self is LlamaParseSupportedFileExtensions.UOS_2:
+            return uos_2()
+        if self is LlamaParseSupportedFileExtensions.DBF:
+            return dbf()
+        if self is LlamaParseSupportedFileExtensions.WK_1:
+            return wk_1()
+        if self is LlamaParseSupportedFileExtensions.WK_2:
+            return wk_2()
+        if self is LlamaParseSupportedFileExtensions.WK_3:
+            return wk_3()
+        if self is LlamaParseSupportedFileExtensions.WK_4:
+            return wk_4()
+        if self is LlamaParseSupportedFileExtensions.WKS:
+            return wks()
+        if self is LlamaParseSupportedFileExtensions.WQ_1:
+            return wq_1()
+        if self is LlamaParseSupportedFileExtensions.WQ_2:
+            return wq_2()
+        if self is LlamaParseSupportedFileExtensions.WB_1:
+            return wb_1()
+        if self is LlamaParseSupportedFileExtensions.WB_2:
+            return wb_2()
+        if self is LlamaParseSupportedFileExtensions.WB_3:
+            return wb_3()
+        if self is LlamaParseSupportedFileExtensions.QPW:
+            return qpw()
+        if self is LlamaParseSupportedFileExtensions.XLR:
+            return xlr()
+        if self is LlamaParseSupportedFileExtensions.ETH:
+            return eth()
+        if self is LlamaParseSupportedFileExtensions.TSV:
+            return tsv()
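
Thirty-one spreadsheet-oriented extensions (.xls through .tsv) join the enum. Because visit() takes one callable per member, any code calling visit() directly must now pass 31 more handlers; plain membership tests are unaffected. A small sketch using the str-enum behavior (import path assumed from the types package):

    from llama_cloud.types import LlamaParseSupportedFileExtensions

    # str-enum lookup by value picks up the new spreadsheet formats:
    ext = LlamaParseSupportedFileExtensions(".xlsx")
    assert ext is LlamaParseSupportedFileExtensions.XLSX

    def is_supported(suffix: str) -> bool:
        # Membership test that keeps working as members are added.
        return suffix.lower() in {e.value for e in LlamaParseSupportedFileExtensions}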
llama_cloud/types/pipeline.py

@@ -8,7 +8,6 @@ from .configured_transformation_item import ConfiguredTransformationItem
 from .data_sink import DataSink
 from .eval_execution_params import EvalExecutionParams
 from .llama_parse_parameters import LlamaParseParameters
-from .managed_ingestion_status import ManagedIngestionStatus
 from .pipeline_type import PipelineType
 from .preset_retrieval_params import PresetRetrievalParams
 
@@ -50,9 +49,6 @@ class Pipeline(pydantic.BaseModel):
     llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
     )
-    managed_ingestion_status: typing.Optional[ManagedIngestionStatus] = pydantic.Field(
-        description="Status of Managed Ingestion."
-    )
     data_sink: typing.Optional[DataSink] = pydantic.Field(
         description="The data sink for the pipeline. If None, the pipeline will use the fully managed data sink."
     )
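
The one outright removal in this release: Pipeline loses its managed_ingestion_status field (along with the ManagedIngestionStatus import that backed it). Code that read the field should treat it as absent rather than None. A defensive sketch for code that must run against both 0.0.6 and 0.0.7:

    from typing import Any, Optional

    def managed_ingestion_status(pipeline: Any) -> Optional[Any]:
        # Returns the 0.0.6-era field if the model still has it; None on 0.0.7.
        return getattr(pipeline, "managed_ingestion_status", None)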
llama_cloud-0.0.7.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.0.6
+Version: 0.0.7
 Summary:
 Author: Logan Markewich
 Author-email: logan@runllama.ai
llama_cloud-0.0.7.dist-info/RECORD

@@ -11,32 +11,32 @@ llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6
 llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
 llama_cloud/resources/__init__.py,sha256=CZM0cBk0JjOYkzt1OIl9iQzVBQmD4KuWJsIhrf6BMW0,887
 llama_cloud/resources/component_definitions/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/component_definitions/client.py,sha256=-Hv7W64P59flEf8osaG6L8CoyuJWDfUQh6zPR-nFvmI,7649
+llama_cloud/resources/component_definitions/client.py,sha256=YYfoXNa1qim2OdD5y4N5mvoBZKtrCuXS560mtqH_-1c,7569
 llama_cloud/resources/data_sinks/__init__.py,sha256=nsMEyxkVilxvQGSdJi0Z0yKZoTaTWewZIGJNoMwNDsw,205
-llama_cloud/resources/data_sinks/client.py,sha256=ABIi7PQU7EyGN_dRyl4MgkYD8Rb1ZIbbFvxX3Xv-D3I,19705
+llama_cloud/resources/data_sinks/client.py,sha256=jsXsLcT4FaIV3f0gBb6aEZVyUuvNjdF-CPlujzfvMag,19699
 llama_cloud/resources/data_sinks/types/__init__.py,sha256=M9AO57_TUUgjUcGOhxcROql5U7UbJDbEm7aQj3YqU2I,269
 llama_cloud/resources/data_sinks/types/data_sink_update_component.py,sha256=TjBOpvPvUIyi-NT1Gv1vShMoe-jzDKc8UYaFfo7XOO8,249
 llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py,sha256=wNaRFihU7fW2nhLALvqcRZngS9-rudxn_dgmh_z6FRs,639
 llama_cloud/resources/data_sources/__init__.py,sha256=CCs8ur4fvszPjy0GpTWmMjUAx0WykNgKDKFDNbkYLeM,289
-llama_cloud/resources/data_sources/client.py,sha256=NtLwngkjp6O-ekTPjWHsw9Zt487udK1XfHYlWgBQyLA,20917
+llama_cloud/resources/data_sources/client.py,sha256=uN4gjCbsR3aGxZoE6ouyX2bWEp_AelGX_lOigDF3lHQ,20911
 llama_cloud/resources/data_sources/types/__init__.py,sha256=iOdDXvAM6w80PR62JCscsTOwzDIXHHcG_Ypv18DEdic,410
 llama_cloud/resources/data_sources/types/data_source_update_component.py,sha256=8MoJgdjYmN5WqntDpMXX34WJsf-Wsn0gYw_0t9SOTTA,257
 llama_cloud/resources/data_sources/types/data_source_update_component_one.py,sha256=jfHjlwkUonW0Z73XhJ3w0BZpmptuXU205FWXS1Ucf44,742
 llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py,sha256=3aFC-p8MSxjhOu2nFtqk0pixj6RqNqcFnbOYngUdZUk,215
 llama_cloud/resources/evals/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/evals/client.py,sha256=mIgyzUj74Ia1YYEenGtpFEAJuFrgm6g_c3JfoJg4Zuk,27607
+llama_cloud/resources/evals/client.py,sha256=P0NmQPRu606DZ2U-RKZRgh25BMriWyKGB77X0Dfe4q0,27603
 llama_cloud/resources/files/__init__.py,sha256=aZpyTj6KpZvA5XVwmuo1sIlRs7ba98btxVBZ6j5vIsI,155
-llama_cloud/resources/files/client.py,sha256=iOMMoyXenOsbB9Ba_8Jpy3dy9bFQNO5I-DycUDHPTlc,22190
+llama_cloud/resources/files/client.py,sha256=pU7ugpqW4dAXJycVg3KxUI82ixiH6vZtcwAaHyPdsDA,22186
 llama_cloud/resources/files/types/__init__.py,sha256=ZWnnYWuDYZSfUJc7Jv3HyovzijdB--DTK4YB-uPcDsA,181
 llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-CJf7fnbvIqE3xOI5XOrmPwLbVJLC7zpxMu8Zopk,201
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/parsing/client.py,sha256=lm02dcjE6U1BpMMIrLaJZ3Yzji-gRX4jPgZrxgN_t50,36084
+llama_cloud/resources/parsing/client.py,sha256=P-2g7zca-qXhMr5DLCkJqeOyrq3uTNFtV-VBb1bmJ0Q,36528
 llama_cloud/resources/pipelines/__init__.py,sha256=H7yaFIN62vjuhU3TOKzzuf8qpxZRgw1xVa-eyig-2YU,175
-llama_cloud/resources/pipelines/client.py,sha256=4_6o20jUBMfSDRUExN_LAtbc-w_qA7LdroLNMBqr49w,106966
+llama_cloud/resources/pipelines/client.py,sha256=IL49vu9HBi74o5iTXu8ld44jqMWBgdnHF8sGdXWcwec,106962
 llama_cloud/resources/pipelines/types/__init__.py,sha256=xuT4OBPLrRfEe-E3UVdJvRjl9jTp7tNBK_YzZBb6Kj8,212
 llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
 llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/projects/client.py,sha256=gqjSRfpNK8rxECpHGluiTLxF8qGX2LvNIPJNujfNQ9E,46742
+llama_cloud/resources/projects/client.py,sha256=-ctl7lYcuXBJfdbBoYHWTkAD46BV2k5Vb_KHoUAg6Vg,46734
 llama_cloud/types/__init__.py,sha256=UtfkKO9KZ03jpjMHHsW1y-VB8C3f0zKtXcLtbFZSqEI,10402
 llama_cloud/types/azure_open_ai_embedding.py,sha256=Ne7DkOTpdwGsH2DUVIGdT5T8Nmk6J61lHGbmgG90LuY,3438
 llama_cloud/types/base.py,sha256=cn_Zn61yLMDCMm1iZTPvKILSRlqRzOqZtSYyYBY5dIE,938
@@ -83,7 +83,7 @@ llama_cloud/types/data_source_custom_metadata_value.py,sha256=pTZn5yjZYmuOhsLABF
 llama_cloud/types/data_source_definition.py,sha256=HlSlTxzYcQJOSo_2OSroAE8vAr-otDvTNBSEkA54vL8,1575
 llama_cloud/types/eval_dataset.py,sha256=Uav-YJqAvyzCp1j2XavzzVLV975uki71beIBLkCt8LY,1408
 llama_cloud/types/eval_dataset_job_params.py,sha256=vcXLJWO581uigNvGAurPDgMeEFtQURWucLF5pemdeS0,1343
-llama_cloud/types/eval_dataset_job_record.py,sha256=3ppKnI_IwgvZ89TsfNrl1Y8w--QcnDPqhtwPP-YYQnw,2656
+llama_cloud/types/eval_dataset_job_record.py,sha256=LWfxxOokpExuGxjhCnF79NvKBJ2x8QNdOMU8Ak1VmEU,2656
 llama_cloud/types/eval_execution_params.py,sha256=TkdGGLfBIS2AeeUZtQBqC-Ln7_xPsU44JbN0yOBuP3Q,1382
 llama_cloud/types/eval_execution_params_override.py,sha256=yhYHQvtk50FW_3oOFpOU-Swuh0MhBTY2-GNsXVWZJNY,1399
 llama_cloud/types/eval_llm_model_data.py,sha256=H56AfhYsPA3eMKj1418_67tJ-5PsCDW36-6Zyif-f3M,1162
@@ -100,8 +100,8 @@ llama_cloud/types/http_validation_error.py,sha256=iOSKYv0dJGjyIq8DAeLVKNJY-GiM1b
 llama_cloud/types/hugging_face_inference_api_embedding.py,sha256=_nXn3KkPnnQiuspEUsBASHJOjeGYHuDUq1eBfXr6xwg,3315
 llama_cloud/types/hugging_face_inference_api_embedding_token.py,sha256=A7-_YryBcsP4G5uRyJ9acao3XwX5-YC3NRndTeDAPj4,144
 llama_cloud/types/json_node_parser.py,sha256=w7U_HbyxIDTEyJCdrk4j_8IUaqVsqEkpOJ6cq-0xz0A,1577
-llama_cloud/types/llama_parse_parameters.py,sha256=KiLSFyCAgxV9Ebk6TGYamwCWlsizj_MtFgAjQcnZENs,1732
-llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=wapP4esgu0fSNjQ-Qs5NkZcuUwYEn4YFHKV_HH_yc_M,5519
+llama_cloud/types/llama_parse_parameters.py,sha256=hphB95tS4k7uH9BtM4XdErSfc6lcECq_6YpnAT5JRg8,1810
+llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=EAaw2iWIf08gY1JTg-t-VtZsuHIpNwpwCZPG1xXc2RA,10077
 llama_cloud/types/llm.py,sha256=T-Uv5OO0E6Rscpn841302jx3c7G1uo9LJkdrGlNGk30,2238
 llama_cloud/types/local_eval.py,sha256=77NY_rq4zr0V3iB-PXE7Om6LcjRrytLbQ55f_ovAF-M,2050
 llama_cloud/types/local_eval_results.py,sha256=G1rLE6vO2lEziHQ6bAbZvpJMTrkSYWFvsS1iyZZ44Jw,1449
@@ -126,7 +126,7 @@ llama_cloud/types/parsing_job_json_result.py,sha256=vC0FNMklitCgcB0esthMfv_RbbyF
 llama_cloud/types/parsing_job_markdown_result.py,sha256=E3-CVNFH1IMyuGs_xzYfYdNgq9AdnDshA_CxOTXz_dQ,1094
 llama_cloud/types/parsing_job_text_result.py,sha256=1QZielAWXuzPFOgr_DWshXPjmbExAAgAHKAEYVQVtJ8,1082
 llama_cloud/types/parsing_usage.py,sha256=Wy_c-kAFADDBZgDwqNglsJv_t7vcjOm-8EY32oZEYzU,995
-llama_cloud/types/pipeline.py,sha256=wZ68MphMPSw_tNLEErphPGnkX3te8RsxR0YbfnulwcE,3013
+llama_cloud/types/pipeline.py,sha256=BMJh_QfmaqHgZsfiabu3QpNmfRDPXYqiS0Cp_632UOM,2807
 llama_cloud/types/pipeline_create.py,sha256=_8qO8PVbD6zHW4xsYEHD4TQ-LhD5YE0iWK2x8BIALs0,2833
 llama_cloud/types/pipeline_data_source.py,sha256=A3AlRzTD7zr1y-u5O5LFESqIupbbG-fqUndQgeYj77w,2062
 llama_cloud/types/pipeline_data_source_component.py,sha256=Pk_K0Gv7xSWe5BKCdxz82EFd6AQDvZGN-6t3zg9h8NY,265
@@ -162,7 +162,7 @@ llama_cloud/types/token_text_splitter.py,sha256=Mv8xBCvMXyYuQq1KInPe65O0YYCLWxs6
 llama_cloud/types/transformation_category_names.py,sha256=0xjYe-mDW9OKbTGqL5fSbTvqsfrG4LDu_stW_ubVLl4,582
 llama_cloud/types/validation_error.py,sha256=yZDLtjUHDY5w82Ra6CW0H9sLAr18R0RY1UNgJKR72DQ,1084
 llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPXjdtN9EB7HrLVo6EP0,128
-llama_cloud-0.0.6.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
-llama_cloud-0.0.6.dist-info/METADATA,sha256=mgK5dEdoILzh7V9_WLp2E2Wbie57LovcqDNXpq3Dmis,750
-llama_cloud-0.0.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-llama_cloud-0.0.6.dist-info/RECORD,,
+llama_cloud-0.0.7.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.0.7.dist-info/METADATA,sha256=SFxHQPUn2Bt8tQeMWjT5p37B2zT947-9o5C2autNTu0,750
+llama_cloud-0.0.7.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+llama_cloud-0.0.7.dist-info/RECORD,,
llama_cloud-0.0.7.dist-info/WHEEL

@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 1.9.0
+Generator: poetry-core 1.8.1
 Root-Is-Purelib: true
 Tag: py3-none-any