llama-cloud 0.1.7__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release: this version of llama-cloud might be problematic.

llama_cloud/__init__.py CHANGED
@@ -50,6 +50,7 @@ from .types import (
     CompositeRetrievalMode,
     CompositeRetrievalResult,
     CompositeRetrievedTextNode,
+    CompositeRetrievedTextNodeWithScore,
     ConfigurableDataSinkNames,
     ConfigurableDataSourceNames,
     ConfigurableTransformationDefinition,
@@ -109,6 +110,9 @@ from .types import (
     ExtractResultsetDataItemValue,
     ExtractResultsetDataZeroValue,
     ExtractResultsetExtractionMetadataValue,
+    ExtractRun,
+    ExtractRunDataSchemaValue,
+    ExtractState,
     ExtractionJob,
     ExtractionResult,
     ExtractionResultDataValue,
@@ -133,6 +137,7 @@ from .types import (
     JobNames,
     JobRecord,
     JobRecordWithUsageMetrics,
+    LlamaExtractSettings,
     LlamaIndexCoreBaseLlmsTypesChatMessage,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
@@ -165,6 +170,7 @@ from .types import (
     OpenAiEmbeddingConfig,
     Organization,
     OrganizationCreate,
+    PageFigureMetadata,
     PageScreenshotMetadata,
     PageScreenshotNodeWithScore,
     PageSegmentationConfig,
@@ -383,6 +389,7 @@ __all__ = [
     "CompositeRetrievalMode",
     "CompositeRetrievalResult",
     "CompositeRetrievedTextNode",
+    "CompositeRetrievedTextNodeWithScore",
     "ConfigurableDataSinkNames",
     "ConfigurableDataSourceNames",
     "ConfigurableTransformationDefinition",
@@ -455,6 +462,9 @@ __all__ = [
     "ExtractResultsetDataItemValue",
     "ExtractResultsetDataZeroValue",
     "ExtractResultsetExtractionMetadataValue",
+    "ExtractRun",
+    "ExtractRunDataSchemaValue",
+    "ExtractState",
     "ExtractionJob",
     "ExtractionResult",
     "ExtractionResultDataValue",
@@ -485,6 +495,7 @@ __all__ = [
     "JobRecord",
     "JobRecordWithUsageMetrics",
     "LlamaCloudEnvironment",
+    "LlamaExtractSettings",
     "LlamaIndexCoreBaseLlmsTypesChatMessage",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
@@ -517,6 +528,7 @@ __all__ = [
     "OpenAiEmbeddingConfig",
     "Organization",
     "OrganizationCreate",
+    "PageFigureMetadata",
     "PageScreenshotMetadata",
     "PageScreenshotNodeWithScore",
     "PageSegmentationConfig",
@@ -12,6 +12,7 @@ from ...core.remove_none_from_dict import remove_none_from_dict
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.file import File
 from ...types.http_validation_error import HttpValidationError
+from ...types.page_figure_metadata import PageFigureMetadata
 from ...types.page_screenshot_metadata import PageScreenshotMetadata
 from ...types.presigned_url import PresignedUrl
 from .types.file_create_from_url_resource_info_value import FileCreateFromUrlResourceInfoValue
@@ -495,6 +496,141 @@ class FilesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def list_file_pages_figures(
+        self, id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[PageFigureMetadata]:
+        """
+        Parameters:
+            - id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.files.list_file_pages_figures(
+            id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/files/{id}/page-figures"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[PageFigureMetadata], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def list_file_page_figures(
+        self,
+        id: str,
+        page_index: int,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> typing.List[PageFigureMetadata]:
+        """
+        Parameters:
+            - id: str.
+
+            - page_index: int.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.files.list_file_page_figures(
+            id="string",
+            page_index=1,
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/files/{id}/page-figures/{page_index}"
+            ),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[PageFigureMetadata], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_file_page_figure(
+        self,
+        id: str,
+        page_index: int,
+        figure_name: str,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> typing.Any:
+        """
+        Parameters:
+            - id: str.
+
+            - page_index: int.
+
+            - figure_name: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.files.get_file_page_figure(
+            id="string",
+            page_index=1,
+            figure_name="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/files/{id}/page-figures/{page_index}/{figure_name}"
+            ),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncFilesClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
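
Before the async counterparts below, here is a minimal usage sketch of the three new synchronous methods, assuming a valid API token and the id of an already-uploaded file (both placeholders here), and assuming PageFigureMetadata exposes a figure_name field, which this diff does not show:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token
file_id = "YOUR_FILE_ID"                 # placeholder: id of an uploaded file

# GET /api/v1/files/{id}/page-figures - every figure across all pages
all_figures = client.files.list_file_pages_figures(id=file_id)

# GET /api/v1/files/{id}/page-figures/{page_index} - figures on one page,
# then GET .../{figure_name} for the raw figure payload of each one
for figure in client.files.list_file_page_figures(id=file_id, page_index=0):
    payload = client.files.get_file_page_figure(
        id=file_id,
        page_index=0,
        figure_name=figure.figure_name,  # assumption: field mirrors the path parameter
    )
    print(payload)
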
@@ -960,3 +1096,138 @@ class AsyncFilesClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_file_pages_figures(
+        self, id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> typing.List[PageFigureMetadata]:
+        """
+        Parameters:
+            - id: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.files.list_file_pages_figures(
+            id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/files/{id}/page-figures"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[PageFigureMetadata], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_file_page_figures(
+        self,
+        id: str,
+        page_index: int,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> typing.List[PageFigureMetadata]:
+        """
+        Parameters:
+            - id: str.
+
+            - page_index: int.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.files.list_file_page_figures(
+            id="string",
+            page_index=1,
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/files/{id}/page-figures/{page_index}"
+            ),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[PageFigureMetadata], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_file_page_figure(
+        self,
+        id: str,
+        page_index: int,
+        figure_name: str,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> typing.Any:
+        """
+        Parameters:
+            - id: str.
+
+            - page_index: int.
+
+            - figure_name: str.
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.files.get_file_page_figure(
+            id="string",
+            page_index=1,
+            figure_name="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/files/{id}/page-figures/{page_index}/{figure_name}"
+            ),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
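
The async client mirrors the synchronous methods one-for-one, so the same flow works under asyncio. A minimal sketch with the same placeholder token and file id, and the same figure_name assumption as above:

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")  # placeholder token
    file_id = "YOUR_FILE_ID"                      # placeholder file id

    # All figures across every page of the file.
    figures = await client.files.list_file_pages_figures(id=file_id)
    print(f"{len(figures)} figures across all pages")

    # Per-page listing, then raw figure retrieval, as in the sync sketch.
    page_figures = await client.files.list_file_page_figures(id=file_id, page_index=0)
    if page_figures:
        payload = await client.files.get_file_page_figure(
            id=file_id,
            page_index=0,
            figure_name=page_figures[0].figure_name,  # assumption: field mirrors the path parameter
        )
        print(payload)


asyncio.run(main())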