llama-cloud 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their supported registries. It is provided for informational purposes only.

Potentially problematic release.


This version of llama-cloud might be problematic; consult the registry's advisory page for details.

llama_cloud/__init__.py CHANGED
@@ -16,7 +16,6 @@ from .types import (
16
16
  AutoTransformConfig,
17
17
  AzureOpenAiEmbedding,
18
18
  AzureOpenAiEmbeddingConfig,
19
- Base,
20
19
  BasePromptTemplate,
21
20
  BedrockEmbedding,
22
21
  BedrockEmbeddingConfig,
@@ -26,6 +25,7 @@ from .types import (
26
25
  ChatApp,
27
26
  ChatAppResponse,
28
27
  ChatData,
28
+ ChunkMode,
29
29
  CloudAzStorageBlobDataSource,
30
30
  CloudAzureAiSearchVectorStore,
31
31
  CloudBoxDataSource,
@@ -190,6 +190,7 @@ from .types import (
190
190
  ParsingJobMarkdownResult,
191
191
  ParsingJobStructuredResult,
192
192
  ParsingJobTextResult,
193
+ ParsingMode,
193
194
  ParsingUsage,
194
195
  PartitionNames,
195
196
  Permission,
@@ -239,6 +240,7 @@ from .types import (
239
240
  ProgressEventStatus,
240
241
  Project,
241
242
  ProjectCreate,
243
+ PromptConf,
242
244
  PromptMixinPrompts,
243
245
  PromptSpec,
244
246
  PydanticProgramMode,
@@ -254,7 +256,6 @@ from .types import (
254
256
  ReportEventItemEventData_ReportBlockUpdate,
255
257
  ReportEventItemEventData_ReportStateUpdate,
256
258
  ReportEventType,
257
- ReportFileInfo,
258
259
  ReportMetadata,
259
260
  ReportPlan,
260
261
  ReportPlanBlock,
@@ -269,10 +270,13 @@ from .types import (
269
270
  RetrieverCreate,
270
271
  RetrieverPipeline,
271
272
  Role,
273
+ SchemaRelaxMode,
272
274
  SemanticChunkingConfig,
273
275
  SentenceChunkingConfig,
274
276
  SentenceSplitter,
275
277
  StatusEnum,
278
+ StructMode,
279
+ StructParseConf,
276
280
  SupportedLlmModel,
277
281
  SupportedLlmModelNames,
278
282
  TextBlock,
@@ -362,7 +366,6 @@ __all__ = [
362
366
  "AutoTransformConfig",
363
367
  "AzureOpenAiEmbedding",
364
368
  "AzureOpenAiEmbeddingConfig",
365
- "Base",
366
369
  "BasePromptTemplate",
367
370
  "BedrockEmbedding",
368
371
  "BedrockEmbeddingConfig",
@@ -372,6 +375,7 @@ __all__ = [
372
375
  "ChatApp",
373
376
  "ChatAppResponse",
374
377
  "ChatData",
378
+ "ChunkMode",
375
379
  "CloudAzStorageBlobDataSource",
376
380
  "CloudAzureAiSearchVectorStore",
377
381
  "CloudBoxDataSource",
@@ -557,6 +561,7 @@ __all__ = [
557
561
  "ParsingJobMarkdownResult",
558
562
  "ParsingJobStructuredResult",
559
563
  "ParsingJobTextResult",
564
+ "ParsingMode",
560
565
  "ParsingUsage",
561
566
  "PartitionNames",
562
567
  "Permission",
@@ -616,6 +621,7 @@ __all__ = [
616
621
  "ProgressEventStatus",
617
622
  "Project",
618
623
  "ProjectCreate",
624
+ "PromptConf",
619
625
  "PromptMixinPrompts",
620
626
  "PromptSpec",
621
627
  "PydanticProgramMode",
@@ -631,7 +637,6 @@ __all__ = [
631
637
  "ReportEventItemEventData_ReportBlockUpdate",
632
638
  "ReportEventItemEventData_ReportStateUpdate",
633
639
  "ReportEventType",
634
- "ReportFileInfo",
635
640
  "ReportMetadata",
636
641
  "ReportPlan",
637
642
  "ReportPlanBlock",
@@ -646,10 +651,13 @@ __all__ = [
646
651
  "RetrieverCreate",
647
652
  "RetrieverPipeline",
648
653
  "Role",
654
+ "SchemaRelaxMode",
649
655
  "SemanticChunkingConfig",
650
656
  "SentenceChunkingConfig",
651
657
  "SentenceSplitter",
652
658
  "StatusEnum",
659
+ "StructMode",
660
+ "StructParseConf",
653
661
  "SupportedLlmModel",
654
662
  "SupportedLlmModelNames",
655
663
  "TextBlock",
@@ -151,6 +151,7 @@ class FilesClient:
151
151
  def upload_file(
152
152
  self,
153
153
  *,
154
+ external_file_id: typing.Optional[str] = None,
154
155
  project_id: typing.Optional[str] = None,
155
156
  organization_id: typing.Optional[str] = None,
156
157
  upload_file: typing.IO,
@@ -159,6 +160,8 @@ class FilesClient:
159
160
  Upload a file to S3.
160
161
 
161
162
  Parameters:
163
+ - external_file_id: typing.Optional[str].
164
+
162
165
  - project_id: typing.Optional[str].
163
166
 
164
167
  - organization_id: typing.Optional[str].
@@ -168,7 +171,9 @@ class FilesClient:
168
171
  _response = self._client_wrapper.httpx_client.request(
169
172
  "POST",
170
173
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/files"),
171
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
174
+ params=remove_none_from_dict(
175
+ {"external_file_id": external_file_id, "project_id": project_id, "organization_id": organization_id}
176
+ ),
172
177
  data=jsonable_encoder({}),
173
178
  files={"upload_file": upload_file},
174
179
  headers=self._client_wrapper.get_headers(),
@@ -766,6 +771,7 @@ class AsyncFilesClient:
766
771
  async def upload_file(
767
772
  self,
768
773
  *,
774
+ external_file_id: typing.Optional[str] = None,
769
775
  project_id: typing.Optional[str] = None,
770
776
  organization_id: typing.Optional[str] = None,
771
777
  upload_file: typing.IO,
@@ -774,6 +780,8 @@ class AsyncFilesClient:
774
780
  Upload a file to S3.
775
781
 
776
782
  Parameters:
783
+ - external_file_id: typing.Optional[str].
784
+
777
785
  - project_id: typing.Optional[str].
778
786
 
779
787
  - organization_id: typing.Optional[str].
@@ -783,7 +791,9 @@ class AsyncFilesClient:
783
791
  _response = await self._client_wrapper.httpx_client.request(
784
792
  "POST",
785
793
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/files"),
786
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
794
+ params=remove_none_from_dict(
795
+ {"external_file_id": external_file_id, "project_id": project_id, "organization_id": organization_id}
796
+ ),
787
797
  data=jsonable_encoder({}),
788
798
  files={"upload_file": upload_file},
789
799
  headers=self._client_wrapper.get_headers(),
@@ -417,12 +417,14 @@ class LlamaExtractClient:
417
417
  - extract_settings: typing.Optional[LlamaExtractSettings].
418
418
  ---
419
419
  from llama_cloud import (
420
+ ChunkMode,
420
421
  ExtractConfig,
421
422
  ExtractJobCreate,
422
423
  ExtractMode,
423
424
  ExtractTarget,
424
425
  LlamaExtractSettings,
425
426
  LlamaParseParameters,
427
+ ParsingMode,
426
428
  )
427
429
  from llama_cloud.client import LlamaCloud
428
430
 
@@ -439,7 +441,10 @@ class LlamaExtractClient:
439
441
  ),
440
442
  ),
441
443
  extract_settings=LlamaExtractSettings(
442
- llama_parse_params=LlamaParseParameters(),
444
+ chunk_mode=ChunkMode.PAGE,
445
+ llama_parse_params=LlamaParseParameters(
446
+ parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
447
+ ),
443
448
  ),
444
449
  )
445
450
  """
@@ -517,12 +522,14 @@ class LlamaExtractClient:
517
522
  - extract_settings: typing.Optional[LlamaExtractSettings].
518
523
  ---
519
524
  from llama_cloud import (
525
+ ChunkMode,
520
526
  ExtractConfig,
521
527
  ExtractJobCreate,
522
528
  ExtractMode,
523
529
  ExtractTarget,
524
530
  LlamaExtractSettings,
525
531
  LlamaParseParameters,
532
+ ParsingMode,
526
533
  )
527
534
  from llama_cloud.client import LlamaCloud
528
535
 
@@ -539,7 +546,10 @@ class LlamaExtractClient:
539
546
  ),
540
547
  ),
541
548
  extract_settings=LlamaExtractSettings(
542
- llama_parse_params=LlamaParseParameters(),
549
+ chunk_mode=ChunkMode.PAGE,
550
+ llama_parse_params=LlamaParseParameters(
551
+ parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
552
+ ),
543
553
  ),
544
554
  )
545
555
  """
@@ -1072,12 +1082,14 @@ class AsyncLlamaExtractClient:
1072
1082
  - extract_settings: typing.Optional[LlamaExtractSettings].
1073
1083
  ---
1074
1084
  from llama_cloud import (
1085
+ ChunkMode,
1075
1086
  ExtractConfig,
1076
1087
  ExtractJobCreate,
1077
1088
  ExtractMode,
1078
1089
  ExtractTarget,
1079
1090
  LlamaExtractSettings,
1080
1091
  LlamaParseParameters,
1092
+ ParsingMode,
1081
1093
  )
1082
1094
  from llama_cloud.client import AsyncLlamaCloud
1083
1095
 
@@ -1094,7 +1106,10 @@ class AsyncLlamaExtractClient:
1094
1106
  ),
1095
1107
  ),
1096
1108
  extract_settings=LlamaExtractSettings(
1097
- llama_parse_params=LlamaParseParameters(),
1109
+ chunk_mode=ChunkMode.PAGE,
1110
+ llama_parse_params=LlamaParseParameters(
1111
+ parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
1112
+ ),
1098
1113
  ),
1099
1114
  )
1100
1115
  """
@@ -1172,12 +1187,14 @@ class AsyncLlamaExtractClient:
1172
1187
  - extract_settings: typing.Optional[LlamaExtractSettings].
1173
1188
  ---
1174
1189
  from llama_cloud import (
1190
+ ChunkMode,
1175
1191
  ExtractConfig,
1176
1192
  ExtractJobCreate,
1177
1193
  ExtractMode,
1178
1194
  ExtractTarget,
1179
1195
  LlamaExtractSettings,
1180
1196
  LlamaParseParameters,
1197
+ ParsingMode,
1181
1198
  )
1182
1199
  from llama_cloud.client import AsyncLlamaCloud
1183
1200
 
@@ -1194,7 +1211,10 @@ class AsyncLlamaExtractClient:
1194
1211
  ),
1195
1212
  ),
1196
1213
  extract_settings=LlamaExtractSettings(
1197
- llama_parse_params=LlamaParseParameters(),
1214
+ chunk_mode=ChunkMode.PAGE,
1215
+ llama_parse_params=LlamaParseParameters(
1216
+ parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
1217
+ ),
1198
1218
  ),
1199
1219
  )
1200
1220
  """