llama-cloud 0.1.15__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic. See the registry's advisory page for more details.

Files changed (49)
  1. llama_cloud/__init__.py +10 -32
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/chat_apps/client.py +20 -0
  4. llama_cloud/resources/evals/client.py +0 -643
  5. llama_cloud/resources/llama_extract/client.py +98 -6
  6. llama_cloud/resources/parsing/client.py +8 -0
  7. llama_cloud/resources/pipelines/client.py +14 -375
  8. llama_cloud/resources/projects/client.py +72 -923
  9. llama_cloud/resources/retrievers/client.py +161 -4
  10. llama_cloud/types/__init__.py +10 -32
  11. llama_cloud/types/base_plan.py +3 -0
  12. llama_cloud/types/base_plan_name.py +12 -0
  13. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  14. llama_cloud/types/extract_config.py +0 -3
  15. llama_cloud/types/extract_mode.py +13 -1
  16. llama_cloud/types/extract_run.py +1 -0
  17. llama_cloud/types/llama_extract_settings.py +1 -0
  18. llama_cloud/types/llama_parse_parameters.py +1 -0
  19. llama_cloud/types/parsing_mode.py +12 -0
  20. llama_cloud/types/pipeline_file.py +2 -1
  21. llama_cloud/types/pipeline_file_status.py +33 -0
  22. llama_cloud/types/plan_limits.py +1 -0
  23. llama_cloud/types/preset_composite_retrieval_params.py +4 -2
  24. llama_cloud/types/prompt_conf.py +1 -0
  25. llama_cloud/types/{eval_question_create.py → re_rank_config.py} +6 -2
  26. llama_cloud/types/re_ranker_type.py +41 -0
  27. llama_cloud/types/report_block.py +1 -0
  28. llama_cloud/types/struct_mode.py +4 -0
  29. llama_cloud/types/struct_parse_conf.py +6 -0
  30. llama_cloud/types/usage_and_plan.py +2 -2
  31. llama_cloud/types/{usage.py → usage_response.py} +3 -3
  32. llama_cloud/types/{usage_active_alerts_item.py → usage_response_active_alerts_item.py} +8 -4
  33. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/METADATA +1 -1
  34. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/RECORD +36 -47
  35. llama_cloud/types/eval_dataset.py +0 -40
  36. llama_cloud/types/eval_dataset_job_params.py +0 -39
  37. llama_cloud/types/eval_dataset_job_record.py +0 -58
  38. llama_cloud/types/eval_execution_params_override.py +0 -37
  39. llama_cloud/types/eval_metric.py +0 -17
  40. llama_cloud/types/eval_question.py +0 -38
  41. llama_cloud/types/eval_question_result.py +0 -52
  42. llama_cloud/types/local_eval.py +0 -47
  43. llama_cloud/types/local_eval_results.py +0 -40
  44. llama_cloud/types/local_eval_sets.py +0 -33
  45. llama_cloud/types/metric_result.py +0 -33
  46. llama_cloud/types/prompt_mixin_prompts.py +0 -39
  47. llama_cloud/types/prompt_spec.py +0 -36
  48. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/LICENSE +0 -0
  49. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/WHEEL +0 -0
llama_cloud/__init__.py CHANGED
@@ -96,15 +96,7 @@ from .types import (
96
96
  EmbeddingModelConfigUpdateEmbeddingConfig_HuggingfaceApiEmbedding,
97
97
  EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding,
98
98
  EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding,
99
- EvalDataset,
100
- EvalDatasetJobParams,
101
- EvalDatasetJobRecord,
102
99
  EvalExecutionParams,
103
- EvalExecutionParamsOverride,
104
- EvalMetric,
105
- EvalQuestion,
106
- EvalQuestionCreate,
107
- EvalQuestionResult,
108
100
  ExtractAgent,
109
101
  ExtractAgentCreate,
110
102
  ExtractAgentCreateDataSchema,
@@ -169,9 +161,6 @@ from .types import (
169
161
  Llm,
170
162
  LlmModelData,
171
163
  LlmParameters,
172
- LocalEval,
173
- LocalEvalResults,
174
- LocalEvalSets,
175
164
  ManagedIngestionStatus,
176
165
  ManagedIngestionStatusResponse,
177
166
  MarkdownElementNodeParser,
@@ -182,7 +171,6 @@ from .types import (
182
171
  MetadataFilterValue,
183
172
  MetadataFilters,
184
173
  MetadataFiltersFiltersItem,
185
- MetricResult,
186
174
  NodeParser,
187
175
  NodeRelationship,
188
176
  NoneChunkingConfig,
@@ -246,6 +234,7 @@ from .types import (
246
234
  PipelineFileCustomMetadataValue,
247
235
  PipelineFilePermissionInfoValue,
248
236
  PipelineFileResourceInfoValue,
237
+ PipelineFileStatus,
249
238
  PipelineTransformConfig,
250
239
  PipelineTransformConfig_Advanced,
251
240
  PipelineTransformConfig_Auto,
@@ -261,9 +250,9 @@ from .types import (
261
250
  Project,
262
251
  ProjectCreate,
263
252
  PromptConf,
264
- PromptMixinPrompts,
265
- PromptSpec,
266
253
  PydanticProgramMode,
254
+ ReRankConfig,
255
+ ReRankerType,
267
256
  RecurringCreditGrant,
268
257
  RelatedNodeInfo,
269
258
  RelatedNodeInfoNodeType,
@@ -307,10 +296,10 @@ from .types import (
307
296
  TokenChunkingConfig,
308
297
  TokenTextSplitter,
309
298
  TransformationCategoryNames,
310
- Usage,
311
- UsageActiveAlertsItem,
312
299
  UsageAndPlan,
313
300
  UsageMetricResponse,
301
+ UsageResponse,
302
+ UsageResponseActiveAlertsItem,
314
303
  UserJobRecord,
315
304
  UserOrganization,
316
305
  UserOrganizationCreate,
@@ -474,15 +463,7 @@ __all__ = [
474
463
  "EmbeddingModelConfigUpdateEmbeddingConfig_HuggingfaceApiEmbedding",
475
464
  "EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding",
476
465
  "EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding",
477
- "EvalDataset",
478
- "EvalDatasetJobParams",
479
- "EvalDatasetJobRecord",
480
466
  "EvalExecutionParams",
481
- "EvalExecutionParamsOverride",
482
- "EvalMetric",
483
- "EvalQuestion",
484
- "EvalQuestionCreate",
485
- "EvalQuestionResult",
486
467
  "ExtractAgent",
487
468
  "ExtractAgentCreate",
488
469
  "ExtractAgentCreateDataSchema",
@@ -551,9 +532,6 @@ __all__ = [
551
532
  "Llm",
552
533
  "LlmModelData",
553
534
  "LlmParameters",
554
- "LocalEval",
555
- "LocalEvalResults",
556
- "LocalEvalSets",
557
535
  "ManagedIngestionStatus",
558
536
  "ManagedIngestionStatusResponse",
559
537
  "MarkdownElementNodeParser",
@@ -564,7 +542,6 @@ __all__ = [
564
542
  "MetadataFilterValue",
565
543
  "MetadataFilters",
566
544
  "MetadataFiltersFiltersItem",
567
- "MetricResult",
568
545
  "NodeParser",
569
546
  "NodeRelationship",
570
547
  "NoneChunkingConfig",
@@ -628,6 +605,7 @@ __all__ = [
628
605
  "PipelineFileCustomMetadataValue",
629
606
  "PipelineFilePermissionInfoValue",
630
607
  "PipelineFileResourceInfoValue",
608
+ "PipelineFileStatus",
631
609
  "PipelineFileUpdateCustomMetadataValue",
632
610
  "PipelineTransformConfig",
633
611
  "PipelineTransformConfig_Advanced",
@@ -653,9 +631,9 @@ __all__ = [
653
631
  "Project",
654
632
  "ProjectCreate",
655
633
  "PromptConf",
656
- "PromptMixinPrompts",
657
- "PromptSpec",
658
634
  "PydanticProgramMode",
635
+ "ReRankConfig",
636
+ "ReRankerType",
659
637
  "RecurringCreditGrant",
660
638
  "RelatedNodeInfo",
661
639
  "RelatedNodeInfoNodeType",
@@ -701,10 +679,10 @@ __all__ = [
701
679
  "TransformationCategoryNames",
702
680
  "UnprocessableEntityError",
703
681
  "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
704
- "Usage",
705
- "UsageActiveAlertsItem",
706
682
  "UsageAndPlan",
707
683
  "UsageMetricResponse",
684
+ "UsageResponse",
685
+ "UsageResponseActiveAlertsItem",
708
686
  "UserJobRecord",
709
687
  "UserOrganization",
710
688
  "UserOrganizationCreate",
@@ -4,4 +4,4 @@ import enum
4
4
 
5
5
 
6
6
  class LlamaCloudEnvironment(enum.Enum):
7
- DEFAULT = "https://api.cloud.llamaindex.ai/"
7
+ DEFAULT = "https://api.cloud.llamaindex.ai"
@@ -95,6 +95,8 @@ class ChatAppsClient:
95
95
  CompositeRetrievalMode,
96
96
  LlmParameters,
97
97
  PresetCompositeRetrievalParams,
98
+ ReRankConfig,
99
+ ReRankerType,
98
100
  SupportedLlmModelNames,
99
101
  )
100
102
  from llama_cloud.client import LlamaCloud
@@ -110,6 +112,9 @@ class ChatAppsClient:
110
112
  ),
111
113
  retrieval_config=PresetCompositeRetrievalParams(
112
114
  mode=CompositeRetrievalMode.ROUTING,
115
+ rerank_config=ReRankConfig(
116
+ type=ReRankerType.SYSTEM_DEFAULT,
117
+ ),
113
118
  ),
114
119
  )
115
120
  """
@@ -200,6 +205,8 @@ class ChatAppsClient:
200
205
  CompositeRetrievalMode,
201
206
  LlmParameters,
202
207
  PresetCompositeRetrievalParams,
208
+ ReRankConfig,
209
+ ReRankerType,
203
210
  SupportedLlmModelNames,
204
211
  )
205
212
  from llama_cloud.client import LlamaCloud
@@ -214,6 +221,9 @@ class ChatAppsClient:
214
221
  ),
215
222
  retrieval_config=PresetCompositeRetrievalParams(
216
223
  mode=CompositeRetrievalMode.ROUTING,
224
+ rerank_config=ReRankConfig(
225
+ type=ReRankerType.SYSTEM_DEFAULT,
226
+ ),
217
227
  ),
218
228
  )
219
229
  """
@@ -378,6 +388,8 @@ class AsyncChatAppsClient:
378
388
  CompositeRetrievalMode,
379
389
  LlmParameters,
380
390
  PresetCompositeRetrievalParams,
391
+ ReRankConfig,
392
+ ReRankerType,
381
393
  SupportedLlmModelNames,
382
394
  )
383
395
  from llama_cloud.client import AsyncLlamaCloud
@@ -393,6 +405,9 @@ class AsyncChatAppsClient:
393
405
  ),
394
406
  retrieval_config=PresetCompositeRetrievalParams(
395
407
  mode=CompositeRetrievalMode.ROUTING,
408
+ rerank_config=ReRankConfig(
409
+ type=ReRankerType.SYSTEM_DEFAULT,
410
+ ),
396
411
  ),
397
412
  )
398
413
  """
@@ -483,6 +498,8 @@ class AsyncChatAppsClient:
483
498
  CompositeRetrievalMode,
484
499
  LlmParameters,
485
500
  PresetCompositeRetrievalParams,
501
+ ReRankConfig,
502
+ ReRankerType,
486
503
  SupportedLlmModelNames,
487
504
  )
488
505
  from llama_cloud.client import AsyncLlamaCloud
@@ -497,6 +514,9 @@ class AsyncChatAppsClient:
497
514
  ),
498
515
  retrieval_config=PresetCompositeRetrievalParams(
499
516
  mode=CompositeRetrievalMode.ROUTING,
517
+ rerank_config=ReRankConfig(
518
+ type=ReRankerType.SYSTEM_DEFAULT,
519
+ ),
500
520
  ),
501
521
  )
502
522
  """