llama-cloud 0.1.22__py3-none-any.whl → 0.1.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic.

llama_cloud/__init__.py CHANGED
@@ -259,6 +259,7 @@ from .types import (
     Pooling,
     PresetCompositeRetrievalParams,
     PresetRetrievalParams,
+    PresetRetrievalParamsSearchFiltersInferenceSchemaValue,
     PresignedUrl,
     ProgressEvent,
     ProgressEventStatus,
@@ -356,6 +357,7 @@ from .resources import (
     PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
     PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
     PipelineUpdateTransformConfig,
+    RetrievalParamsSearchFiltersInferenceSchemaValue,
     UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction,
     admin,
     beta,
@@ -668,6 +670,7 @@ __all__ = [
    "Pooling",
    "PresetCompositeRetrievalParams",
    "PresetRetrievalParams",
+   "PresetRetrievalParamsSearchFiltersInferenceSchemaValue",
    "PresignedUrl",
    "ProgressEvent",
    "ProgressEventStatus",
@@ -698,6 +701,7 @@ __all__ = [
    "ReportStateEvent",
    "ReportUpdateEvent",
    "RetrievalMode",
+   "RetrievalParamsSearchFiltersInferenceSchemaValue",
    "RetrieveResults",
    "Retriever",
    "RetrieverCreate",

llama_cloud/resources/__init__.py CHANGED
@@ -52,6 +52,7 @@ from .pipelines import (
     PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
     PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
     PipelineUpdateTransformConfig,
+    RetrievalParamsSearchFiltersInferenceSchemaValue,
 )
 from .reports import UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction
 
@@ -88,6 +89,7 @@ __all__ = [
    "PipelineUpdateEmbeddingConfig_OpenaiEmbedding",
    "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
    "PipelineUpdateTransformConfig",
+   "RetrievalParamsSearchFiltersInferenceSchemaValue",
    "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
    "admin",
    "beta",

llama_cloud/resources/pipelines/__init__.py CHANGED
@@ -11,6 +11,7 @@ from .types import (
     PipelineUpdateEmbeddingConfig_OpenaiEmbedding,
     PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
     PipelineUpdateTransformConfig,
+    RetrievalParamsSearchFiltersInferenceSchemaValue,
 )
 
 __all__ = [
@@ -24,4 +25,5 @@ __all__ = [
    "PipelineUpdateEmbeddingConfig_OpenaiEmbedding",
    "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
    "PipelineUpdateTransformConfig",
+   "RetrievalParamsSearchFiltersInferenceSchemaValue",
 ]

llama_cloud/resources/pipelines/client.py CHANGED
@@ -39,6 +39,9 @@ from ...types.text_node import TextNode
 from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
 from .types.pipeline_update_embedding_config import PipelineUpdateEmbeddingConfig
 from .types.pipeline_update_transform_config import PipelineUpdateTransformConfig
+from .types.retrieval_params_search_filters_inference_schema_value import (
+    RetrievalParamsSearchFiltersInferenceSchemaValue,
+)
 
 try:
     import pydantic
@@ -63,6 +66,7 @@ class PipelinesClient:
         project_name: typing.Optional[str] = None,
         pipeline_name: typing.Optional[str] = None,
         pipeline_type: typing.Optional[PipelineType] = None,
+        organization_id: typing.Optional[str] = None,
     ) -> typing.List[Pipeline]:
         """
         Search for pipelines by various parameters.
@@ -75,6 +79,8 @@ class PipelinesClient:
         - pipeline_name: typing.Optional[str].
 
         - pipeline_type: typing.Optional[PipelineType].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud import PipelineType
         from llama_cloud.client import LlamaCloud
@@ -95,6 +101,7 @@ class PipelinesClient:
                     "project_name": project_name,
                     "pipeline_name": pipeline_name,
                     "pipeline_type": pipeline_type,
+                    "organization_id": organization_id,
                 }
             ),
             headers=self._client_wrapper.get_headers(),
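
The new `organization_id` parameter threads straight through to the query string of the pipeline search endpoint (the async client below mirrors it). A minimal, hedged usage sketch — the token and organization ID are placeholders, not values taken from this diff:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_API_KEY")  # placeholder credentials

# New in 0.1.23: scope the pipeline search to a single organization.
pipelines = client.pipelines.search_pipelines(
    organization_id="YOUR_ORGANIZATION_ID",  # placeholder ID
)
print([p.name for p in pipelines])
```
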
@@ -1037,6 +1044,9 @@ class PipelinesClient:
         rerank_top_n: typing.Optional[int] = OMIT,
         alpha: typing.Optional[float] = OMIT,
         search_filters: typing.Optional[MetadataFilters] = OMIT,
+        search_filters_inference_schema: typing.Optional[
+            typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]
+        ] = OMIT,
         files_top_k: typing.Optional[int] = OMIT,
         retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         retrieve_image_nodes: typing.Optional[bool] = OMIT,
@@ -1067,6 +1077,8 @@ class PipelinesClient:
 
         - search_filters: typing.Optional[MetadataFilters].
 
+        - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]].
+
         - files_top_k: typing.Optional[int].
 
         - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
@@ -1108,6 +1120,8 @@ class PipelinesClient:
             _request["alpha"] = alpha
         if search_filters is not OMIT:
             _request["search_filters"] = search_filters
+        if search_filters_inference_schema is not OMIT:
+            _request["search_filters_inference_schema"] = search_filters_inference_schema
         if files_top_k is not OMIT:
             _request["files_top_k"] = files_top_k
         if retrieval_mode is not OMIT:
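
Combined with the docstring entry above, the request wiring suggests a call shape like the following. This is a hedged sketch: it assumes the enclosing method is the pipeline retrieval endpoint (`run_search` in this generated client), and the JSON-Schema-style values are illustrative — the diff only constrains each value to the `RetrievalParamsSearchFiltersInferenceSchemaValue` union:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_API_KEY")  # placeholder

results = client.pipelines.run_search(
    pipeline_id="YOUR_PIPELINE_ID",  # placeholder
    query="revenue in fiscal year 2024",
    # Keys and shapes below are assumptions for illustration; each value
    # may be a dict, list, str, int, float, or bool per the union type.
    search_filters_inference_schema={
        "year": {"type": "integer", "description": "fiscal year of the document"},
        "department": {"type": "string"},
    },
)
```
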
@@ -1681,6 +1695,7 @@ class AsyncPipelinesClient:
         project_name: typing.Optional[str] = None,
         pipeline_name: typing.Optional[str] = None,
         pipeline_type: typing.Optional[PipelineType] = None,
+        organization_id: typing.Optional[str] = None,
     ) -> typing.List[Pipeline]:
         """
         Search for pipelines by various parameters.
@@ -1693,6 +1708,8 @@ class AsyncPipelinesClient:
         - pipeline_name: typing.Optional[str].
 
         - pipeline_type: typing.Optional[PipelineType].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud import PipelineType
         from llama_cloud.client import AsyncLlamaCloud
@@ -1713,6 +1730,7 @@ class AsyncPipelinesClient:
                     "project_name": project_name,
                     "pipeline_name": pipeline_name,
                     "pipeline_type": pipeline_type,
+                    "organization_id": organization_id,
                 }
             ),
             headers=self._client_wrapper.get_headers(),
@@ -2657,6 +2675,9 @@ class AsyncPipelinesClient:
         rerank_top_n: typing.Optional[int] = OMIT,
         alpha: typing.Optional[float] = OMIT,
         search_filters: typing.Optional[MetadataFilters] = OMIT,
+        search_filters_inference_schema: typing.Optional[
+            typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]
+        ] = OMIT,
         files_top_k: typing.Optional[int] = OMIT,
         retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         retrieve_image_nodes: typing.Optional[bool] = OMIT,
@@ -2687,6 +2708,8 @@ class AsyncPipelinesClient:
 
         - search_filters: typing.Optional[MetadataFilters].
 
+        - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]].
+
         - files_top_k: typing.Optional[int].
 
         - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
@@ -2728,6 +2751,8 @@ class AsyncPipelinesClient:
             _request["alpha"] = alpha
         if search_filters is not OMIT:
             _request["search_filters"] = search_filters
+        if search_filters_inference_schema is not OMIT:
+            _request["search_filters_inference_schema"] = search_filters_inference_schema
         if files_top_k is not OMIT:
             _request["files_top_k"] = files_top_k
         if retrieval_mode is not OMIT:

llama_cloud/resources/pipelines/types/__init__.py CHANGED
@@ -12,6 +12,7 @@ from .pipeline_update_embedding_config import (
     PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
 )
 from .pipeline_update_transform_config import PipelineUpdateTransformConfig
+from .retrieval_params_search_filters_inference_schema_value import RetrievalParamsSearchFiltersInferenceSchemaValue
 
 __all__ = [
    "PipelineFileUpdateCustomMetadataValue",
@@ -24,4 +25,5 @@ __all__ = [
    "PipelineUpdateEmbeddingConfig_OpenaiEmbedding",
    "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
    "PipelineUpdateTransformConfig",
+   "RetrievalParamsSearchFiltersInferenceSchemaValue",
 ]

llama_cloud/resources/pipelines/types/retrieval_params_search_filters_inference_schema_value.py ADDED
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+RetrievalParamsSearchFiltersInferenceSchemaValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
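
The alias is a plain union over JSON-compatible values. Since the `__init__.py` changes above re-export it from the package root, a quick illustrative check looks like this (the example values are arbitrary):

```python
import typing

from llama_cloud import RetrievalParamsSearchFiltersInferenceSchemaValue

# dict, list, str, int, float, and bool are all admitted by the union.
examples: typing.List[RetrievalParamsSearchFiltersInferenceSchemaValue] = [
    {"type": "string", "description": "document author"},
    ["finance", "legal"],
    "2024",
    42,
    0.5,
    True,
]
```
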

llama_cloud/types/__init__.py CHANGED
@@ -276,6 +276,9 @@ from .playground_session import PlaygroundSession
 from .pooling import Pooling
 from .preset_composite_retrieval_params import PresetCompositeRetrievalParams
 from .preset_retrieval_params import PresetRetrievalParams
+from .preset_retrieval_params_search_filters_inference_schema_value import (
+    PresetRetrievalParamsSearchFiltersInferenceSchemaValue,
+)
 from .presigned_url import PresignedUrl
 from .progress_event import ProgressEvent
 from .progress_event_status import ProgressEventStatus
@@ -600,6 +603,7 @@ __all__ = [
    "Pooling",
    "PresetCompositeRetrievalParams",
    "PresetRetrievalParams",
+   "PresetRetrievalParamsSearchFiltersInferenceSchemaValue",
    "PresignedUrl",
    "ProgressEvent",
    "ProgressEventStatus",

llama_cloud/types/llm_config_result.py CHANGED
@@ -17,7 +17,6 @@ except ImportError:
 
 class LlmConfigResult(pydantic.BaseModel):
     llm_type: LlmConfigResultLlmType
-    is_enabled: bool
     valid: bool
     error_message: typing.Optional[str]
 
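Dropping `is_enabled` is a breaking change for any caller that read it. A hedged migration sketch, assuming `result` comes from an LLM-config validation call and that `valid` is the remaining source of truth:

```python
from llama_cloud import LlmConfigResult  # assumes the type is re-exported at the root

def config_is_usable(result: LlmConfigResult) -> bool:
    # 0.1.22 exposed both is_enabled and valid; 0.1.23 keeps only `valid`
    # (plus `error_message`), so branch on that alone.
    return result.valid
```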

llama_cloud/types/preset_retrieval_params.py CHANGED
@@ -5,6 +5,9 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .metadata_filters import MetadataFilters
+from .preset_retrieval_params_search_filters_inference_schema_value import (
+    PresetRetrievalParamsSearchFiltersInferenceSchemaValue,
+)
 from .retrieval_mode import RetrievalMode
 
 try:
@@ -28,6 +31,9 @@ class PresetRetrievalParams(pydantic.BaseModel):
     rerank_top_n: typing.Optional[int]
     alpha: typing.Optional[float]
     search_filters: typing.Optional[MetadataFilters]
+    search_filters_inference_schema: typing.Optional[
+        typing.Dict[str, typing.Optional[PresetRetrievalParamsSearchFiltersInferenceSchemaValue]]
+    ]
     files_top_k: typing.Optional[int]
     retrieval_mode: typing.Optional[RetrievalMode] = pydantic.Field(description="The retrieval mode for the query.")
     retrieve_image_nodes: typing.Optional[bool] = pydantic.Field(description="Whether to retrieve image nodes.")

llama_cloud/types/preset_retrieval_params_search_filters_inference_schema_value.py ADDED
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PresetRetrievalParamsSearchFiltersInferenceSchemaValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]

llama_cloud/types/retrieve_results.py CHANGED
@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .metadata_filters import MetadataFilters
 from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 from .text_node_with_score import TextNodeWithScore
 
@@ -34,6 +35,7 @@ class RetrieveResults(pydantic.BaseModel):
     metadata: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
         description="Metadata associated with the retrieval execution"
     )
+    inferred_search_filters: typing.Optional[MetadataFilters]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
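
With this field, a retrieval response can carry back the metadata filters the service inferred (presumably from the `search_filters_inference_schema` sent with the query). A hedged sketch of reading it, assuming `results` is the `RetrieveResults` from a retrieval call like the one sketched earlier and that `MetadataFilters` exposes a `filters` list as in prior releases:

```python
# Inspect server-inferred filters, if any were produced for this query.
if results.inferred_search_filters is not None:
    for item in results.inferred_search_filters.filters:
        print(item)
else:
    print("no filters were inferred")
```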

llama_cloud/types/supported_llm_model_names.py CHANGED
@@ -9,32 +9,30 @@ T_Result = typing.TypeVar("T_Result")
 class SupportedLlmModelNames(str, enum.Enum):
     GPT_4_O = "GPT_4O"
     GPT_4_O_MINI = "GPT_4O_MINI"
-    AZURE_OPENAI_GPT_3_5_TURBO = "AZURE_OPENAI_GPT_3_5_TURBO"
     AZURE_OPENAI_GPT_4_O = "AZURE_OPENAI_GPT_4O"
     AZURE_OPENAI_GPT_4_O_MINI = "AZURE_OPENAI_GPT_4O_MINI"
     AZURE_OPENAI_GPT_4 = "AZURE_OPENAI_GPT_4"
     CLAUDE_3_5_SONNET = "CLAUDE_3_5_SONNET"
-    BEDROCK_CLAUDE_3_5_SONNET = "BEDROCK_CLAUDE_3_5_SONNET"
+    BEDROCK_CLAUDE_3_5_SONNET_V_1 = "BEDROCK_CLAUDE_3_5_SONNET_V1"
+    BEDROCK_CLAUDE_3_5_SONNET_V_2 = "BEDROCK_CLAUDE_3_5_SONNET_V2"
     VERTEX_AI_CLAUDE_3_5_SONNET_V_2 = "VERTEX_AI_CLAUDE_3_5_SONNET_V2"
 
     def visit(
         self,
         gpt_4_o: typing.Callable[[], T_Result],
         gpt_4_o_mini: typing.Callable[[], T_Result],
-        azure_openai_gpt_3_5_turbo: typing.Callable[[], T_Result],
         azure_openai_gpt_4_o: typing.Callable[[], T_Result],
         azure_openai_gpt_4_o_mini: typing.Callable[[], T_Result],
         azure_openai_gpt_4: typing.Callable[[], T_Result],
         claude_3_5_sonnet: typing.Callable[[], T_Result],
-        bedrock_claude_3_5_sonnet: typing.Callable[[], T_Result],
+        bedrock_claude_3_5_sonnet_v_1: typing.Callable[[], T_Result],
+        bedrock_claude_3_5_sonnet_v_2: typing.Callable[[], T_Result],
         vertex_ai_claude_3_5_sonnet_v_2: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is SupportedLlmModelNames.GPT_4_O:
             return gpt_4_o()
         if self is SupportedLlmModelNames.GPT_4_O_MINI:
             return gpt_4_o_mini()
-        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_3_5_TURBO:
-            return azure_openai_gpt_3_5_turbo()
         if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O:
             return azure_openai_gpt_4_o()
         if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O_MINI:
@@ -43,7 +41,9 @@ class SupportedLlmModelNames(str, enum.Enum):
             return azure_openai_gpt_4()
         if self is SupportedLlmModelNames.CLAUDE_3_5_SONNET:
             return claude_3_5_sonnet()
-        if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET:
-            return bedrock_claude_3_5_sonnet()
+        if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET_V_1:
+            return bedrock_claude_3_5_sonnet_v_1()
+        if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET_V_2:
+            return bedrock_claude_3_5_sonnet_v_2()
         if self is SupportedLlmModelNames.VERTEX_AI_CLAUDE_3_5_SONNET_V_2:
             return vertex_ai_claude_3_5_sonnet_v_2()
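
Two breaking changes land here: `AZURE_OPENAI_GPT_3_5_TURBO` is gone, and `visit()` now requires `bedrock_claude_3_5_sonnet_v_1`/`bedrock_claude_3_5_sonnet_v_2` handlers in place of `bedrock_claude_3_5_sonnet`. A sketch of an updated exhaustive dispatch (the provider labels are illustrative, and the root-level import assumes the enum is re-exported there):

```python
from llama_cloud import SupportedLlmModelNames

def provider_for(model: SupportedLlmModelNames) -> str:
    # One handler per member of the 0.1.23 enum, matching visit()'s signature.
    return model.visit(
        gpt_4_o=lambda: "openai",
        gpt_4_o_mini=lambda: "openai",
        azure_openai_gpt_4_o=lambda: "azure",
        azure_openai_gpt_4_o_mini=lambda: "azure",
        azure_openai_gpt_4=lambda: "azure",
        claude_3_5_sonnet=lambda: "anthropic",
        bedrock_claude_3_5_sonnet_v_1=lambda: "bedrock",  # new in 0.1.23
        bedrock_claude_3_5_sonnet_v_2=lambda: "bedrock",  # new in 0.1.23
        vertex_ai_claude_3_5_sonnet_v_2=lambda: "vertex",
    )
```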

llama_cloud/types/user_organization.py CHANGED
@@ -23,7 +23,7 @@ class UserOrganization(pydantic.BaseModel):
     id: str = pydantic.Field(description="Unique identifier")
     created_at: typing.Optional[dt.datetime]
     updated_at: typing.Optional[dt.datetime]
-    email: str = pydantic.Field(description="The user's email address.")
+    email: typing.Optional[str]
     user_id: typing.Optional[str]
     organization_id: str = pydantic.Field(description="The organization's ID.")
     pending: typing.Optional[bool] = pydantic.Field(
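
Because `email` is now `Optional[str]`, downstream code can no longer assume it is populated. A hedged sketch of the defensive read, assuming `member` is a `UserOrganization` returned by an organizations endpoint:

```python
# email may be None in 0.1.23; fall back to other identifiers for display.
label = member.email if member.email is not None else (member.user_id or member.id)
print(f"organization member: {label}")
```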

llama_cloud-0.1.22.dist-info/METADATA → llama_cloud-0.1.23.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.3
+Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.1.22
+Version: 0.1.23
 Summary:
 License: MIT
 Author: Logan Markewich
@@ -13,7 +13,6 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: certifi (>=2024.7.4)
 Requires-Dist: httpx (>=0.20.0)
 Requires-Dist: pydantic (>=1.10)

llama_cloud-0.1.22.dist-info/RECORD → llama_cloud-0.1.23.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
-llama_cloud/__init__.py,sha256=dwxeZ2TI6QaKfB791DPGvzcGkg2VBV3hAqAhs_4aMcY,24113
+llama_cloud/__init__.py,sha256=7ghywbsF_IGjBlM0qQ_ndijMnC4GL1KroZgXD8IWNpI,24345
 llama_cloud/client.py,sha256=JGV9tNaEgSsDy8vYv7RMPXTmXwbfc9gPvuEfQHYqrq4,5651
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -9,7 +9,7 @@ llama_cloud/core/remove_none_from_dict.py,sha256=8m91FC3YuVem0Gm9_sXhJ2tGvP33owJ
 llama_cloud/environment.py,sha256=feTjOebeFZMrBdnHat4RE5aHlpt-sJm4NhK4ntV1htI,167
 llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6g,170
 llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
-llama_cloud/resources/__init__.py,sha256=lggYCM9fT5FLxGeehXyAkl6WA_7VxyYwLKdWOOSAgQ4,4035
+llama_cloud/resources/__init__.py,sha256=bDSUCwFqDzA8hFiWENlmoIsOcGu2rUZgWnCtVETKwVs,4145
 llama_cloud/resources/admin/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/admin/client.py,sha256=tIfM5KMJXRL0AUAm_s_fx5OzgqMUIxksjhCGuviQXQk,3080
 llama_cloud/resources/beta/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -54,12 +54,13 @@ llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRY
 llama_cloud/resources/organizations/client.py,sha256=56d5VcRg_3Lu_MMdfJCR8uoirBoxmj9vaQbHpwY2zWo,56243
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/parsing/client.py,sha256=U5J2g78CWYgujfLXlLEIoqatFSPnaG1OYNanx1jm_GQ,78177
-llama_cloud/resources/pipelines/__init__.py,sha256=Mx7p3jDZRLMltsfywSufam_4AnHvmAfsxtMHVI72e-8,1083
-llama_cloud/resources/pipelines/client.py,sha256=pfHugOoBmmfawJIGw4tegLxCNQS5Pxki0aBvYn3dn0Q,129708
-llama_cloud/resources/pipelines/types/__init__.py,sha256=jjaMc0V3K1HZLMYZ6WT4ydMtBCVy-oF5koqTCovbDws,1202
+llama_cloud/resources/pipelines/__init__.py,sha256=zyvVEOF_krvEZkCIj_kZoMKfhDqHo_R32a1mv9CriQc,1193
+llama_cloud/resources/pipelines/client.py,sha256=Vfth-8S_25P5jkaBAMoNA6Ve-5-iQhZLAwQxaUkDkJ0,131104
+llama_cloud/resources/pipelines/types/__init__.py,sha256=C68NQ5QzA0dFXf9oePFFGmV1vn96jcAp-QAznSgoRYQ,1375
 llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
 llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py,sha256=c8FF64fDrBMX_2RX4uY3CjbNc0Ss_AUJ4Eqs-KeV4Wc,2874
 llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py,sha256=KbkyULMv-qeS3qRd31ia6pd5rOdypS0o2UL42NRcA7E,321
+llama_cloud/resources/pipelines/types/retrieval_params_search_filters_inference_schema_value.py,sha256=hZWXYlTib0af85ECcerC4xD-bUQe8rG3Q6G1jFTMQcI,228
 llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/projects/client.py,sha256=PF36iWtSa5amUt3q56YwLypOZjclIXSubCRv9NttpLs,25404
 llama_cloud/resources/reports/__init__.py,sha256=cruYbQ1bIuJbRpkfaQY7ajUEslffjd7KzvzMzbtPH94,217
@@ -68,7 +69,7 @@ llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwH
 llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
 llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/retrievers/client.py,sha256=z2LhmA-cZVFzr9P6loeCZYnJbvSIk0QitFeVFp-IyZk,32126
-llama_cloud/types/__init__.py,sha256=_aT6mWFd4wz3Bw63_HQoGr0PDdbpf0bDhDpMEypGSGU,28507
+llama_cloud/types/__init__.py,sha256=-Xa6LnSne7UtW1wBpw0_MeNuXT58mZV1iohj-Vfus9A,28708
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -198,7 +199,7 @@ llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-
 llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=-aL8fh-w2Xf4uQs_LHzb3q6LL_onLAcVzCR5yMI4qJw,1571
 llama_cloud/types/llama_parse_parameters.py,sha256=DNhVZm3YQ_3xZiz7WUrwH7E6jqW2fZ7YGFsdfsYalUk,5773
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
-llama_cloud/types/llm_config_result.py,sha256=rruY77YClxo6X7gUFyknmMHoBFI-sWYMsdlSUvMn83I,1127
+llama_cloud/types/llm_config_result.py,sha256=3NRAvq_jVJlkbLRDgBHTGMDli0av7d0GJME3aiXytKs,1106
 llama_cloud/types/llm_config_result_llm_type.py,sha256=yrijlC2f1egNDx-tCvDVp68pFmGaJZvVE_D1vS2wA34,1032
 llama_cloud/types/llm_configs_response.py,sha256=TQ9RLkOzdY-8k0l1NNUZNUl83dmKks0gOQvhj-4hRZs,1073
 llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
@@ -275,7 +276,8 @@ llama_cloud/types/plan_limits.py,sha256=WAbDbRl8gsQxvhmuVB0YT8mry-0uKg6c66uivypp
 llama_cloud/types/playground_session.py,sha256=F8u2KZL2YaOrsT-o1n4zbhyPxSsoduc3ZCzQB8AecFA,1858
 llama_cloud/types/pooling.py,sha256=5Fr6c8rx9SDWwWzEvD78suob2d79ktodUtLUAUHMbP8,651
 llama_cloud/types/preset_composite_retrieval_params.py,sha256=yEf1pk4Wz5J6SxgB8elklwuyVDCRSZqfWC6x3hJUS4Q,1366
-llama_cloud/types/preset_retrieval_params.py,sha256=gEkjXr4202ebLtPL6pYX5hj5NSwANpAdhZbEHCbE2RA,1782
+llama_cloud/types/preset_retrieval_params.py,sha256=tcWMNsT4IGVBqTYAj-yModGZbM4tk7mZIMGvpuBWH8Y,2079
+llama_cloud/types/preset_retrieval_params_search_filters_inference_schema_value.py,sha256=BOp-oJMIc3KVU89mmKIhVcwwsO0XBRnuErfsPqpUjSs,234
 llama_cloud/types/presigned_url.py,sha256=-DOQo7XKvUsl-9Gz7fX6VOHdQLzGH2XRau24ASvG92E,1275
 llama_cloud/types/progress_event.py,sha256=Bk73A8geTVaq0ze5pMnbkAmx7FSOHQIixYCpCas_dcY,1684
 llama_cloud/types/progress_event_status.py,sha256=yb4RAXwOKU6Bi7iyYy-3lwhF6_mLz0ZFyGjxIdaByoE,893
@@ -303,7 +305,7 @@ llama_cloud/types/report_state.py,sha256=gjexexoT8GaCamGKvfwivKrfRtvdhEtwSLkAt-j
 llama_cloud/types/report_state_event.py,sha256=_wf-Cl_skJdrag-7h11tz-HIy1jed_GIG3c-ksuAjT4,1270
 llama_cloud/types/report_update_event.py,sha256=uLRC79U3pvZ5-kY6pOseQyX1MNH-0m80GUtzpjd6mkI,1270
 llama_cloud/types/retrieval_mode.py,sha256=wV9q3OdHTuyDWbJCGdxq9Hw6U95WFlJcaMq6KWSTzyw,910
-llama_cloud/types/retrieve_results.py,sha256=TBUXK5fT3m9oIzktRyDieXNY_1GHWene23flHX9aH_Y,1945
+llama_cloud/types/retrieve_results.py,sha256=ZuGIJrzxIegzsAopgA-X8690HrcYowhL4Xfhxm3m9H4,2053
 llama_cloud/types/retriever.py,sha256=ZItPsorL8x1XjtJT49ZodaMqU8h2GfwlB4U4cgnfZkM,1626
 llama_cloud/types/retriever_create.py,sha256=WyUR9DRzu3Q9tzKEeXCdQuzCY6WKi9ADJkZea9rqvxU,1286
 llama_cloud/types/retriever_pipeline.py,sha256=F1pZDxg8JdQXRHE6ciFezd7a-Wv5bHplPcGDED-J4b0,1330
@@ -315,7 +317,7 @@ llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDP
 llama_cloud/types/struct_mode.py,sha256=ROicwjXfFmgVU8_xSVxJlnFUzRNKG5VIEF1wYg9uOPU,1020
 llama_cloud/types/struct_parse_conf.py,sha256=WlL8y0IBvdzGsDtFUlEZLzoUODwmOWAJi0viS9unL18,2297
 llama_cloud/types/supported_llm_model.py,sha256=hubSopFICVNEegbJbtbpK6zRHwFPwUNtrw_NAw_3bfg,1380
-llama_cloud/types/supported_llm_model_names.py,sha256=NplDWGpsGoikd3By5sYhqL5IRFWBUnkz-8A3yf5pPcE,2180
+llama_cloud/types/supported_llm_model_names.py,sha256=ZBGVJDkoK4Kj0Z7DaQ7m-zQhymdf7_03_H4qJPrZ0FM,2213
 llama_cloud/types/text_block.py,sha256=X154sQkSyposXuRcEWNp_tWcDQ-AI6q_-MfJUN5exP8,958
 llama_cloud/types/text_node.py,sha256=Tq3QmuKC5cIHvC9wAtvhsXl1g2sACs2yJwQ0Uko8GSU,2846
 llama_cloud/types/text_node_relationships_value.py,sha256=qmXURTk1Xg7ZDzRSSV1uDEel0AXRLohND5ioezibHY0,217
@@ -326,7 +328,7 @@ llama_cloud/types/usage_metric_response.py,sha256=ukvtNZLeLacv-5F0-GQ5wTBZOPUPEj
 llama_cloud/types/usage_response.py,sha256=o0u15PGNQmOOie4kJFfc4Rw0jKGLckBJdH0NCAfT8_k,1499
 llama_cloud/types/usage_response_active_alerts_item.py,sha256=5EgU7go_CPe2Bmio12MwDoJaMnaMW0XjFNvVks0BhQY,1255
 llama_cloud/types/user_job_record.py,sha256=mJHdokJsemXJOwM2l7fsW3X0SlwSNcy7yHbcXZHh3I4,1098
-llama_cloud/types/user_organization.py,sha256=Ydel7grMnKiPMWJmSWhCFCm3v_n286Gk36ANtDLNLd4,1770
+llama_cloud/types/user_organization.py,sha256=yKewpOrMcB-CbujGNTjkX6QiWYr5HVsRIFQ-WX8kp2I,1729
 llama_cloud/types/user_organization_create.py,sha256=Zj57s9xuYVnLW2p8i4j2QORL-G1y7Ab3avXE1baERQY,1189
 llama_cloud/types/user_organization_delete.py,sha256=IDYLKfFAXfcJfkEpA0ARbaA0JDcEBe7fTLv833DZXHs,1104
 llama_cloud/types/user_organization_role.py,sha256=vTM5pYG9NJpTQACn8vzSIt01Ul6jEHCVmyR3vV0isPg,1512
@@ -335,7 +337,7 @@ llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPX
 llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
 llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
-llama_cloud-0.1.22.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
-llama_cloud-0.1.22.dist-info/METADATA,sha256=8oiNq-d89044LUmFV1wybkJIdF5qcK4poEPPjh6UrtQ,1194
-llama_cloud-0.1.22.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-llama_cloud-0.1.22.dist-info/RECORD,,
+llama_cloud-0.1.23.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.23.dist-info/METADATA,sha256=1aLs2Ad-tIIomTC_Nid1Tsedxj6ni0MPLxFa7u3xQEI,1143
+llama_cloud-0.1.23.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llama_cloud-0.1.23.dist-info/RECORD,,

llama_cloud-0.1.22.dist-info/WHEEL → llama_cloud-0.1.23.dist-info/WHEEL RENAMED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 2.1.3
+Generator: poetry-core 1.9.0
 Root-Is-Purelib: true
 Tag: py3-none-any