llama-cloud 0.1.29__py3-none-any.whl → 0.1.31__py3-none-any.whl

This diff reflects the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (42)
  1. llama_cloud/__init__.py +26 -16
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +0 -2
  4. llama_cloud/resources/beta/client.py +602 -0
  5. llama_cloud/resources/data_sources/types/data_source_update_component.py +0 -2
  6. llama_cloud/resources/parsing/client.py +56 -0
  7. llama_cloud/resources/pipelines/client.py +64 -0
  8. llama_cloud/types/__init__.py +26 -14
  9. llama_cloud/types/{model_configuration.py → agent_data.py} +8 -7
  10. llama_cloud/types/agent_deployment_summary.py +1 -1
  11. llama_cloud/types/{cloud_google_drive_data_source.py → aggregate_group.py} +8 -5
  12. llama_cloud/types/base_plan.py +3 -0
  13. llama_cloud/types/data_source_component.py +0 -2
  14. llama_cloud/types/data_source_create_component.py +0 -2
  15. llama_cloud/types/filter_operation.py +46 -0
  16. llama_cloud/types/filter_operation_eq.py +6 -0
  17. llama_cloud/types/filter_operation_gt.py +6 -0
  18. llama_cloud/types/filter_operation_gte.py +6 -0
  19. llama_cloud/types/filter_operation_includes_item.py +6 -0
  20. llama_cloud/types/filter_operation_lt.py +6 -0
  21. llama_cloud/types/filter_operation_lte.py +6 -0
  22. llama_cloud/types/input_message.py +2 -2
  23. llama_cloud/types/legacy_parse_job_config.py +13 -0
  24. llama_cloud/types/llama_extract_settings.py +3 -0
  25. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +2 -2
  26. llama_cloud/types/llama_parse_parameters.py +7 -0
  27. llama_cloud/types/{llama_index_core_base_llms_types_message_role.py → message_role.py} +9 -9
  28. llama_cloud/types/{text_content_block.py → paginated_response_agent_data.py} +5 -5
  29. llama_cloud/types/{message.py → paginated_response_aggregate_group.py} +5 -9
  30. llama_cloud/types/parse_job_config.py +7 -0
  31. llama_cloud/types/pipeline_data_source_component.py +0 -2
  32. llama_cloud/types/playground_session.py +2 -2
  33. llama_cloud/types/role.py +0 -1
  34. llama_cloud/types/{app_schema_chat_chat_message.py → src_app_schema_chat_chat_message.py} +3 -3
  35. llama_cloud/types/user_organization_role.py +0 -1
  36. {llama_cloud-0.1.29.dist-info → llama_cloud-0.1.31.dist-info}/METADATA +1 -1
  37. {llama_cloud-0.1.29.dist-info → llama_cloud-0.1.31.dist-info}/RECORD +39 -35
  38. llama_cloud/resources/responses/__init__.py +0 -2
  39. llama_cloud/resources/responses/client.py +0 -137
  40. llama_cloud/types/app_schema_responses_message_role.py +0 -33
  41. {llama_cloud-0.1.29.dist-info → llama_cloud-0.1.31.dist-info}/LICENSE +0 -0
  42. {llama_cloud-0.1.29.dist-info → llama_cloud-0.1.31.dist-info}/WHEEL +0 -0
@@ -34,6 +34,7 @@ class LlamaParseParameters(pydantic.BaseModel):
  disable_image_extraction: typing.Optional[bool]
  invalidate_cache: typing.Optional[bool]
  outlined_table_extraction: typing.Optional[bool]
+ merge_tables_across_pages_in_markdown: typing.Optional[bool]
  output_pdf_of_document: typing.Optional[bool]
  do_not_cache: typing.Optional[bool]
  fast_mode: typing.Optional[bool]
@@ -101,6 +102,12 @@ class LlamaParseParameters(pydantic.BaseModel):
  strict_mode_reconstruction: typing.Optional[bool]
  strict_mode_buggy_font: typing.Optional[bool]
  save_images: typing.Optional[bool]
+ hide_headers: typing.Optional[bool]
+ hide_footers: typing.Optional[bool]
+ page_header_prefix: typing.Optional[str]
+ page_header_suffix: typing.Optional[str]
+ page_footer_prefix: typing.Optional[str]
+ page_footer_suffix: typing.Optional[str]
  ignore_document_elements_for_layout_detection: typing.Optional[bool]
  output_tables_as_html: typing.Optional[bool] = pydantic.Field(alias="output_tables_as_HTML")
  internal_is_screenshot_job: typing.Optional[bool]
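
The new LlamaParseParameters fields above are ordinary optional pydantic fields, so they can be set alongside existing parse options. A minimal sketch, assuming the model is importable from its module path in the RECORD and that the new flags behave as their names suggest (only the field names themselves come from this diff):

from llama_cloud.types.llama_parse_parameters import LlamaParseParameters

# Sketch: combine a pre-existing option with the fields added in 0.1.31.
# All of these are Optional, so anything left out simply stays None.
params = LlamaParseParameters(
    outlined_table_extraction=True,              # pre-existing field, unchanged here
    merge_tables_across_pages_in_markdown=True,  # added in this release
    hide_headers=True,                           # added; presumably suppresses page headers
    hide_footers=True,                           # added; presumably suppresses page footers
    page_header_prefix="[header] ",              # added; assumed to wrap kept header text
    page_footer_suffix=" [footer]",              # added; assumed to wrap kept footer text
)
print(params.json())
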
@@ -6,7 +6,7 @@ import typing
  T_Result = typing.TypeVar("T_Result")
 
 
- class LlamaIndexCoreBaseLlmsTypesMessageRole(str, enum.Enum):
+ class MessageRole(str, enum.Enum):
  """
  Message role.
  """
@@ -31,19 +31,19 @@ class LlamaIndexCoreBaseLlmsTypesMessageRole(str, enum.Enum):
  chatbot: typing.Callable[[], T_Result],
  model: typing.Callable[[], T_Result],
  ) -> T_Result:
- if self is LlamaIndexCoreBaseLlmsTypesMessageRole.SYSTEM:
+ if self is MessageRole.SYSTEM:
  return system()
- if self is LlamaIndexCoreBaseLlmsTypesMessageRole.DEVELOPER:
+ if self is MessageRole.DEVELOPER:
  return developer()
- if self is LlamaIndexCoreBaseLlmsTypesMessageRole.USER:
+ if self is MessageRole.USER:
  return user()
- if self is LlamaIndexCoreBaseLlmsTypesMessageRole.ASSISTANT:
+ if self is MessageRole.ASSISTANT:
  return assistant()
- if self is LlamaIndexCoreBaseLlmsTypesMessageRole.FUNCTION:
+ if self is MessageRole.FUNCTION:
  return function()
- if self is LlamaIndexCoreBaseLlmsTypesMessageRole.TOOL:
+ if self is MessageRole.TOOL:
  return tool()
- if self is LlamaIndexCoreBaseLlmsTypesMessageRole.CHATBOT:
+ if self is MessageRole.CHATBOT:
  return chatbot()
- if self is LlamaIndexCoreBaseLlmsTypesMessageRole.MODEL:
+ if self is MessageRole.MODEL:
  return model()
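
Since only the enum's name changes (the members and the visit() dispatch are identical), callers just swap the import. A small sketch of the rename; the module path follows the llama_cloud/types/message_role.py entry in the RECORD below, and whether the name is also re-exported from the package root is not shown in this diff:

# 0.1.29 import (removed):
# from llama_cloud.types.llama_index_core_base_llms_types_message_role import (
#     LlamaIndexCoreBaseLlmsTypesMessageRole as MessageRole,
# )
# 0.1.31 import:
from llama_cloud.types.message_role import MessageRole

role = MessageRole.USER

# visit() still takes one callback per member, exactly as in the hunk above.
label = role.visit(
    system=lambda: "system",
    developer=lambda: "developer",
    user=lambda: "user",
    assistant=lambda: "assistant",
    function=lambda: "function",
    tool=lambda: "tool",
    chatbot=lambda: "chatbot",
    model=lambda: "model",
)
print(label)  # -> "user"
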
@@ -3,9 +3,8 @@
  import datetime as dt
  import typing
 
- import typing_extensions
-
  from ..core.datetime_utils import serialize_datetime
+ from .agent_data import AgentData
 
  try:
  import pydantic
@@ -16,9 +15,10 @@ except ImportError:
  import pydantic # type: ignore
 
 
- class TextContentBlock(pydantic.BaseModel):
- type: typing.Optional[typing_extensions.Literal["text"]]
- content: typing.Optional[str] = pydantic.Field(description="Content of the text block")
+ class PaginatedResponseAgentData(pydantic.BaseModel):
+ items: typing.List[AgentData] = pydantic.Field(description="The list of items.")
+ next_page_token: typing.Optional[str]
+ total_size: typing.Optional[int]
 
  def json(self, **kwargs: typing.Any) -> str:
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
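
PaginatedResponseAgentData (which replaces the deleted TextContentBlock module in this renamed file) follows a token-based pagination shape: items, next_page_token, total_size. A sketch of draining such a response; fetch_page is a hypothetical stand-in for whichever client method returns this model, since that call site is not shown in this diff:

import typing

from llama_cloud.types.agent_data import AgentData
from llama_cloud.types.paginated_response_agent_data import PaginatedResponseAgentData


def collect_agent_data(
    fetch_page: typing.Callable[[typing.Optional[str]], PaginatedResponseAgentData],
) -> typing.List[AgentData]:
    # Keep requesting pages until the server stops returning a next_page_token.
    collected: typing.List[AgentData] = []
    token: typing.Optional[str] = None
    while True:
        page = fetch_page(token)
        collected.extend(page.items)
        token = page.next_page_token
        if not token:
            return collected
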
@@ -4,8 +4,7 @@ import datetime as dt
  import typing
 
  from ..core.datetime_utils import serialize_datetime
- from .app_schema_responses_message_role import AppSchemaResponsesMessageRole
- from .text_content_block import TextContentBlock
+ from .aggregate_group import AggregateGroup
 
  try:
  import pydantic
@@ -16,13 +15,10 @@ except ImportError:
  import pydantic # type: ignore
 
 
- class Message(pydantic.BaseModel):
- role: AppSchemaResponsesMessageRole = pydantic.Field(
- description="Role of the message in the conversation (system, user, assistant)"
- )
- blocks: typing.List[TextContentBlock] = pydantic.Field(
- description="Content of the message. Can be an input or output content block."
- )
+ class PaginatedResponseAggregateGroup(pydantic.BaseModel):
+ items: typing.List[AggregateGroup] = pydantic.Field(description="The list of items.")
+ next_page_token: typing.Optional[str]
+ total_size: typing.Optional[int]
 
  def json(self, **kwargs: typing.Any) -> str:
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -36,6 +36,7 @@ class ParseJobConfig(pydantic.BaseModel):
  disable_image_extraction: typing.Optional[bool]
  invalidate_cache: typing.Optional[bool]
  outlined_table_extraction: typing.Optional[bool]
+ merge_tables_across_pages_in_markdown: typing.Optional[bool]
  output_pdf_of_document: typing.Optional[bool]
  do_not_cache: typing.Optional[bool]
  fast_mode: typing.Optional[bool]
@@ -103,6 +104,12 @@ class ParseJobConfig(pydantic.BaseModel):
  strict_mode_reconstruction: typing.Optional[bool]
  strict_mode_buggy_font: typing.Optional[bool]
  save_images: typing.Optional[bool]
+ hide_headers: typing.Optional[bool]
+ hide_footers: typing.Optional[bool]
+ page_header_prefix: typing.Optional[str]
+ page_header_suffix: typing.Optional[str]
+ page_footer_prefix: typing.Optional[str]
+ page_footer_suffix: typing.Optional[str]
  ignore_document_elements_for_layout_detection: typing.Optional[bool]
  output_tables_as_html: typing.Optional[bool] = pydantic.Field(alias="output_tables_as_HTML")
  internal_is_screenshot_job: typing.Optional[bool]
@@ -5,7 +5,6 @@ import typing
  from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
  from .cloud_box_data_source import CloudBoxDataSource
  from .cloud_confluence_data_source import CloudConfluenceDataSource
- from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
  from .cloud_jira_data_source import CloudJiraDataSource
  from .cloud_notion_page_data_source import CloudNotionPageDataSource
  from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,7 +16,6 @@ PipelineDataSourceComponent = typing.Union[
  typing.Dict[str, typing.Any],
  CloudS3DataSource,
  CloudAzStorageBlobDataSource,
- CloudGoogleDriveDataSource,
  CloudOneDriveDataSource,
  CloudSharepointDataSource,
  CloudSlackDataSource,
@@ -4,9 +4,9 @@ import datetime as dt
  import typing
 
  from ..core.datetime_utils import serialize_datetime
- from .app_schema_chat_chat_message import AppSchemaChatChatMessage
  from .llm_parameters import LlmParameters
  from .preset_retrieval_params import PresetRetrievalParams
+ from .src_app_schema_chat_chat_message import SrcAppSchemaChatChatMessage
 
  try:
  import pydantic
@@ -33,7 +33,7 @@ class PlaygroundSession(pydantic.BaseModel):
  retrieval_params: typing.Optional[PresetRetrievalParams] = pydantic.Field(
  description="Preset retrieval parameters last used in this session."
  )
- chat_messages: typing.Optional[typing.List[AppSchemaChatChatMessage]] = pydantic.Field(
+ chat_messages: typing.Optional[typing.List[SrcAppSchemaChatChatMessage]] = pydantic.Field(
  description="Chat message history for this session."
  )
 
llama_cloud/types/role.py CHANGED
@@ -24,7 +24,6 @@ class Role(pydantic.BaseModel):
  created_at: typing.Optional[dt.datetime]
  updated_at: typing.Optional[dt.datetime]
  name: str = pydantic.Field(description="A name for the role.")
- organization_id: typing.Optional[str]
  permissions: typing.List[Permission] = pydantic.Field(description="The actual permissions of the role.")
 
  def json(self, **kwargs: typing.Any) -> str:
@@ -4,8 +4,8 @@ import datetime as dt
  import typing
 
  from ..core.datetime_utils import serialize_datetime
- from .llama_index_core_base_llms_types_message_role import LlamaIndexCoreBaseLlmsTypesMessageRole
  from .message_annotation import MessageAnnotation
+ from .message_role import MessageRole
 
  try:
  import pydantic
@@ -16,13 +16,13 @@ except ImportError:
  import pydantic # type: ignore
 
 
- class AppSchemaChatChatMessage(pydantic.BaseModel):
+ class SrcAppSchemaChatChatMessage(pydantic.BaseModel):
  id: str
  index: int = pydantic.Field(description="The index of the message in the chat.")
  annotations: typing.Optional[typing.List[MessageAnnotation]] = pydantic.Field(
  description="Retrieval annotations for the message."
  )
- role: LlamaIndexCoreBaseLlmsTypesMessageRole = pydantic.Field(description="The role of the message.")
+ role: MessageRole = pydantic.Field(description="The role of the message.")
  content: typing.Optional[str]
  additional_kwargs: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
  description="Additional arguments passed to the model"
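
Constructing the renamed chat message type only changes the imports, since the fields shown above are unchanged apart from the MessageRole annotation. A minimal sketch with the optional fields left unset (module paths follow the RECORD entries; any fields of the model outside this hunk are not shown here):

from llama_cloud.types.message_role import MessageRole
from llama_cloud.types.src_app_schema_chat_chat_message import SrcAppSchemaChatChatMessage

message = SrcAppSchemaChatChatMessage(
    id="msg-0",             # required: message identifier
    index=0,                # required: position of the message in the chat
    role=MessageRole.USER,  # renamed enum from message_role.py
    content="Hello",        # optional free-text content
)
print(message.json())
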
@@ -26,7 +26,6 @@ class UserOrganizationRole(pydantic.BaseModel):
  user_id: str = pydantic.Field(description="The user's ID.")
  organization_id: str = pydantic.Field(description="The organization's ID.")
  project_ids: typing.Optional[typing.List[str]]
- role_id: str = pydantic.Field(description="The role's ID.")
  role: Role = pydantic.Field(description="The role.")
 
  def json(self, **kwargs: typing.Any) -> str:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: llama-cloud
- Version: 0.1.29
+ Version: 0.1.31
  Summary:
  License: MIT
  Author: Logan Markewich
@@ -1,5 +1,5 @@
- llama_cloud/__init__.py,sha256=c0hb7KN6meETe4lv8VSgTlArJ8oMq507m4k33Wy3M1Q,25281
- llama_cloud/client.py,sha256=ylV-19129KufjzRDCoH4yARObhdUxc9vLL4kV-7fIck,6132
+ llama_cloud/__init__.py,sha256=T-HghZZ4yA4QPgXeEvHQsmp5o8o1K2amrf7SftKYwE4,25511
+ llama_cloud/client.py,sha256=VNO5-JE1H0zWJudlDA9GJ2N6qEKQvxN5Q5QgVNTQPSI,5893
  llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
  llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
  llama_cloud/core/client_wrapper.py,sha256=xmj0jCdQ0ySzbSqHUWOkpRRy069y74I_HuXkWltcsVM,1507
@@ -9,11 +9,11 @@ llama_cloud/core/remove_none_from_dict.py,sha256=8m91FC3YuVem0Gm9_sXhJ2tGvP33owJ
  llama_cloud/environment.py,sha256=feTjOebeFZMrBdnHat4RE5aHlpt-sJm4NhK4ntV1htI,167
  llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6g,170
  llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
- llama_cloud/resources/__init__.py,sha256=n3hSlo3KQatoFhDLk7Vm_hB_5lzh70T0S2r3cSpDWec,4211
+ llama_cloud/resources/__init__.py,sha256=cFMt4FZb8n6SMbRXYzYqIR-PlJbO7C-jX4iBeCym_8E,4179
  llama_cloud/resources/admin/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/admin/client.py,sha256=mzA_ezCjugKNmvWCMWEF0Z0k86ErACWov1VtPV1J2tU,3678
  llama_cloud/resources/beta/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- llama_cloud/resources/beta/client.py,sha256=mfqHAPWQEZwZM0LRYkia36EFdGrU2sZ_Y-MM1JU_0Yg,14966
+ llama_cloud/resources/beta/client.py,sha256=uJO08z4WF3I_tVyZEu0SiwfeSx3iQaTUPZkoh6Pevs8,39144
  llama_cloud/resources/chat_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/chat_apps/client.py,sha256=orSI8rpQbUwVEToolEeiEi5Qe--suXFvfu6D9JDii5I,23595
  llama_cloud/resources/data_sinks/__init__.py,sha256=ZHUjn3HbKhq_7QS1q74r2m5RGKF5lxcvF2P6pGvpcis,147
@@ -23,7 +23,7 @@ llama_cloud/resources/data_sinks/types/data_sink_update_component.py,sha256=EWbs
  llama_cloud/resources/data_sources/__init__.py,sha256=McURkcNBGHXH1hmRDRmZI1dRzJrekCTHZsgv03r2oZI,227
  llama_cloud/resources/data_sources/client.py,sha256=SZFm8bW5nkaXringdSnmxHqvVjKM7cNNOtqVXjgTKhc,21855
  llama_cloud/resources/data_sources/types/__init__.py,sha256=Cd5xEECTzXqQSfJALfJPSjudlSLeb3RENeJVi8vwPbM,303
- llama_cloud/resources/data_sources/types/data_source_update_component.py,sha256=u9sYcs3A4ZDzKjWCH3W9xIXCcLkZkVZxwoFOhEluqJU,1173
+ llama_cloud/resources/data_sources/types/data_source_update_component.py,sha256=OjMWPLF9hKl1gUdi9d87uW7W3ITnscphTA1_NLc2PoE,1061
  llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py,sha256=3aFC-p8MSxjhOu2nFtqk0pixj6RqNqcFnbOYngUdZUk,215
  llama_cloud/resources/embedding_model_configs/__init__.py,sha256=cXDtKKq-gj7yjFjdQ5GrGyPs-T5tRV_0JjUMGlAbdUs,1115
  llama_cloud/resources/embedding_model_configs/client.py,sha256=2JDvZJtSger9QJ8luPct-2zvwjaJAR8VcKsTZ1wgYTE,17769
@@ -55,9 +55,9 @@ llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_s
  llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/organizations/client.py,sha256=CdrdNdB9R-bOsNqZ4Jbm1BzG1RafXMFjuCsrVYf2OrE,56567
  llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- llama_cloud/resources/parsing/client.py,sha256=qVYN7dFAXiGTrSyZPoM-aMsHtVDuRjcgR2skjhc1bTY,86271
+ llama_cloud/resources/parsing/client.py,sha256=EHrQKjOl_VPPbcbaXi5TSah8HBf7ooHijhMF7IEzBMg,88117
  llama_cloud/resources/pipelines/__init__.py,sha256=zyvVEOF_krvEZkCIj_kZoMKfhDqHo_R32a1mv9CriQc,1193
- llama_cloud/resources/pipelines/client.py,sha256=BcBqzTPu1LUsdimXvuaaKjUu6w5xjbL-ZBfWsO183Vk,132360
+ llama_cloud/resources/pipelines/client.py,sha256=VAqAm0oY_nXGkMPqXuzPEHS9kPtpuOE5sxfyqlzXuSI,134738
  llama_cloud/resources/pipelines/types/__init__.py,sha256=C68NQ5QzA0dFXf9oePFFGmV1vn96jcAp-QAznSgoRYQ,1375
  llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
  llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py,sha256=c8FF64fDrBMX_2RX4uY3CjbNc0Ss_AUJ4Eqs-KeV4Wc,2874
@@ -69,23 +69,21 @@ llama_cloud/resources/reports/__init__.py,sha256=cruYbQ1bIuJbRpkfaQY7ajUEslffjd7
  llama_cloud/resources/reports/client.py,sha256=kHjtXVVc1Xi3T1GyBvSW5K4mTdr6xQwZA3vw-liRKBg,46736
  llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwHww2YU90lOonBPTmZIk,291
  llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
- llama_cloud/resources/responses/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- llama_cloud/resources/responses/client.py,sha256=ard4U9yZcD89pJ_hyYqeRDIfQYaX2WGl36OK7re8q3U,5481
  llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/retrievers/client.py,sha256=z2LhmA-cZVFzr9P6loeCZYnJbvSIk0QitFeVFp-IyZk,32126
- llama_cloud/types/__init__.py,sha256=tXFGxcZWipC_PE9wpPgtXWvtcHbT0VnZCQSP4jNo-H8,30079
+ llama_cloud/types/__init__.py,sha256=ZYnUvMdFPye-wlq-XeyWUmhtVeLpi8c0UR0vSemiHP4,30490
  llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
  llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
  llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
+ llama_cloud/types/agent_data.py,sha256=Onaoc1QeIn3Il-8r1vgEzqvef92gHclCO7AC4kucEMI,1220
  llama_cloud/types/agent_deployment_list.py,sha256=7PWm2GHumo8CfqKU8fDRTJVDV4QQh8My1dhvBPO2zaA,1120
- llama_cloud/types/agent_deployment_summary.py,sha256=H1BlLyP5mouiDdELpPhiYMSrMnTvxsWx5OEbHj0LDp0,1603
- llama_cloud/types/app_schema_chat_chat_message.py,sha256=e81y7h9e-1cBSlGWuktkbq6G6ql_96qoTafXycZk8dw,1680
- llama_cloud/types/app_schema_responses_message_role.py,sha256=Wod45VMOCo-z6DWNqCOOJAaiFuZHjHCPtgqKPlVt2fI,972
+ llama_cloud/types/agent_deployment_summary.py,sha256=YEZxnNvTGYHz3zV6eGldVKfcy5S_IM-KlcOzDUqTfiU,1605
+ llama_cloud/types/aggregate_group.py,sha256=LybxFl_1snA9VgG6f7sogwO7kYAwH_I88pkYc0oMOH0,1164
  llama_cloud/types/audio_block.py,sha256=9JIGjZ8GU3C7ICv6XdNVN6_gWXyF18TJPaDuM9OUoMU,1071
  llama_cloud/types/auto_transform_config.py,sha256=HVeHZM75DMRznScqLTfrMwcZwIdyWPuaEYbPewnHqwc,1168
  llama_cloud/types/azure_open_ai_embedding.py,sha256=MeDqZoPYFN7Nv_imY9cfqDU9SPlEyAY4HcQZ4PF5X3g,2264
  llama_cloud/types/azure_open_ai_embedding_config.py,sha256=o1zZhzcGElH3SeixFErrm7P_WFHQ6LvrLem_nKJWunw,1170
- llama_cloud/types/base_plan.py,sha256=5DZi20EOciTc5okLAxQDqyGylsW-DflTy14dcvQb2fQ,1910
+ llama_cloud/types/base_plan.py,sha256=kuRJi-OxFHbKAxoQWe08IG45_i8xL67WeOZFCGWkOHI,2049
  llama_cloud/types/base_plan_metronome_plan_type.py,sha256=I3g_dVoWWztbmpWpYmseDqQSbwtlLUl2vS01tfgMjEA,499
  llama_cloud/types/base_plan_name.py,sha256=keHQaw9YV9ghsWnGfnHrLtB4qNz0v4TWX4_MoO3flRM,1926
  llama_cloud/types/base_plan_plan_frequency.py,sha256=idUZlDaSdMrMZ2lQ1ytBWM4QyduIZu6Gt2eLU0LVqH4,684
@@ -108,7 +106,6 @@ llama_cloud/types/cloud_box_data_source.py,sha256=9bffCaKGvctSsk9OdTpzzP__O1NDpb
  llama_cloud/types/cloud_confluence_data_source.py,sha256=ok8BOv51SC4Ia9kX3DC8LuZjnP8hmdy-vqzOrTZek2A,1720
  llama_cloud/types/cloud_document.py,sha256=Rg_H8lcz2TzxEAIdU-m5mGpkM7s0j1Cn4JHkXYddmGs,1255
  llama_cloud/types/cloud_document_create.py,sha256=fQ1gZAtLCpr-a-sPbMez_5fK9JMU3uyp2tNvIzWNG3U,1278
- llama_cloud/types/cloud_google_drive_data_source.py,sha256=Gzr9vtw57Hl2hxa9qoWdIO6XO3DfSLvivJbABVQDJDQ,1219
  llama_cloud/types/cloud_jira_data_source.py,sha256=9R20k8Ne0Bl9X5dgSxpM_IGOFmC70Llz0pJ93rAKRvw,1458
  llama_cloud/types/cloud_milvus_vector_store.py,sha256=CHFTJSYPZKYPUU-jpB1MG8OwRvnPiT07o7cYCvQMZLA,1235
  llama_cloud/types/cloud_mongo_db_atlas_vector_search.py,sha256=CQ9euGBd3a72dvpTapRBhakme-fQbY2OaSoe0GDSHDo,1771
@@ -134,9 +131,9 @@ llama_cloud/types/data_sink_component.py,sha256=uvuxLY3MPDpv_bkT0y-tHSZVPRSHCkDB
  llama_cloud/types/data_sink_create.py,sha256=dAaFPCwZ5oX0Fbf7ij62dzSaYnrhj3EHmnLnYnw2KgI,1360
  llama_cloud/types/data_sink_create_component.py,sha256=8QfNKSTJV_sQ0nJxlpfh0fBkMTSnQD1DTJR8ZMYaesI,755
  llama_cloud/types/data_source.py,sha256=4_lTRToLO4u9LYK66VygCPycrZuyct_aiovlxG5H2sE,1768
- llama_cloud/types/data_source_component.py,sha256=yfXHoeHaqUMum7fIs3tZB0pOFMhDbAq7oCJtnob0gWY,1077
+ llama_cloud/types/data_source_component.py,sha256=QBxAneOFe8crS0z-eFo3gd1siToQ4hYsLdfB4p3ZeVU,974
  llama_cloud/types/data_source_create.py,sha256=s0bAX_GUwiRdrL-PXS9ROrvq3xpmqbqzdMa6thqL2P4,1581
- llama_cloud/types/data_source_create_component.py,sha256=-P4FGv9Xg951n-77_bb-2_CF-33ZXcUkw52LPQNunBY,1083
+ llama_cloud/types/data_source_create_component.py,sha256=6dlkvut0gyy6JA_F4--xPHYOCHi14N6oooWOnOEugzE,980
  llama_cloud/types/data_source_create_custom_metadata_value.py,sha256=ejSsQNbszYQaUWFh9r9kQpHf88qbhuRv1SI9J_MOSC0,215
  llama_cloud/types/data_source_custom_metadata_value.py,sha256=pTZn5yjZYmuOhsLABFJOKZblZUkRqo1CqLAuP5tKji4,209
  llama_cloud/types/data_source_update_dispatcher_config.py,sha256=Sh6HhXfEV2Z6PYhkYQucs2MxyKVpL3UPV-I4cbf--bA,1242
@@ -187,6 +184,13 @@ llama_cloud/types/file_parse_public.py,sha256=sshZ0BcjHMGpuz4ylSurv0K_3ejfPrUGGy
  llama_cloud/types/file_permission_info_value.py,sha256=RyQlNbhvIKS87Ywu7XUaw5jDToZX64M9Wqzu1U_q2Us,197
  llama_cloud/types/file_resource_info_value.py,sha256=g6T6ELeLK9jgcvX6r-EuAl_4JkwnyqdS0RRoabMReSU,195
  llama_cloud/types/filter_condition.py,sha256=YEc-NaZbMha4oZVSKerZ6-gNYriNOZmTHTRMKX-9Ju0,678
+ llama_cloud/types/filter_operation.py,sha256=lzyF_LQ-bT_wubU2bSbV6q2oncCE3mypz3D6qkAR86U,1663
+ llama_cloud/types/filter_operation_eq.py,sha256=7UQkjycQvUFBvd1KRWfNacXAEgp2eGG6XNej0EikP1M,165
+ llama_cloud/types/filter_operation_gt.py,sha256=ueeaTBhCGM0xUWLjdFei55ecbtbR3jFuiAtXrinFNDk,165
+ llama_cloud/types/filter_operation_gte.py,sha256=A_8I_-EpBNqcX_KbwMdhXI0Kno3WCwZnPofSRJxECpU,166
+ llama_cloud/types/filter_operation_includes_item.py,sha256=kwI0NjIZVUfaNU3BBue-AAEkPl_42_GjE_CR0OwZV5Y,175
+ llama_cloud/types/filter_operation_lt.py,sha256=Njv9OnuI3tzo88EAMhsVN8BvuzR1164GQP4SggbZe1U,165
+ llama_cloud/types/filter_operation_lte.py,sha256=5Evci2M4XfkkWMlY746t52OiTYiO9SaIJ72QDPu2G7U,166
  llama_cloud/types/filter_operator.py,sha256=tY_DWFVOoLrqDc-soJcSFvUL-MsltK6iLSK7IKK-TPs,2439
  llama_cloud/types/free_credits_usage.py,sha256=TPktesYpM5gVeBXPbRFun19XaPJo-dIu0Xbrg-iX8qE,1052
  llama_cloud/types/gemini_embedding.py,sha256=n9vuxFbXt_VNuaZvp7BlkFWmGMgehpJz_ICacIafdYw,1418
@@ -197,20 +201,19 @@ llama_cloud/types/hugging_face_inference_api_embedding_config.py,sha256=EFHhuPCx
  llama_cloud/types/hugging_face_inference_api_embedding_token.py,sha256=A7-_YryBcsP4G5uRyJ9acao3XwX5-YC3NRndTeDAPj4,144
  llama_cloud/types/image_block.py,sha256=Bccrsm1-B2hUzObP7Oy1H7IVnurixfTpL03i-yqfZp0,1112
  llama_cloud/types/ingestion_error_response.py,sha256=8u0cyT44dnpkNeUKemTvJMUqi_WyPcYQKP_DMTqaFPY,1259
- llama_cloud/types/input_message.py,sha256=H7XMpGjkk7f9Fgz4YuuD9OBpNDR68lnP91LxCP1R-Vw,1433
+ llama_cloud/types/input_message.py,sha256=Ym6-tX6CMWKuHfxRtyM2y16kqSS3BzHged9rFRFkX0g,1346
  llama_cloud/types/job_name_mapping.py,sha256=2dQFQlVHoeSlkyEKSEJv0M3PzJf7hMvkuABj3vMY7ys,1617
  llama_cloud/types/job_names.py,sha256=WacongwoJygg_gCyYjPsOVv3cmVtRaX633JNgFxy-d8,3915
  llama_cloud/types/job_record.py,sha256=Z6sF9AruZJo-kTRgNufAWS3WK1yaEqop6kox1GpBYy4,2219
  llama_cloud/types/job_record_parameters.py,sha256=Oqxp5y0owPfjLc_NR7AYE8P3zM2PJo36N9olbyNl7AA,3425
  llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyAaM6Cn9G8KG-dGPGgEs5SY,1198
  llama_cloud/types/l_lama_parse_transform_config.py,sha256=YQRJZvKh1Ee2FUyW_N0nqYJoW599qBgH3JCH9SH6YLo,1249
- llama_cloud/types/legacy_parse_job_config.py,sha256=9NdRkGkUhDkJBMDBvBDmhq_Mkf6bSROrtECVpttlD8k,11878
+ llama_cloud/types/legacy_parse_job_config.py,sha256=zZJFYnquo51NbEXhw-yhpRjIuaJNgg-T_fAI2J7-hrM,12660
  llama_cloud/types/license_info_response.py,sha256=fE9vcWO8k92SBqb_wOyBu_16C61s72utA-SifEi9iBc,1192
- llama_cloud/types/llama_extract_settings.py,sha256=bHtF5AD0r896-248e7WKthcbbvrAUdptZrENP2Ed4LM,2388
- llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=tF54vcCwjArHWozzC81bCZfI4gJBmhnx6s592VoQ5UM,1452
+ llama_cloud/types/llama_extract_settings.py,sha256=Y60XxsxVHUtX-ZjC0tyNzsaDIj_ojxYC1iy2w4vti54,2532
+ llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-T-ebVMhRKsqE_xV8AJW4c7o6lS0uEQnPsmTwg,1365
  llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=-aL8fh-w2Xf4uQs_LHzb3q6LL_onLAcVzCR5yMI4qJw,1571
- llama_cloud/types/llama_index_core_base_llms_types_message_role.py,sha256=i8G2QGRrEUmb1P9BrKW3frfTOQ9RlJvMU0FMCRNpE5c,1602
- llama_cloud/types/llama_parse_parameters.py,sha256=-U3T_hM--Le35YU6b4lwv_1QG5agy8-Ib9N1DuSnQYs,6000
+ llama_cloud/types/llama_parse_parameters.py,sha256=pgWWbeaoC8p01_c0bC--ksHjJt-_A7QUhQdCW4MAVIQ,6325
  llama_cloud/types/llama_parse_parameters_priority.py,sha256=EFRudtaID_s8rLKlfW8O8O9TDbpZdniIidK-xchhfRI,830
  llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
  llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
@@ -218,13 +221,12 @@ llama_cloud/types/llm_parameters.py,sha256=RTKYt09lm9a1MlnBfYuTP2x_Ww4byUNNc1TqI
  llama_cloud/types/load_files_job_config.py,sha256=R5sFgFmV__0mqLUuD7dkFoBJHG2ZLw5px9zRapvYcpE,1069
  llama_cloud/types/managed_ingestion_status.py,sha256=3KVlcurpEBOPAesBUS5pSYLoQVIyZUlr90Mmv-uALHE,1290
  llama_cloud/types/managed_ingestion_status_response.py,sha256=rdNpjNbQswF-6JG1e-EU374TP6Pjlxl0p7HJyNmuxTI,1373
- llama_cloud/types/message.py,sha256=RnahpUQR7s_QOu7pOdS7GNkZe8rJaPLC-wTJbzhSQV8,1373
  llama_cloud/types/message_annotation.py,sha256=n4F9w4LxwmGvgXDk6E8YPTMu_g0yEjZhZ_eNFXdS_bc,1017
+ llama_cloud/types/message_role.py,sha256=9MpXT9drR33TyT1-NiqB3uGbuxvWwtoOdSmKQE9HmJI,1359
  llama_cloud/types/metadata_filter.py,sha256=LX2fGsUb4wvF5bj9iWO6IPQGi3i0L2Lb4cE6igeeX9Y,1438
  llama_cloud/types/metadata_filter_value.py,sha256=ij721gXNI7zbgsuDl9-AqBcXg2WDuVZhYS5F5YqekEs,188
  llama_cloud/types/metadata_filters.py,sha256=uSf6sB4oQu6WzMPNFG6Tc4euqEiYcj_X14Y5JWt9xVE,1315
  llama_cloud/types/metadata_filters_filters_item.py,sha256=e8KhD2q6Qc2_aK6r5CvyxC0oWVYO4F4vBIcB9eMEPPM,246
- llama_cloud/types/model_configuration.py,sha256=JD_KSml2EB1EpwuuJUdwZeN4_aO7mTd76tQ16zK2vuU,1370
  llama_cloud/types/node_relationship.py,sha256=2e2PqWm0LOTiImvtsyiuaAPNIl0BItjSrQZTJv65GRA,1209
  llama_cloud/types/none_chunking_config.py,sha256=D062t314Vp-s4n9h8wNgsYfElI4PonPKmihvjEmaqdA,952
  llama_cloud/types/none_segmentation_config.py,sha256=j3jUA6E8uFtwDMEu4TFG3Q4ZGCGiuUfUW9AMO1NNqXU,956
@@ -243,7 +245,9 @@ llama_cloud/types/paginated_jobs_history_with_metrics.py,sha256=Bxy6N0x0FARJhgwN
  llama_cloud/types/paginated_list_cloud_documents_response.py,sha256=MsjS0SWlT0syELDck4x2sxxR3_NC1e6QTdepgVmK9aY,1341
  llama_cloud/types/paginated_list_pipeline_files_response.py,sha256=2TKR2oHSQRyLMqWz1qQBSIvz-ZJb8U_94367lwOJ2S4,1317
  llama_cloud/types/paginated_report_response.py,sha256=o79QhQi9r0HZZrhvRlA6WGjxtyPuxN0xONhwXSwxtcs,1104
- llama_cloud/types/parse_job_config.py,sha256=sTgkLHhV-FMcf6pfhFpOEHOUQQqw-g8PDNcekDQ86Fo,6463
+ llama_cloud/types/paginated_response_agent_data.py,sha256=u6Y-Cq9qjGF5tskMOQChUNqyI91Tk-uQ6vQdi69cs80,1159
+ llama_cloud/types/paginated_response_aggregate_group.py,sha256=1ajZLZJLU6-GuQ_PPsEVRFZ6bm9he807F_F_DmB2HlQ,1179
+ llama_cloud/types/parse_job_config.py,sha256=gLRQOaPgTfQuaNzriYtjDPucSFXt1AWyG19tGfzoy5M,6788
  llama_cloud/types/parse_job_config_priority.py,sha256=__-gVv1GzktVCYZVyl6zeDt0pAZwYl-mxM0xkIHPEro,800
  llama_cloud/types/parse_plan_level.py,sha256=GBkDS19qfHseBa17EXfuTPNT4GNv5alyPrWEvWji3GY,528
  llama_cloud/types/parser_languages.py,sha256=Ps3IlaSt6tyxEI657N3-vZL96r2puk8wsf31cWnO-SI,10840
@@ -265,7 +269,7 @@ llama_cloud/types/pipeline_create.py,sha256=PKchM5cxkidXVFv2qON0uVh5lv8aqsy5OrZv
  llama_cloud/types/pipeline_create_embedding_config.py,sha256=PQqmVBFUyZXYKKBmVQF2zPsGp1L6rje6g3RtXEcdfc8,2811
  llama_cloud/types/pipeline_create_transform_config.py,sha256=HP6tzLsw_pomK1Ye2PYCS_XDZK_TMgg22mz17_zYKFg,303
  llama_cloud/types/pipeline_data_source.py,sha256=g8coq6ohp09TtqzvB3_A8Nzery3J5knIfxGWzUtozmg,2381
- llama_cloud/types/pipeline_data_source_component.py,sha256=c_R2aBl7XXsfJ_ZuK_-PXzzL2nDI4jrbJ0BStlzp87Y,1085
+ llama_cloud/types/pipeline_data_source_component.py,sha256=pcAIb6xuRJajDVBF_a4_2USPLtZ8ve-WQvSdKKQu50Q,982
  llama_cloud/types/pipeline_data_source_create.py,sha256=wMsymqB-YGyf3jdQr-N5ODVG6v0w68EMxGBNdQXeJe0,1178
  llama_cloud/types/pipeline_data_source_custom_metadata_value.py,sha256=8n3r60sxMx4_udW0yzJZxzyWeK6L3cc2-jLGZFW4EDs,217
  llama_cloud/types/pipeline_data_source_status.py,sha256=BD4xoftwp9lWC8EjJTnf3boIG_AyzjLPuP4qJxGhmcc,1039
@@ -287,7 +291,7 @@ llama_cloud/types/pipeline_status.py,sha256=aC340nhfuPSrFVZOH_DhgYHWe985J3WNHrwv
  llama_cloud/types/pipeline_transform_config.py,sha256=zMr-ePLKGjbaScxbAHaSwYBL7rrNibVlnn0cbgElDfU,824
  llama_cloud/types/pipeline_type.py,sha256=tTqrhxHP5xd7W2dQGD0e5FOv886nwJssyaVlXpWrtRo,551
  llama_cloud/types/plan_limits.py,sha256=WAbDbRl8gsQxvhmuVB0YT8mry-0uKg6c66uivyppdQU,2056
- llama_cloud/types/playground_session.py,sha256=F8u2KZL2YaOrsT-o1n4zbhyPxSsoduc3ZCzQB8AecFA,1858
+ llama_cloud/types/playground_session.py,sha256=BZZk9F_FVuMPcCE5dVNACPqHKIvyWGSkbRrrQOweaaw,1868
  llama_cloud/types/pooling.py,sha256=5Fr6c8rx9SDWwWzEvD78suob2d79ktodUtLUAUHMbP8,651
  llama_cloud/types/preset_composite_retrieval_params.py,sha256=yEf1pk4Wz5J6SxgB8elklwuyVDCRSZqfWC6x3hJUS4Q,1366
  llama_cloud/types/preset_retrieval_params.py,sha256=TcyljefpspJSveMR9L5DQHlqW4jZeexBsXus_LkHkJA,2365
@@ -323,17 +327,17 @@ llama_cloud/types/retrieve_results.py,sha256=rHArmu05K3NvIQepHX5nsVOfcMsZj3MaIcP
  llama_cloud/types/retriever.py,sha256=ZItPsorL8x1XjtJT49ZodaMqU8h2GfwlB4U4cgnfZkM,1626
  llama_cloud/types/retriever_create.py,sha256=WyUR9DRzu3Q9tzKEeXCdQuzCY6WKi9ADJkZea9rqvxU,1286
  llama_cloud/types/retriever_pipeline.py,sha256=F1pZDxg8JdQXRHE6ciFezd7a-Wv5bHplPcGDED-J4b0,1330
- llama_cloud/types/role.py,sha256=SCi2TyFbc68RJuNB-OdcP8ut03Uv5zPZk84QMmf17w8,1384
+ llama_cloud/types/role.py,sha256=4pbyLVNPleDd624cDcOhu9y1WvqC0J0gmNirTOW97iA,1342
  llama_cloud/types/schema_relax_mode.py,sha256=v4or6dYTvWvBBNtEd2ZSaUAb1706I0Zuh-Xztm-zx_0,635
  llama_cloud/types/semantic_chunking_config.py,sha256=dFDniTVWpRc7UcmVFvljUoyL5Ztd-l-YrHII7U-yM-k,1053
  llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9Co_bzRWeYe4uSh9I,1116
+ llama_cloud/types/src_app_schema_chat_chat_message.py,sha256=ddMQXZybeExPVFMNe8FWghyXXWktsujpZ_0Xmou3Zz8,1596
  llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDPvx0,1018
  llama_cloud/types/struct_mode.py,sha256=ROicwjXfFmgVU8_xSVxJlnFUzRNKG5VIEF1wYg9uOPU,1020
  llama_cloud/types/struct_parse_conf.py,sha256=WlL8y0IBvdzGsDtFUlEZLzoUODwmOWAJi0viS9unL18,2297
  llama_cloud/types/supported_llm_model.py,sha256=hubSopFICVNEegbJbtbpK6zRHwFPwUNtrw_NAw_3bfg,1380
  llama_cloud/types/supported_llm_model_names.py,sha256=PXL0gA1lc0GJNzZHnjOscoxHpPW787A8Adh-2egAKo8,2512
  llama_cloud/types/text_block.py,sha256=X154sQkSyposXuRcEWNp_tWcDQ-AI6q_-MfJUN5exP8,958
- llama_cloud/types/text_content_block.py,sha256=MKMBMhJS7Tr-Vmr4MhhDgH8pO6r-_g_8bjWYT8LxitA,1130
  llama_cloud/types/text_node.py,sha256=Tq3QmuKC5cIHvC9wAtvhsXl1g2sACs2yJwQ0Uko8GSU,2846
  llama_cloud/types/text_node_relationships_value.py,sha256=qmXURTk1Xg7ZDzRSSV1uDEel0AXRLohND5ioezibHY0,217
  llama_cloud/types/text_node_with_score.py,sha256=k-KYWO_mgJBvO6xUfOD5W6v1Ku9E586_HsvDoQbLfuQ,1229
@@ -346,7 +350,7 @@ llama_cloud/types/user_job_record.py,sha256=mJHdokJsemXJOwM2l7fsW3X0SlwSNcy7yHbc
  llama_cloud/types/user_organization.py,sha256=yKewpOrMcB-CbujGNTjkX6QiWYr5HVsRIFQ-WX8kp2I,1729
  llama_cloud/types/user_organization_create.py,sha256=Zj57s9xuYVnLW2p8i4j2QORL-G1y7Ab3avXE1baERQY,1189
  llama_cloud/types/user_organization_delete.py,sha256=bEfgQMdTd6oAMZXtvSm5BhZahG1wAVDBXZ8e7V9UN7w,1159
- llama_cloud/types/user_organization_role.py,sha256=vTM5pYG9NJpTQACn8vzSIt01Ul6jEHCVmyR3vV0isPg,1512
+ llama_cloud/types/user_organization_role.py,sha256=Tcfu9QISF5nRpo9jvboHzX-Yfg6b676UNfdjzjUIgAs,1448
  llama_cloud/types/validation_error.py,sha256=yZDLtjUHDY5w82Ra6CW0H9sLAr18R0RY1UNgJKR72DQ,1084
  llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPXjdtN9EB7HrLVo6EP0,128
  llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
@@ -354,7 +358,7 @@ llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2B
  llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
  llama_cloud/types/webhook_configuration.py,sha256=_Xm15whrWoKNBuCoO5y_NunA-ByhCAYK87LnC4W-Pzg,1350
  llama_cloud/types/webhook_configuration_webhook_events_item.py,sha256=LTfOwphnoYUQYwsHGTlCxoVU_PseIRAbmQJRBdyXnbg,1519
- llama_cloud-0.1.29.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
- llama_cloud-0.1.29.dist-info/METADATA,sha256=6yOwKuEn6gMPGcWviE_L-LI5gY4g5n4bAB0PUZQINLc,1194
- llama_cloud-0.1.29.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- llama_cloud-0.1.29.dist-info/RECORD,,
+ llama_cloud-0.1.31.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+ llama_cloud-0.1.31.dist-info/METADATA,sha256=8CeVAzwOJTcdsTWT_WakSrnOErE56P3ZK70PxR5g5b8,1194
+ llama_cloud-0.1.31.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ llama_cloud-0.1.31.dist-info/RECORD,,
@@ -1,2 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
@@ -1,137 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import typing
- import urllib.parse
- from json.decoder import JSONDecodeError
-
- from ...core.api_error import ApiError
- from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
- from ...core.jsonable_encoder import jsonable_encoder
- from ...core.remove_none_from_dict import remove_none_from_dict
- from ...errors.unprocessable_entity_error import UnprocessableEntityError
- from ...types.http_validation_error import HttpValidationError
- from ...types.message import Message
- from ...types.model_configuration import ModelConfiguration
-
- try:
- import pydantic
- if pydantic.__version__.startswith("1."):
- raise ImportError
- import pydantic.v1 as pydantic # type: ignore
- except ImportError:
- import pydantic # type: ignore
-
- # this is used as the default value for optional parameters
- OMIT = typing.cast(typing.Any, ...)
-
-
- class ResponsesClient:
- def __init__(self, *, client_wrapper: SyncClientWrapper):
- self._client_wrapper = client_wrapper
-
- def generate_response(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- messages: typing.List[Message],
- model_configuration: ModelConfiguration,
- ) -> typing.Any:
- """
- EXPERIMENTAL - SSE endpoint for basic response generation (dummy stream).
-
- Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
- - messages: typing.List[Message]. List of messages in the conversation
-
- - model_configuration: ModelConfiguration. Configuration for the model to use in the response
- ---
- from llama_cloud import ModelConfiguration, SupportedLlmModelNames
- from llama_cloud.client import LlamaCloud
-
- client = LlamaCloud(
- token="YOUR_TOKEN",
- )
- client.responses.generate_response(
- messages=[],
- model_configuration=ModelConfiguration(
- model_name=SupportedLlmModelNames.GPT_4_O,
- ),
- )
- """
- _response = self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/responses/generate"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
- json=jsonable_encoder({"messages": messages, "model_configuration": model_configuration}),
- headers=self._client_wrapper.get_headers(),
- timeout=60,
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
- if _response.status_code == 422:
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
-
- class AsyncResponsesClient:
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
- self._client_wrapper = client_wrapper
-
- async def generate_response(
- self,
- *,
- project_id: typing.Optional[str] = None,
- organization_id: typing.Optional[str] = None,
- messages: typing.List[Message],
- model_configuration: ModelConfiguration,
- ) -> typing.Any:
- """
- EXPERIMENTAL - SSE endpoint for basic response generation (dummy stream).
-
- Parameters:
- - project_id: typing.Optional[str].
-
- - organization_id: typing.Optional[str].
-
- - messages: typing.List[Message]. List of messages in the conversation
-
- - model_configuration: ModelConfiguration. Configuration for the model to use in the response
- ---
- from llama_cloud import ModelConfiguration, SupportedLlmModelNames
- from llama_cloud.client import AsyncLlamaCloud
-
- client = AsyncLlamaCloud(
- token="YOUR_TOKEN",
- )
- await client.responses.generate_response(
- messages=[],
- model_configuration=ModelConfiguration(
- model_name=SupportedLlmModelNames.GPT_4_O,
- ),
- )
- """
- _response = await self._client_wrapper.httpx_client.request(
- "POST",
- urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/responses/generate"),
- params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
- json=jsonable_encoder({"messages": messages, "model_configuration": model_configuration}),
- headers=self._client_wrapper.get_headers(),
- timeout=60,
- )
- if 200 <= _response.status_code < 300:
- return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
- if _response.status_code == 422:
- raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
- try:
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
@@ -1,33 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import enum
- import typing
-
- T_Result = typing.TypeVar("T_Result")
-
-
- class AppSchemaResponsesMessageRole(str, enum.Enum):
- """
- Enum representing the role of a message in a conversation.
-
- - system: The system message that sets the context or instructions.
- - user: The user's message in the conversation.
- - assistant: The AI assistant's response in the conversation.
- """
-
- SYSTEM = "system"
- USER = "user"
- ASSISTANT = "assistant"
-
- def visit(
- self,
- system: typing.Callable[[], T_Result],
- user: typing.Callable[[], T_Result],
- assistant: typing.Callable[[], T_Result],
- ) -> T_Result:
- if self is AppSchemaResponsesMessageRole.SYSTEM:
- return system()
- if self is AppSchemaResponsesMessageRole.USER:
- return user()
- if self is AppSchemaResponsesMessageRole.ASSISTANT:
- return assistant()