llama-cloud 0.1.19__py3-none-any.whl → 0.1.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic.

Files changed (52)
  1. llama_cloud/__init__.py +166 -26
  2. llama_cloud/resources/__init__.py +41 -2
  3. llama_cloud/resources/data_sinks/__init__.py +18 -2
  4. llama_cloud/resources/data_sinks/client.py +2 -94
  5. llama_cloud/resources/data_sinks/types/__init__.py +18 -2
  6. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +65 -7
  7. llama_cloud/resources/data_sources/__init__.py +30 -2
  8. llama_cloud/resources/data_sources/types/__init__.py +28 -1
  9. llama_cloud/resources/data_sources/types/data_source_update_component.py +2 -23
  10. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +122 -0
  11. llama_cloud/resources/files/client.py +18 -4
  12. llama_cloud/resources/parsing/client.py +8 -0
  13. llama_cloud/resources/pipelines/client.py +11 -11
  14. llama_cloud/types/__init__.py +146 -28
  15. llama_cloud/types/cloud_jira_data_source.py +0 -4
  16. llama_cloud/types/data_sink_component.py +65 -7
  17. llama_cloud/types/data_sink_create_component.py +65 -7
  18. llama_cloud/types/data_source_component.py +2 -23
  19. llama_cloud/types/data_source_component_one.py +122 -0
  20. llama_cloud/types/data_source_create_component.py +2 -23
  21. llama_cloud/types/data_source_create_component_one.py +122 -0
  22. llama_cloud/types/{base_prompt_template.py → data_source_update_dispatcher_config.py} +9 -7
  23. llama_cloud/types/{node_parser.py → delete_params.py} +7 -9
  24. llama_cloud/types/document_ingestion_job_params.py +43 -0
  25. llama_cloud/types/job_record.py +2 -2
  26. llama_cloud/types/job_record_parameters.py +111 -0
  27. llama_cloud/types/{page_splitter_node_parser.py → l_lama_parse_transform_config.py} +5 -10
  28. llama_cloud/types/legacy_parse_job_config.py +189 -0
  29. llama_cloud/types/llama_parse_parameters.py +1 -0
  30. llama_cloud/types/load_files_job_config.py +35 -0
  31. llama_cloud/types/parse_job_config.py +134 -0
  32. llama_cloud/types/pipeline.py +2 -4
  33. llama_cloud/types/pipeline_create.py +2 -2
  34. llama_cloud/types/pipeline_data_source_component.py +2 -23
  35. llama_cloud/types/pipeline_data_source_component_one.py +122 -0
  36. llama_cloud/types/pipeline_file_update_dispatcher_config.py +38 -0
  37. llama_cloud/types/{configured_transformation_item.py → pipeline_file_updater_config.py} +13 -12
  38. llama_cloud/types/pipeline_managed_ingestion_job_params.py +37 -0
  39. llama_cloud/types/pipeline_metadata_config.py +36 -0
  40. {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.20.dist-info}/METADATA +4 -2
  41. {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.20.dist-info}/RECORD +43 -40
  42. {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.20.dist-info}/WHEEL +1 -1
  43. llama_cloud/types/character_splitter.py +0 -46
  44. llama_cloud/types/code_splitter.py +0 -50
  45. llama_cloud/types/configured_transformation_item_component.py +0 -22
  46. llama_cloud/types/llm.py +0 -60
  47. llama_cloud/types/markdown_element_node_parser.py +0 -51
  48. llama_cloud/types/markdown_node_parser.py +0 -52
  49. llama_cloud/types/pydantic_program_mode.py +0 -41
  50. llama_cloud/types/sentence_splitter.py +0 -50
  51. llama_cloud/types/token_text_splitter.py +0 -50
  52. {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.20.dist-info}/LICENSE +0 -0
llama_cloud/types/pydantic_program_mode.py
@@ -1,41 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import enum
-import typing
-
-T_Result = typing.TypeVar("T_Result")
-
-
-class PydanticProgramMode(str, enum.Enum):
-    """
-    Pydantic program mode.
-    """
-
-    DEFAULT = "default"
-    OPENAI = "openai"
-    LLM = "llm"
-    FUNCTION = "function"
-    GUIDANCE = "guidance"
-    LM_FORMAT_ENFORCER = "lm-format-enforcer"
-
-    def visit(
-        self,
-        default: typing.Callable[[], T_Result],
-        openai: typing.Callable[[], T_Result],
-        llm: typing.Callable[[], T_Result],
-        function: typing.Callable[[], T_Result],
-        guidance: typing.Callable[[], T_Result],
-        lm_format_enforcer: typing.Callable[[], T_Result],
-    ) -> T_Result:
-        if self is PydanticProgramMode.DEFAULT:
-            return default()
-        if self is PydanticProgramMode.OPENAI:
-            return openai()
-        if self is PydanticProgramMode.LLM:
-            return llm()
-        if self is PydanticProgramMode.FUNCTION:
-            return function()
-        if self is PydanticProgramMode.GUIDANCE:
-            return guidance()
-        if self is PydanticProgramMode.LM_FORMAT_ENFORCER:
-            return lm_format_enforcer()
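
For context, the removed module defined a plain string Enum plus an exhaustive visit() dispatcher that makes callers handle every variant. A minimal usage sketch, assuming llama-cloud==0.1.19 is still installed (the module is gone in 0.1.20) and importing from the module path shown in the file list above; this is illustrative only, not an official example:

# Illustrative only; import path reflects the 0.1.19 layout shown in this diff.
from llama_cloud.types.pydantic_program_mode import PydanticProgramMode

mode = PydanticProgramMode("openai")  # str-valued enum, so lookup by value works

# visit() returns the result of whichever callback matches the current variant.
label = mode.visit(
    default=lambda: "default program",
    openai=lambda: "OpenAI function calling",
    llm=lambda: "generic LLM completion",
    function=lambda: "function calling",
    guidance=lambda: "guidance-constrained output",
    lm_format_enforcer=lambda: "lm-format-enforcer-constrained output",
)
print(label)  # "OpenAI function calling"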
llama_cloud/types/sentence_splitter.py
@@ -1,50 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-from ..core.datetime_utils import serialize_datetime
-
-try:
-    import pydantic
-    if pydantic.__version__.startswith("1."):
-        raise ImportError
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
-
-class SentenceSplitter(pydantic.BaseModel):
-    """
-    Parse text with a preference for complete sentences.
-
-    In general, this class tries to keep sentences and paragraphs together. Therefore
-    compared to the original TokenTextSplitter, there are less likely to be
-    hanging sentences or parts of sentences at the end of the node chunk.
-    """
-
-    include_metadata: typing.Optional[bool] = pydantic.Field(
-        description="Whether or not to consider metadata when splitting."
-    )
-    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
-    chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
-    chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
-    separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
-    paragraph_separator: typing.Optional[str] = pydantic.Field(description="Separator between paragraphs.")
-    secondary_chunking_regex: typing.Optional[str]
-    class_name: typing.Optional[str]
-
-    def json(self, **kwargs: typing.Any) -> str:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().json(**kwargs_with_defaults)
-
-    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().dict(**kwargs_with_defaults)
-
-    class Config:
-        frozen = True
-        smart_union = True
-        json_encoders = {dt.datetime: serialize_datetime}
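
The removed model is a frozen pydantic (v1-style) config object whose json() and dict() overrides default to by_alias=True and exclude_unset=True, so only explicitly set fields appear in the serialized payload. A small usage sketch, again assuming llama-cloud==0.1.19, where the module still exists; illustrative only:

# Illustrative only; module path taken from the 0.1.19 file list above.
from llama_cloud.types.sentence_splitter import SentenceSplitter

splitter = SentenceSplitter(chunk_size=512, chunk_overlap=32)

# exclude_unset=True drops optional fields that were never assigned.
print(splitter.json())  # roughly '{"chunk_size": 512, "chunk_overlap": 32}'

# Config.frozen = True makes instances immutable, so assignment would raise:
# splitter.chunk_size = 1024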
llama_cloud/types/token_text_splitter.py
@@ -1,50 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-from ..core.datetime_utils import serialize_datetime
-
-try:
-    import pydantic
-    if pydantic.__version__.startswith("1."):
-        raise ImportError
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
-
-class TokenTextSplitter(pydantic.BaseModel):
-    """
-    Implementation of splitting text that looks at word tokens.
-    """
-
-    include_metadata: typing.Optional[bool] = pydantic.Field(
-        description="Whether or not to consider metadata when splitting."
-    )
-    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
-    chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
-    chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
-    separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
-    backup_separators: typing.Optional[typing.List[typing.Any]] = pydantic.Field(
-        description="Additional separators for splitting."
-    )
-    keep_whitespaces: typing.Optional[bool] = pydantic.Field(
-        description="Whether to keep leading/trailing whitespaces in the chunk."
-    )
-    class_name: typing.Optional[str]
-
-    def json(self, **kwargs: typing.Any) -> str:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().json(**kwargs_with_defaults)
-
-    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().dict(**kwargs_with_defaults)
-
-    class Config:
-        frozen = True
-        smart_union = True
-        json_encoders = {dt.datetime: serialize_datetime}
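
Because these modules are removed in 0.1.20, code that imported them from llama_cloud.types needs to either pin llama-cloud==0.1.19 or guard the import. A minimal, illustrative compatibility check, with the module path taken from the file list above (nothing here is an official llama-cloud API):

from importlib.metadata import version

print("llama-cloud", version("llama-cloud"))  # e.g. "0.1.19" or "0.1.20"

try:
    # Present in 0.1.19, removed in 0.1.20.
    from llama_cloud.types.token_text_splitter import TokenTextSplitter
except ImportError:
    TokenTextSplitter = None  # removed in 0.1.20; configure chunking elsewhere

if TokenTextSplitter is not None:
    cfg = TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
    print(cfg.dict())  # only explicitly set fields, serialized by alias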