llama-cloud 0.0.16__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of llama-cloud was flagged as potentially problematic.

Files changed (133)
  1. llama_cloud/__init__.py +8 -30
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +2 -10
  4. llama_cloud/resources/data_sinks/__init__.py +2 -2
  5. llama_cloud/resources/data_sinks/client.py +2 -2
  6. llama_cloud/resources/data_sinks/types/__init__.py +1 -2
  7. llama_cloud/resources/data_sinks/types/data_sink_update_component.py +19 -2
  8. llama_cloud/resources/data_sources/__init__.py +2 -2
  9. llama_cloud/resources/data_sources/client.py +6 -6
  10. llama_cloud/resources/data_sources/types/__init__.py +1 -2
  11. llama_cloud/resources/data_sources/types/data_source_update_component.py +23 -2
  12. llama_cloud/resources/extraction/client.py +14 -14
  13. llama_cloud/resources/files/client.py +10 -10
  14. llama_cloud/resources/organizations/client.py +2 -2
  15. llama_cloud/resources/parsing/client.py +68 -60
  16. llama_cloud/resources/pipelines/__init__.py +0 -4
  17. llama_cloud/resources/pipelines/client.py +50 -340
  18. llama_cloud/resources/pipelines/types/__init__.py +1 -7
  19. llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +15 -15
  20. llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py +1 -24
  21. llama_cloud/types/__init__.py +9 -23
  22. llama_cloud/types/azure_open_ai_embedding.py +7 -34
  23. llama_cloud/types/base_prompt_template.py +3 -14
  24. llama_cloud/types/bedrock_embedding.py +7 -17
  25. llama_cloud/types/box_auth_mechanism.py +0 -4
  26. llama_cloud/types/character_splitter.py +3 -4
  27. llama_cloud/types/chat_data.py +0 -5
  28. llama_cloud/types/chat_message.py +1 -6
  29. llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -18
  30. llama_cloud/types/cloud_box_data_source.py +6 -16
  31. llama_cloud/types/cloud_chroma_vector_store.py +1 -5
  32. llama_cloud/types/cloud_confluence_data_source.py +6 -10
  33. llama_cloud/types/cloud_document.py +1 -3
  34. llama_cloud/types/cloud_document_create.py +1 -3
  35. llama_cloud/types/{user.py → cloud_google_drive_data_source.py} +6 -6
  36. llama_cloud/types/cloud_jira_data_source.py +4 -6
  37. llama_cloud/types/cloud_notion_page_data_source.py +2 -6
  38. llama_cloud/types/cloud_one_drive_data_source.py +2 -6
  39. llama_cloud/types/cloud_postgres_vector_store.py +0 -4
  40. llama_cloud/types/cloud_s_3_data_source.py +4 -12
  41. llama_cloud/types/cloud_sharepoint_data_source.py +5 -9
  42. llama_cloud/types/cloud_slack_data_source.py +6 -10
  43. llama_cloud/types/cloud_weaviate_vector_store.py +0 -4
  44. llama_cloud/types/code_splitter.py +2 -1
  45. llama_cloud/types/cohere_embedding.py +3 -7
  46. llama_cloud/types/configurable_data_sink_names.py +0 -4
  47. llama_cloud/types/configurable_data_source_names.py +4 -4
  48. llama_cloud/types/configurable_transformation_names.py +0 -4
  49. llama_cloud/types/configured_transformation_item_component.py +29 -2
  50. llama_cloud/types/data_sink.py +2 -2
  51. llama_cloud/types/data_sink_component.py +19 -2
  52. llama_cloud/types/data_sink_create_component.py +19 -2
  53. llama_cloud/types/data_source.py +3 -5
  54. llama_cloud/types/data_source_component.py +23 -2
  55. llama_cloud/types/data_source_create.py +1 -3
  56. llama_cloud/types/data_source_create_component.py +23 -2
  57. llama_cloud/types/eval_dataset.py +2 -2
  58. llama_cloud/types/eval_dataset_job_record.py +7 -13
  59. llama_cloud/types/eval_execution_params_override.py +2 -6
  60. llama_cloud/types/eval_metric.py +17 -0
  61. llama_cloud/types/eval_question.py +2 -6
  62. llama_cloud/types/extend_vertex_text_embedding.py +6 -18
  63. llama_cloud/types/extraction_result.py +5 -3
  64. llama_cloud/types/extraction_schema.py +3 -5
  65. llama_cloud/types/file.py +7 -11
  66. llama_cloud/types/gemini_embedding.py +5 -9
  67. llama_cloud/types/hugging_face_inference_api_embedding.py +10 -26
  68. llama_cloud/types/input_message.py +2 -4
  69. llama_cloud/types/llama_parse_parameters.py +1 -0
  70. llama_cloud/types/llama_parse_supported_file_extensions.py +0 -4
  71. llama_cloud/types/llm.py +9 -8
  72. llama_cloud/types/llm_parameters.py +2 -7
  73. llama_cloud/types/local_eval.py +8 -10
  74. llama_cloud/types/local_eval_results.py +1 -1
  75. llama_cloud/types/managed_ingestion_status_response.py +3 -5
  76. llama_cloud/types/markdown_element_node_parser.py +4 -5
  77. llama_cloud/types/markdown_node_parser.py +2 -1
  78. llama_cloud/types/message_annotation.py +1 -6
  79. llama_cloud/types/metric_result.py +3 -3
  80. llama_cloud/types/node_parser.py +2 -1
  81. llama_cloud/types/node_relationship.py +44 -0
  82. llama_cloud/types/object_type.py +0 -4
  83. llama_cloud/types/open_ai_embedding.py +6 -12
  84. llama_cloud/types/organization.py +2 -2
  85. llama_cloud/types/page_splitter_node_parser.py +3 -2
  86. llama_cloud/types/parsing_job_json_result.py +2 -2
  87. llama_cloud/types/parsing_job_markdown_result.py +1 -1
  88. llama_cloud/types/parsing_job_text_result.py +1 -1
  89. llama_cloud/types/partition_names.py +45 -0
  90. llama_cloud/types/pipeline.py +7 -17
  91. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  92. llama_cloud/types/pipeline_create.py +6 -18
  93. llama_cloud/types/pipeline_create_embedding_config.py +15 -15
  94. llama_cloud/types/pipeline_create_transform_config.py +1 -24
  95. llama_cloud/types/pipeline_data_source.py +5 -11
  96. llama_cloud/types/pipeline_data_source_component.py +23 -2
  97. llama_cloud/types/pipeline_data_source_create.py +1 -3
  98. llama_cloud/types/pipeline_deployment.py +4 -8
  99. llama_cloud/types/pipeline_embedding_config.py +15 -15
  100. llama_cloud/types/pipeline_file.py +10 -18
  101. llama_cloud/types/pipeline_file_create.py +1 -3
  102. llama_cloud/types/playground_session.py +2 -2
  103. llama_cloud/types/preset_retrieval_params.py +8 -11
  104. llama_cloud/types/presigned_url.py +1 -3
  105. llama_cloud/types/project.py +2 -2
  106. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  107. llama_cloud/types/prompt_spec.py +2 -4
  108. llama_cloud/types/related_node_info.py +0 -4
  109. llama_cloud/types/retrieval_mode.py +0 -4
  110. llama_cloud/types/sentence_splitter.py +3 -4
  111. llama_cloud/types/supported_llm_model_names.py +0 -4
  112. llama_cloud/types/text_node.py +3 -9
  113. llama_cloud/types/token_text_splitter.py +2 -1
  114. llama_cloud/types/transformation_category_names.py +0 -4
  115. llama_cloud/types/user_organization.py +5 -9
  116. llama_cloud/types/user_organization_create.py +2 -2
  117. llama_cloud/types/user_organization_delete.py +2 -2
  118. {llama_cloud-0.0.16.dist-info → llama_cloud-0.1.0.dist-info}/METADATA +1 -1
  119. llama_cloud-0.1.0.dist-info/RECORD +226 -0
  120. llama_cloud/resources/auth/__init__.py +0 -2
  121. llama_cloud/resources/auth/client.py +0 -124
  122. llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +0 -23
  123. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +0 -25
  124. llama_cloud/types/configured_transformation_item_component_one.py +0 -35
  125. llama_cloud/types/custom_claims.py +0 -58
  126. llama_cloud/types/data_sink_component_one.py +0 -23
  127. llama_cloud/types/data_sink_create_component_one.py +0 -23
  128. llama_cloud/types/data_source_component_one.py +0 -25
  129. llama_cloud/types/data_source_create_component_one.py +0 -25
  130. llama_cloud/types/pipeline_data_source_component_one.py +0 -25
  131. llama_cloud-0.0.16.dist-info/RECORD +0 -234
  132. {llama_cloud-0.0.16.dist-info → llama_cloud-0.1.0.dist-info}/LICENSE +0 -0
  133. {llama_cloud-0.0.16.dist-info → llama_cloud-0.1.0.dist-info}/WHEEL +0 -0
llama_cloud/types/hugging_face_inference_api_embedding.py CHANGED
@@ -25,36 +25,20 @@ class HuggingFaceInferenceApiEmbedding(pydantic.BaseModel):
     - Uses the feature extraction task: https://huggingface.co/tasks/feature-extraction
     """
 
-    model_name: typing.Optional[str] = pydantic.Field(
-        description="Hugging Face model name. If None, the task will be used."
-    )
+    model_name: typing.Optional[str]
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    num_workers: typing.Optional[int] = pydantic.Field(
-        description="The number of workers to use for async embedding calls."
-    )
-    pooling: typing.Optional[Pooling] = pydantic.Field(
-        description="Pooling strategy. If None, the model's default pooling is used."
-    )
-    query_instruction: typing.Optional[str] = pydantic.Field(
-        description="Instruction to prepend during query embedding."
-    )
-    text_instruction: typing.Optional[str] = pydantic.Field(description="Instruction to prepend during text embedding.")
+    callback_manager: typing.Optional[typing.Any]
+    num_workers: typing.Optional[int]
+    pooling: typing.Optional[Pooling]
+    query_instruction: typing.Optional[str]
+    text_instruction: typing.Optional[str]
     token: typing.Optional[HuggingFaceInferenceApiEmbeddingToken] = pydantic.Field(
         description="Hugging Face token. Will default to the locally saved token. Pass token=False if you don’t want to send your token to the server."
     )
-    timeout: typing.Optional[float] = pydantic.Field(
-        description="The maximum number of seconds to wait for a response from the server. Loading a new model in Inference API can take up to several minutes. Defaults to None, meaning it will loop until the server is available."
-    )
-    headers: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
-        description="Additional headers to send to the server. By default only the authorization and user-agent headers are sent. Values in this dictionary will override the default values."
-    )
-    cookies: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
-        description="Additional cookies to send to the server."
-    )
-    task: typing.Optional[str] = pydantic.Field(
-        description="Optional task to pick Hugging Face's recommended model, used when model_name is left as default of None."
-    )
+    timeout: typing.Optional[float]
+    headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+    cookies: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+    task: typing.Optional[str]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
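In 0.1.0 most of the description-only pydantic.Field wrappers are gone, but the affected fields stay Optional with an implicit None default, so construction code keeps working. A minimal sketch, assuming llama-cloud 0.1.0 is installed and the import path follows the file list above (the model name is only an example value):

from llama_cloud.types.hugging_face_inference_api_embedding import (
    HuggingFaceInferenceApiEmbedding,
)

# pooling, timeout, headers, cookies, and task still default to None;
# they simply no longer carry Field descriptions in the generated schema.
embedding = HuggingFaceInferenceApiEmbedding(model_name="BAAI/bge-small-en-v1.5")
print(embedding.json())  # the json() override applies by_alias=True, exclude_unset=True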
llama_cloud/types/input_message.py CHANGED
@@ -20,12 +20,10 @@ class InputMessage(pydantic.BaseModel):
     This is distinct from a ChatMessage because this schema is enforced by the AI Chat library used in the frontend
     """
 
-    id: typing.Optional[str] = pydantic.Field(description="ID of the message, if any. Not necessarily a UUID.")
+    id: typing.Optional[str]
     role: MessageRole
     content: str
-    data: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
-        description="Additional data to be stored with the message."
-    )
+    data: typing.Optional[typing.Dict[str, typing.Any]]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
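The same simplification applies to InputMessage, where id and data become plain Optionals. A short sketch; the MessageRole import path and its USER member are assumptions based on the generated client's naming:

from llama_cloud.types.input_message import InputMessage
from llama_cloud.types.message_role import MessageRole  # assumed module path

# id and data default to None and can simply be omitted.
msg = InputMessage(role=MessageRole.USER, content="What changed in 0.1.0?")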
llama_cloud/types/llama_parse_parameters.py CHANGED
@@ -41,6 +41,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     page_suffix: typing.Optional[str]
     webhook_url: typing.Optional[str]
     take_screenshot: typing.Optional[bool]
+    premium_mode: typing.Optional[bool]
     s_3_input_path: typing.Optional[str] = pydantic.Field(alias="s3_input_path")
     s_3_output_path_prefix: typing.Optional[str] = pydantic.Field(alias="s3_output_path_prefix")
 
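The one functional addition here is the new premium_mode flag. A hedged usage sketch; the diff types it as Optional[bool] like take_screenshot, but its parsing semantics are not documented in this diff:

from llama_cloud.types.llama_parse_parameters import LlamaParseParameters

params = LlamaParseParameters(
    take_screenshot=True,
    premium_mode=True,  # new in 0.1.0; defaults to None when omitted
)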
llama_cloud/types/llama_parse_supported_file_extensions.py CHANGED
@@ -7,10 +7,6 @@ T_Result = typing.TypeVar("T_Result")
 
 
 class LlamaParseSupportedFileExtensions(str, enum.Enum):
-    """
-    An enumeration.
-    """
-
     PDF = ".pdf"
     DOC = ".doc"
     DOCX = ".docx"
llama_cloud/types/llm.py CHANGED
@@ -33,16 +33,17 @@ class Llm(pydantic.BaseModel):
     Pydantic program mode to use for structured prediction.
     """
 
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    system_prompt: typing.Optional[str] = pydantic.Field(description="System prompt for LLM calls.")
-    output_parser: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
-        description="Output parser to parse, validate, and correct errors programmatically."
+    callback_manager: typing.Optional[typing.Any]
+    system_prompt: typing.Optional[str]
+    messages_to_prompt: typing.Optional[str] = pydantic.Field(
+        description="Function to convert a list of messages to an LLM prompt."
     )
-    pydantic_program_mode: typing.Optional[PydanticProgramMode]
-    query_wrapper_prompt: typing.Optional[BasePromptTemplate] = pydantic.Field(
-        description="Query wrapper prompt for LLM calls."
+    completion_to_prompt: typing.Optional[str] = pydantic.Field(
+        description="Function to convert a completion to an LLM prompt."
     )
-    class_name: typing.Optional[str]
+    output_parser: typing.Optional[typing.Any]
+    pydantic_program_mode: typing.Optional[PydanticProgramMode]
+    query_wrapper_prompt: typing.Optional[BasePromptTemplate]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llm_parameters.py CHANGED
@@ -16,16 +16,11 @@ except ImportError:
 
 
 class LlmParameters(pydantic.BaseModel):
-    """
-    Base schema model for BaseComponent classes used in the platform.
-    Comes with special serialization logic for types used commonly in platform codebase.
-    """
-
     model_name: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
         description="The name of the model to use for LLM completions."
     )
-    system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the completion.")
-    temperature: typing.Optional[float] = pydantic.Field(description="The temperature value for the model.")
+    system_prompt: typing.Optional[str]
+    temperature: typing.Optional[float]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/local_eval.py CHANGED
@@ -21,19 +21,17 @@ class LocalEval(pydantic.BaseModel):
     Output of an BaseEvaluator.
     """
 
-    query: typing.Optional[str] = pydantic.Field(description="Query string")
-    contexts: typing.Optional[typing.List[str]] = pydantic.Field(description="Context strings")
-    response: typing.Optional[str] = pydantic.Field(description="Response string")
-    passing: typing.Optional[bool] = pydantic.Field(description="Binary evaluation result (passing or not)")
-    feedback: typing.Optional[str] = pydantic.Field(description="Feedback or reasoning for the response")
-    score: typing.Optional[float] = pydantic.Field(description="Score for the response")
-    pairwise_source: typing.Optional[str] = pydantic.Field(
-        description="Used only for pairwise and specifies whether it is from original order of presented answers or flipped order"
-    )
+    query: typing.Optional[str]
+    contexts: typing.Optional[typing.List[str]]
+    response: typing.Optional[str]
+    passing: typing.Optional[bool]
+    feedback: typing.Optional[str]
+    score: typing.Optional[float]
+    pairwise_source: typing.Optional[str]
     invalid_result: typing.Optional[bool] = pydantic.Field(
         description="Whether the evaluation result is an invalid one."
     )
-    invalid_reason: typing.Optional[str] = pydantic.Field(description="Reason for invalid evaluation.")
+    invalid_reason: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/local_eval_results.py CHANGED
@@ -21,7 +21,7 @@ class LocalEvalResults(pydantic.BaseModel):
     """
 
     project_id: str = pydantic.Field(description="The ID of the project.")
-    eval_set_id: typing.Optional[str] = pydantic.Field(description="The ID of the local eval result set.")
+    eval_set_id: typing.Optional[str]
     app_name: str = pydantic.Field(description="The name of the app.")
     eval_name: str = pydantic.Field(description="The name of the eval.")
     result: LocalEval = pydantic.Field(description="The eval results.")
llama_cloud/types/managed_ingestion_status_response.py CHANGED
@@ -17,12 +17,10 @@ except ImportError:
 
 
 class ManagedIngestionStatusResponse(pydantic.BaseModel):
-    job_id: typing.Optional[str] = pydantic.Field(description="ID of the latest job.")
-    deployment_date: typing.Optional[dt.datetime] = pydantic.Field(description="Date of the deployment.")
+    job_id: typing.Optional[str]
+    deployment_date: typing.Optional[dt.datetime]
     status: ManagedIngestionStatus = pydantic.Field(description="Status of the ingestion.")
-    error: typing.Optional[typing.List[IngestionErrorResponse]] = pydantic.Field(
-        description="List of errors that occurred during ingestion."
-    )
+    error: typing.Optional[typing.List[IngestionErrorResponse]]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/markdown_element_node_parser.py CHANGED
@@ -28,14 +28,13 @@ class MarkdownElementNodeParser(pydantic.BaseModel):
         description="Whether or not to consider metadata when splitting."
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    llm: typing.Optional[Llm] = pydantic.Field(description="LLM model to use for summarization.")
+    callback_manager: typing.Optional[typing.Any]
+    id_func: typing.Optional[str]
+    llm: typing.Optional[Llm]
     summary_query_str: typing.Optional[str] = pydantic.Field(description="Query string to use for summarization.")
     num_workers: typing.Optional[int] = pydantic.Field(description="Num of workers for async jobs.")
     show_progress: typing.Optional[bool] = pydantic.Field(description="Whether to show progress.")
-    nested_node_parser: typing.Optional[NodeParser] = pydantic.Field(
-        description="Other types of node parsers to handle some types of nodes."
-    )
+    nested_node_parser: typing.Optional[NodeParser]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/markdown_node_parser.py CHANGED
@@ -29,7 +29,8 @@ class MarkdownNodeParser(pydantic.BaseModel):
         description="Whether or not to consider metadata when splitting."
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+    callback_manager: typing.Optional[typing.Any]
+    id_func: typing.Optional[str]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/message_annotation.py CHANGED
@@ -15,13 +15,8 @@ except ImportError:
 
 
 class MessageAnnotation(pydantic.BaseModel):
-    """
-    Base schema model for BaseComponent classes used in the platform.
-    Comes with special serialization logic for types used commonly in platform codebase.
-    """
-
     type: str
-    data: typing.Optional[typing.Any]
+    data: str
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
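Note the breaking change buried here: data tightened from Optional[Any] to a required str, so callers that previously passed dicts or omitted the field must now serialize explicitly. A migration sketch (the annotation type and payload are made-up example values):

import json

from llama_cloud.types.message_annotation import MessageAnnotation

# 0.0.16 accepted data=None or a dict; 0.1.0 requires a string, so
# structured payloads have to be serialized before construction.
ann = MessageAnnotation(type="sources", data=json.dumps({"file_id": "file-123"}))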
llama_cloud/types/metric_result.py CHANGED
@@ -15,9 +15,9 @@ except ImportError:
 
 
 class MetricResult(pydantic.BaseModel):
-    passing: typing.Optional[bool] = pydantic.Field(description="Whether the metric passed or not.")
-    score: typing.Optional[float] = pydantic.Field(description="The score for the metric.")
-    feedback: typing.Optional[str] = pydantic.Field(description="The reasoning for the metric.")
+    passing: typing.Optional[bool]
+    score: typing.Optional[float]
+    feedback: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/node_parser.py CHANGED
@@ -23,7 +23,8 @@ class NodeParser(pydantic.BaseModel):
         description="Whether or not to consider metadata when splitting."
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
+    callback_manager: typing.Optional[typing.Any]
+    id_func: typing.Optional[str]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/node_relationship.py ADDED
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class NodeRelationship(str, enum.Enum):
+    """
+    Node relationships used in `BaseNode` class.
+
+    Attributes:
+        SOURCE: The node is the source document.
+        PREVIOUS: The node is the previous node in the document.
+        NEXT: The node is the next node in the document.
+        PARENT: The node is the parent node in the document.
+        CHILD: The node is a child node in the document.
+    """
+
+    ONE = "1"
+    TWO = "2"
+    THREE = "3"
+    FOUR = "4"
+    FIVE = "5"
+
+    def visit(
+        self,
+        one: typing.Callable[[], T_Result],
+        two: typing.Callable[[], T_Result],
+        three: typing.Callable[[], T_Result],
+        four: typing.Callable[[], T_Result],
+        five: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is NodeRelationship.ONE:
+            return one()
+        if self is NodeRelationship.TWO:
+            return two()
+        if self is NodeRelationship.THREE:
+            return three()
+        if self is NodeRelationship.FOUR:
+            return four()
+        if self is NodeRelationship.FIVE:
+            return five()
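A minimal sketch of the Fern-generated visitor on the new enum, assuming llama-cloud 0.1.0; the wire values are the opaque strings "1" through "5", while the docstring maps them to SOURCE/PREVIOUS/NEXT/PARENT/CHILD semantics:

from llama_cloud.types.node_relationship import NodeRelationship

rel = NodeRelationship("1")  # parse from the serialized wire value
label = rel.visit(
    one=lambda: "source",
    two=lambda: "previous",
    three=lambda: "next",
    four=lambda: "parent",
    five=lambda: "child",
)
print(label)  # -> "source"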
llama_cloud/types/object_type.py CHANGED
@@ -7,10 +7,6 @@ T_Result = typing.TypeVar("T_Result")
 
 
 class ObjectType(str, enum.Enum):
-    """
-    An enumeration.
-    """
-
     ONE = "1"
     TWO = "2"
     THREE = "3"
llama_cloud/types/open_ai_embedding.py CHANGED
@@ -39,27 +39,21 @@ class OpenAiEmbedding(pydantic.BaseModel):
 
     model_name: typing.Optional[str] = pydantic.Field(description="The name of the embedding model.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    num_workers: typing.Optional[int] = pydantic.Field(
-        description="The number of workers to use for async embedding calls."
-    )
+    callback_manager: typing.Optional[typing.Any]
+    num_workers: typing.Optional[int]
     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
         description="Additional kwargs for the OpenAI API."
     )
     api_key: str = pydantic.Field(description="The OpenAI API key.")
-    api_base: typing.Optional[str] = pydantic.Field(description="The base URL for OpenAI API.")
-    api_version: typing.Optional[str] = pydantic.Field(description="The version for OpenAI API.")
+    api_base: typing.Optional[str]
+    api_version: typing.Optional[str]
     max_retries: typing.Optional[int] = pydantic.Field(description="Maximum number of retries.")
     timeout: typing.Optional[float] = pydantic.Field(description="Timeout for each request.")
-    default_headers: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
-        description="The default headers for API requests."
-    )
+    default_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
     reuse_client: typing.Optional[bool] = pydantic.Field(
         description="Reuse the OpenAI client between requests. When doing anything with large volumes of async API calls, setting this to false can improve stability."
     )
-    dimensions: typing.Optional[int] = pydantic.Field(
-        description="The number of dimensions on the output embedding vectors. Works only with v3 embedding models."
-    )
+    dimensions: typing.Optional[int]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/organization.py CHANGED
@@ -20,8 +20,8 @@ class Organization(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     name: str = pydantic.Field(description="A name for the organization.")
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/page_splitter_node_parser.py CHANGED
@@ -23,8 +23,9 @@ class PageSplitterNodeParser(pydantic.BaseModel):
         description="Whether or not to consider metadata when splitting."
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-    callback_manager: typing.Optional[typing.Dict[str, typing.Any]]
-    page_separator: typing.Optional[str] = pydantic.Field(description="Separator to split text into pages.")
+    callback_manager: typing.Optional[typing.Any]
+    id_func: typing.Optional[str]
+    page_separator: typing.Optional[str]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/parsing_job_json_result.py CHANGED
@@ -15,8 +15,8 @@ except ImportError:
 
 
 class ParsingJobJsonResult(pydantic.BaseModel):
-    pages: typing.Optional[typing.Any]
-    job_metadata: typing.Optional[typing.Any]
+    pages: typing.Any
+    job_metadata: typing.Any
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/parsing_job_markdown_result.py CHANGED
@@ -16,7 +16,7 @@ except ImportError:
 
 class ParsingJobMarkdownResult(pydantic.BaseModel):
     markdown: str = pydantic.Field(description="The markdown result of the parsing job")
-    job_metadata: typing.Optional[typing.Any]
+    job_metadata: typing.Any
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/parsing_job_text_result.py CHANGED
@@ -16,7 +16,7 @@ except ImportError:
 
 class ParsingJobTextResult(pydantic.BaseModel):
     text: str = pydantic.Field(description="The text result of the parsing job")
-    job_metadata: typing.Optional[typing.Any]
+    job_metadata: typing.Any
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
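Across the three parsing result models, job_metadata (and pages) moved from Optional[Any] to bare Any. Under the pydantic v1 semantics these models target (note the pydantic.v1 import fallback in the hunk context), an Any-annotated field still gets an implicit None default, so this is effectively an annotation-only change. A quick check, assuming 0.1.0:

from llama_cloud.types.parsing_job_text_result import ParsingJobTextResult

result = ParsingJobTextResult(text="hello")  # job_metadata may still be omitted
print(result.job_metadata)  # None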
llama_cloud/types/partition_names.py ADDED
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class PartitionNames(str, enum.Enum):
+    """
+    Enum for dataset partition names.
+    """
+
+    DATA_SOURCE_ID_PARTITION = "data_source_id_partition"
+    PIPELINE_ID_PARTITION = "pipeline_id_partition"
+    EVAL_DATASET_ID_PARTITION = "eval_dataset_id_partition"
+    FILE_ID_PARTITION = "file_id_partition"
+    PIPELINE_FILE_ID_PARTITION = "pipeline_file_id_partition"
+    FILE_PARSING_ID_PARTITION = "file_parsing_id_partition"
+    EXTRACTION_SCHEMA_ID_PARTITION = "extraction_schema_id_partition"
+
+    def visit(
+        self,
+        data_source_id_partition: typing.Callable[[], T_Result],
+        pipeline_id_partition: typing.Callable[[], T_Result],
+        eval_dataset_id_partition: typing.Callable[[], T_Result],
+        file_id_partition: typing.Callable[[], T_Result],
+        pipeline_file_id_partition: typing.Callable[[], T_Result],
+        file_parsing_id_partition: typing.Callable[[], T_Result],
+        extraction_schema_id_partition: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is PartitionNames.DATA_SOURCE_ID_PARTITION:
+            return data_source_id_partition()
+        if self is PartitionNames.PIPELINE_ID_PARTITION:
+            return pipeline_id_partition()
+        if self is PartitionNames.EVAL_DATASET_ID_PARTITION:
+            return eval_dataset_id_partition()
+        if self is PartitionNames.FILE_ID_PARTITION:
+            return file_id_partition()
+        if self is PartitionNames.PIPELINE_FILE_ID_PARTITION:
+            return pipeline_file_id_partition()
+        if self is PartitionNames.FILE_PARSING_ID_PARTITION:
+            return file_parsing_id_partition()
+        if self is PartitionNames.EXTRACTION_SCHEMA_ID_PARTITION:
+            return extraction_schema_id_partition()
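A minimal usage sketch for the new enum, assuming 0.1.0; members round-trip through their snake_case wire values:

from llama_cloud.types.partition_names import PartitionNames

p = PartitionNames("file_id_partition")  # parse from the serialized value
print(p is PartitionNames.FILE_ID_PARTITION)  # True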
llama_cloud/types/pipeline.py CHANGED
@@ -29,25 +29,19 @@ class Pipeline(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     name: str
     project_id: str
     pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
         description="Type of pipeline. Either PLAYGROUND or MANAGED."
     )
-    managed_pipeline_id: typing.Optional[str] = pydantic.Field(
-        description="The ID of the ManagedPipeline this playground pipeline is linked to."
-    )
-    embedding_config: typing.Optional[PipelineEmbeddingConfig] = pydantic.Field(
-        description="Configuration for the embedding model."
-    )
+    managed_pipeline_id: typing.Optional[str]
+    embedding_config: PipelineEmbeddingConfig
     configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
         description="Deprecated don't use it, List of configured transformations."
     )
-    config_hash: typing.Optional[PipelineConfigurationHashes] = pydantic.Field(
-        description="Hashes for the configuration of the pipeline."
-    )
+    config_hash: typing.Optional[PipelineConfigurationHashes]
     transform_config: typing.Optional[PipelineTransformConfig] = pydantic.Field(
         description="Configuration for the transformation."
     )
@@ -57,12 +51,8 @@ class Pipeline(pydantic.BaseModel):
     eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
         description="Eval parameters for the pipeline."
     )
-    llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
-        description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
-    )
-    data_sink: typing.Optional[DataSink] = pydantic.Field(
-        description="The data sink for the pipeline. If None, the pipeline will use the fully managed data sink."
-    )
+    llama_parse_parameters: typing.Optional[LlamaParseParameters]
+    data_sink: typing.Optional[DataSink]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
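The notable break here: Pipeline.embedding_config drops its Optional wrapper and becomes required, so deserializing a pipeline payload without it now fails validation. A hedged sketch using pydantic v1-style parse_obj (the IDs are made-up example values):

from llama_cloud.types.pipeline import Pipeline

payload = {"id": "pl-123", "name": "demo", "project_id": "pr-456"}
try:
    Pipeline.parse_obj(payload)  # 0.0.16 tolerated the missing embedding_config
except Exception as exc:  # pydantic ValidationError in 0.1.0
    print(type(exc).__name__)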
llama_cloud/types/pipeline_configuration_hashes.py CHANGED
@@ -19,9 +19,9 @@ class PipelineConfigurationHashes(pydantic.BaseModel):
     Hashes for the configuration of a pipeline.
     """
 
-    embedding_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the embedding config.")
-    parsing_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the llama parse parameters.")
-    transform_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the transform config.")
+    embedding_config_hash: typing.Optional[str]
+    parsing_config_hash: typing.Optional[str]
+    transform_config_hash: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_create.py CHANGED
@@ -27,37 +27,25 @@ class PipelineCreate(pydantic.BaseModel):
     Schema for creating a pipeline.
     """
 
-    embedding_config: typing.Optional[PipelineCreateEmbeddingConfig] = pydantic.Field(
-        description="Configuration for the embedding model."
-    )
+    embedding_config: typing.Optional[PipelineCreateEmbeddingConfig]
     transform_config: typing.Optional[PipelineCreateTransformConfig] = pydantic.Field(
         description="Configuration for the transformation."
     )
-    configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
-        description="Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline."
-    )
-    data_sink_id: typing.Optional[str] = pydantic.Field(
-        description="Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID."
-    )
-    data_sink: typing.Optional[DataSinkCreate] = pydantic.Field(
-        description="Data sink. When provided instead of data_sink_id, the data sink will be created."
-    )
+    configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]
+    data_sink_id: typing.Optional[str]
+    data_sink: typing.Optional[DataSinkCreate]
     preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = pydantic.Field(
         description="Preset retrieval parameters for the pipeline."
     )
     eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
         description="Eval parameters for the pipeline."
     )
-    llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
-        description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
-    )
+    llama_parse_parameters: typing.Optional[LlamaParseParameters]
     name: str
     pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
         description="Type of pipeline. Either PLAYGROUND or MANAGED."
     )
-    managed_pipeline_id: typing.Optional[str] = pydantic.Field(
-        description="The ID of the ManagedPipeline this playground pipeline is linked to."
-    )
+    managed_pipeline_id: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_create_embedding_config.py CHANGED
@@ -15,8 +15,8 @@ from .open_ai_embedding_config import OpenAiEmbeddingConfig
 from .vertex_ai_embedding_config import VertexAiEmbeddingConfig
 
 
-class PipelineCreateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
-    type: typing_extensions.Literal["OPENAI_EMBEDDING"]
+class PipelineCreateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
+    type: typing_extensions.Literal["AZURE_EMBEDDING"]
 
     class Config:
         frozen = True
@@ -24,8 +24,8 @@ class PipelineCreateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
         allow_population_by_field_name = True
 
 
-class PipelineCreateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
-    type: typing_extensions.Literal["AZURE_EMBEDDING"]
+class PipelineCreateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
+    type: typing_extensions.Literal["BEDROCK_EMBEDDING"]
 
     class Config:
         frozen = True
@@ -33,8 +33,8 @@ class PipelineCreateEmbeddingConfig_AzureEmbedding(AzureOpenAiEmbeddingConfig):
         allow_population_by_field_name = True
 
 
-class PipelineCreateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmbeddingConfig):
-    type: typing_extensions.Literal["HUGGINGFACE_API_EMBEDDING"]
+class PipelineCreateEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
+    type: typing_extensions.Literal["COHERE_EMBEDDING"]
 
     class Config:
         frozen = True
@@ -42,8 +42,8 @@ class PipelineCreateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInference
         allow_population_by_field_name = True
 
 
-class PipelineCreateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
-    type: typing_extensions.Literal["BEDROCK_EMBEDDING"]
+class PipelineCreateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
+    type: typing_extensions.Literal["GEMINI_EMBEDDING"]
 
     class Config:
         frozen = True
@@ -51,8 +51,8 @@ class PipelineCreateEmbeddingConfig_BedrockEmbedding(BedrockEmbeddingConfig):
         allow_population_by_field_name = True
 
 
-class PipelineCreateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
-    type: typing_extensions.Literal["GEMINI_EMBEDDING"]
+class PipelineCreateEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmbeddingConfig):
+    type: typing_extensions.Literal["HUGGINGFACE_API_EMBEDDING"]
 
     class Config:
         frozen = True
@@ -60,8 +60,8 @@ class PipelineCreateEmbeddingConfig_GeminiEmbedding(GeminiEmbeddingConfig):
         allow_population_by_field_name = True
 
 
-class PipelineCreateEmbeddingConfig_CohereEmbedding(CohereEmbeddingConfig):
-    type: typing_extensions.Literal["COHERE_EMBEDDING"]
+class PipelineCreateEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
+    type: typing_extensions.Literal["OPENAI_EMBEDDING"]
 
     class Config:
         frozen = True
@@ -79,11 +79,11 @@ class PipelineCreateEmbeddingConfig_VertexaiEmbedding(VertexAiEmbeddingConfig):
 
 
 PipelineCreateEmbeddingConfig = typing.Union[
-    PipelineCreateEmbeddingConfig_OpenaiEmbedding,
     PipelineCreateEmbeddingConfig_AzureEmbedding,
-    PipelineCreateEmbeddingConfig_HuggingfaceApiEmbedding,
     PipelineCreateEmbeddingConfig_BedrockEmbedding,
-    PipelineCreateEmbeddingConfig_GeminiEmbedding,
     PipelineCreateEmbeddingConfig_CohereEmbedding,
+    PipelineCreateEmbeddingConfig_GeminiEmbedding,
+    PipelineCreateEmbeddingConfig_HuggingfaceApiEmbedding,
+    PipelineCreateEmbeddingConfig_OpenaiEmbedding,
     PipelineCreateEmbeddingConfig_VertexaiEmbedding,
 ]
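The variant classes and union members were reordered alphabetically; because every member is pinned by its type Literal discriminator, resolution should be unaffected. A quick way to inspect the final ordering, assuming 0.1.0:

import typing

from llama_cloud.types.pipeline_create_embedding_config import (
    PipelineCreateEmbeddingConfig,
)

# Prints the seven variants in their new alphabetical order.
for member in typing.get_args(PipelineCreateEmbeddingConfig):
    print(member.__name__)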