llama-cloud 0.1.15__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llama-cloud might be problematic. See the registry's advisory page for more details.

Files changed (49):
  1. llama_cloud/__init__.py +10 -32
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/chat_apps/client.py +20 -0
  4. llama_cloud/resources/evals/client.py +0 -643
  5. llama_cloud/resources/llama_extract/client.py +98 -6
  6. llama_cloud/resources/parsing/client.py +8 -0
  7. llama_cloud/resources/pipelines/client.py +14 -375
  8. llama_cloud/resources/projects/client.py +72 -923
  9. llama_cloud/resources/retrievers/client.py +161 -4
  10. llama_cloud/types/__init__.py +10 -32
  11. llama_cloud/types/base_plan.py +3 -0
  12. llama_cloud/types/base_plan_name.py +12 -0
  13. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  14. llama_cloud/types/extract_config.py +0 -3
  15. llama_cloud/types/extract_mode.py +13 -1
  16. llama_cloud/types/extract_run.py +1 -0
  17. llama_cloud/types/llama_extract_settings.py +1 -0
  18. llama_cloud/types/llama_parse_parameters.py +1 -0
  19. llama_cloud/types/parsing_mode.py +12 -0
  20. llama_cloud/types/pipeline_file.py +2 -1
  21. llama_cloud/types/pipeline_file_status.py +33 -0
  22. llama_cloud/types/plan_limits.py +1 -0
  23. llama_cloud/types/preset_composite_retrieval_params.py +4 -2
  24. llama_cloud/types/prompt_conf.py +1 -0
  25. llama_cloud/types/{eval_question_create.py → re_rank_config.py} +6 -2
  26. llama_cloud/types/re_ranker_type.py +41 -0
  27. llama_cloud/types/report_block.py +1 -0
  28. llama_cloud/types/struct_mode.py +4 -0
  29. llama_cloud/types/struct_parse_conf.py +6 -0
  30. llama_cloud/types/usage_and_plan.py +2 -2
  31. llama_cloud/types/{usage.py → usage_response.py} +3 -3
  32. llama_cloud/types/{usage_active_alerts_item.py → usage_response_active_alerts_item.py} +8 -4
  33. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/METADATA +1 -1
  34. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/RECORD +36 -47
  35. llama_cloud/types/eval_dataset.py +0 -40
  36. llama_cloud/types/eval_dataset_job_params.py +0 -39
  37. llama_cloud/types/eval_dataset_job_record.py +0 -58
  38. llama_cloud/types/eval_execution_params_override.py +0 -37
  39. llama_cloud/types/eval_metric.py +0 -17
  40. llama_cloud/types/eval_question.py +0 -38
  41. llama_cloud/types/eval_question_result.py +0 -52
  42. llama_cloud/types/local_eval.py +0 -47
  43. llama_cloud/types/local_eval_results.py +0 -40
  44. llama_cloud/types/local_eval_sets.py +0 -33
  45. llama_cloud/types/metric_result.py +0 -33
  46. llama_cloud/types/prompt_mixin_prompts.py +0 -39
  47. llama_cloud/types/prompt_spec.py +0 -36
  48. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/LICENSE +0 -0
  49. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/WHEEL +0 -0
@@ -1,52 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import datetime as dt
4
- import typing
5
-
6
- from ..core.datetime_utils import serialize_datetime
7
- from .eval_execution_params import EvalExecutionParams
8
- from .metric_result import MetricResult
9
- from .text_node import TextNode
10
-
11
- try:
12
- import pydantic
13
- if pydantic.__version__.startswith("1."):
14
- raise ImportError
15
- import pydantic.v1 as pydantic # type: ignore
16
- except ImportError:
17
- import pydantic # type: ignore
18
-
19
-
20
- class EvalQuestionResult(pydantic.BaseModel):
21
- """
22
- Schema for the result of an eval question job.
23
- """
24
-
25
- eval_question_id: str = pydantic.Field(description="The ID of the question that was executed.")
26
- pipeline_id: str = pydantic.Field(description="The ID of the pipeline that the question was executed against.")
27
- source_nodes: typing.List[TextNode] = pydantic.Field(
28
- description="The nodes retrieved by the pipeline for the given question."
29
- )
30
- answer: str = pydantic.Field(description="The answer to the question.")
31
- eval_metrics: typing.Dict[str, MetricResult] = pydantic.Field(description="The eval metrics for the question.")
32
- eval_dataset_execution_id: str = pydantic.Field(
33
- description="The ID of the EvalDatasetJobRecord that this result was generated from."
34
- )
35
- eval_dataset_execution_params: EvalExecutionParams = pydantic.Field(
36
- description="The EvalExecutionParams that were used when this result was generated."
37
- )
38
- eval_finished_at: dt.datetime = pydantic.Field(description="The timestamp when the eval finished.")
39
- class_name: typing.Optional[str]
40
-
41
- def json(self, **kwargs: typing.Any) -> str:
42
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
43
- return super().json(**kwargs_with_defaults)
44
-
45
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
46
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
47
- return super().dict(**kwargs_with_defaults)
48
-
49
- class Config:
50
- frozen = True
51
- smart_union = True
52
- json_encoders = {dt.datetime: serialize_datetime}
@@ -1,47 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import datetime as dt
4
- import typing
5
-
6
- from ..core.datetime_utils import serialize_datetime
7
-
8
- try:
9
- import pydantic
10
- if pydantic.__version__.startswith("1."):
11
- raise ImportError
12
- import pydantic.v1 as pydantic # type: ignore
13
- except ImportError:
14
- import pydantic # type: ignore
15
-
16
-
17
- class LocalEval(pydantic.BaseModel):
18
- """
19
- Evaluation result, EvaluationResult from llama_index.
20
-
21
- Output of an BaseEvaluator.
22
- """
23
-
24
- query: typing.Optional[str]
25
- contexts: typing.Optional[typing.List[str]]
26
- response: typing.Optional[str]
27
- passing: typing.Optional[bool]
28
- feedback: typing.Optional[str]
29
- score: typing.Optional[float]
30
- pairwise_source: typing.Optional[str]
31
- invalid_result: typing.Optional[bool] = pydantic.Field(
32
- description="Whether the evaluation result is an invalid one."
33
- )
34
- invalid_reason: typing.Optional[str]
35
-
36
- def json(self, **kwargs: typing.Any) -> str:
37
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
38
- return super().json(**kwargs_with_defaults)
39
-
40
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
41
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
42
- return super().dict(**kwargs_with_defaults)
43
-
44
- class Config:
45
- frozen = True
46
- smart_union = True
47
- json_encoders = {dt.datetime: serialize_datetime}
@@ -1,40 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import datetime as dt
4
- import typing
5
-
6
- from ..core.datetime_utils import serialize_datetime
7
- from .local_eval import LocalEval
8
-
9
- try:
10
- import pydantic
11
- if pydantic.__version__.startswith("1."):
12
- raise ImportError
13
- import pydantic.v1 as pydantic # type: ignore
14
- except ImportError:
15
- import pydantic # type: ignore
16
-
17
-
18
- class LocalEvalResults(pydantic.BaseModel):
19
- """
20
- Schema for the result of a local evaluation.
21
- """
22
-
23
- project_id: str = pydantic.Field(description="The ID of the project.")
24
- eval_set_id: typing.Optional[str]
25
- app_name: str = pydantic.Field(description="The name of the app.")
26
- eval_name: str = pydantic.Field(description="The name of the eval.")
27
- result: LocalEval = pydantic.Field(description="The eval results.")
28
-
29
- def json(self, **kwargs: typing.Any) -> str:
30
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
31
- return super().json(**kwargs_with_defaults)
32
-
33
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
34
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
35
- return super().dict(**kwargs_with_defaults)
36
-
37
- class Config:
38
- frozen = True
39
- smart_union = True
40
- json_encoders = {dt.datetime: serialize_datetime}
@@ -1,33 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import datetime as dt
4
- import typing
5
-
6
- from ..core.datetime_utils import serialize_datetime
7
-
8
- try:
9
- import pydantic
10
- if pydantic.__version__.startswith("1."):
11
- raise ImportError
12
- import pydantic.v1 as pydantic # type: ignore
13
- except ImportError:
14
- import pydantic # type: ignore
15
-
16
-
17
- class LocalEvalSets(pydantic.BaseModel):
18
- eval_set_id: str = pydantic.Field(description="The ID of the eval set.")
19
- app_name: str = pydantic.Field(description="The name of the app.")
20
- upload_time: dt.datetime = pydantic.Field(description="The time of the upload.")
21
-
22
- def json(self, **kwargs: typing.Any) -> str:
23
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
24
- return super().json(**kwargs_with_defaults)
25
-
26
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
27
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
28
- return super().dict(**kwargs_with_defaults)
29
-
30
- class Config:
31
- frozen = True
32
- smart_union = True
33
- json_encoders = {dt.datetime: serialize_datetime}
@@ -1,33 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import datetime as dt
4
- import typing
5
-
6
- from ..core.datetime_utils import serialize_datetime
7
-
8
- try:
9
- import pydantic
10
- if pydantic.__version__.startswith("1."):
11
- raise ImportError
12
- import pydantic.v1 as pydantic # type: ignore
13
- except ImportError:
14
- import pydantic # type: ignore
15
-
16
-
17
- class MetricResult(pydantic.BaseModel):
18
- passing: typing.Optional[bool]
19
- score: typing.Optional[float]
20
- feedback: typing.Optional[str]
21
-
22
- def json(self, **kwargs: typing.Any) -> str:
23
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
24
- return super().json(**kwargs_with_defaults)
25
-
26
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
27
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
28
- return super().dict(**kwargs_with_defaults)
29
-
30
- class Config:
31
- frozen = True
32
- smart_union = True
33
- json_encoders = {dt.datetime: serialize_datetime}
@@ -1,39 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import datetime as dt
4
- import typing
5
-
6
- from ..core.datetime_utils import serialize_datetime
7
- from .prompt_spec import PromptSpec
8
-
9
- try:
10
- import pydantic
11
- if pydantic.__version__.startswith("1."):
12
- raise ImportError
13
- import pydantic.v1 as pydantic # type: ignore
14
- except ImportError:
15
- import pydantic # type: ignore
16
-
17
-
18
- class PromptMixinPrompts(pydantic.BaseModel):
19
- """
20
- Schema for the prompts derived from the PromptMixin.
21
- """
22
-
23
- project_id: str = pydantic.Field(description="The ID of the project.")
24
- id: typing.Optional[str]
25
- name: str = pydantic.Field(description="The name of the prompt set.")
26
- prompts: typing.List[PromptSpec] = pydantic.Field(description="The prompts.")
27
-
28
- def json(self, **kwargs: typing.Any) -> str:
29
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
30
- return super().json(**kwargs_with_defaults)
31
-
32
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
33
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
34
- return super().dict(**kwargs_with_defaults)
35
-
36
- class Config:
37
- frozen = True
38
- smart_union = True
39
- json_encoders = {dt.datetime: serialize_datetime}
@@ -1,36 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import datetime as dt
4
- import typing
5
-
6
- from ..core.datetime_utils import serialize_datetime
7
- from .app_schema_chat_chat_message import AppSchemaChatChatMessage
8
-
9
- try:
10
- import pydantic
11
- if pydantic.__version__.startswith("1."):
12
- raise ImportError
13
- import pydantic.v1 as pydantic # type: ignore
14
- except ImportError:
15
- import pydantic # type: ignore
16
-
17
-
18
- class PromptSpec(pydantic.BaseModel):
19
- prompt_key: str = pydantic.Field(description="The key of the prompt in the PromptMixin.")
20
- prompt_class: str = pydantic.Field(description="The class of the prompt (PromptTemplate or ChatPromptTemplate).")
21
- prompt_type: str = pydantic.Field(description="The type of prompt.")
22
- template: typing.Optional[str]
23
- message_templates: typing.Optional[typing.List[AppSchemaChatChatMessage]]
24
-
25
- def json(self, **kwargs: typing.Any) -> str:
26
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
27
- return super().json(**kwargs_with_defaults)
28
-
29
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
30
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
31
- return super().dict(**kwargs_with_defaults)
32
-
33
- class Config:
34
- frozen = True
35
- smart_union = True
36
- json_encoders = {dt.datetime: serialize_datetime}