llama-cloud 0.1.23__py3-none-any.whl → 0.1.25__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.

This version of llama-cloud has been flagged as potentially problematic.

Files changed (31)
  1. llama_cloud/__init__.py +4 -8
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +0 -2
  4. llama_cloud/resources/files/client.py +5 -4
  5. llama_cloud/resources/jobs/client.py +8 -0
  6. llama_cloud/resources/llama_extract/client.py +92 -24
  7. llama_cloud/resources/organizations/client.py +14 -4
  8. llama_cloud/resources/parsing/client.py +8 -0
  9. llama_cloud/resources/pipelines/client.py +20 -0
  10. llama_cloud/types/__init__.py +4 -6
  11. llama_cloud/types/composite_retrieval_result.py +5 -1
  12. llama_cloud/types/extract_config.py +3 -0
  13. llama_cloud/types/extract_models.py +20 -8
  14. llama_cloud/types/{llm_config_result.py → file_id_presigned_url.py} +9 -5
  15. llama_cloud/types/legacy_parse_job_config.py +1 -0
  16. llama_cloud/types/llama_extract_settings.py +3 -1
  17. llama_cloud/types/llama_parse_parameters.py +1 -0
  18. llama_cloud/types/page_figure_metadata.py +1 -0
  19. llama_cloud/types/{llm_configs_response.py → page_figure_node_with_score.py} +9 -4
  20. llama_cloud/types/parse_job_config.py +1 -0
  21. llama_cloud/types/preset_retrieval_params.py +6 -0
  22. llama_cloud/types/retrieve_results.py +5 -1
  23. llama_cloud/types/supported_llm_model_names.py +12 -4
  24. llama_cloud/types/user_organization_delete.py +1 -0
  25. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.25.dist-info}/METADATA +1 -1
  26. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.25.dist-info}/RECORD +28 -31
  27. llama_cloud/resources/admin/__init__.py +0 -2
  28. llama_cloud/resources/admin/client.py +0 -78
  29. llama_cloud/types/llm_config_result_llm_type.py +0 -33
  30. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.25.dist-info}/LICENSE +0 -0
  31. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.25.dist-info}/WHEEL +0 -0
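
The most consequential entries are the deletions at items 27–29: the entire admin resource and its LlmConfigResultLlmType enum are gone in 0.1.25, while items 14 and 19 show llm_config_result.py and llm_configs_response.py being repurposed into file_id_presigned_url.py and page_figure_node_with_score.py. Any code that touched client.admin breaks on upgrade. A minimal defensive sketch, assuming only what this file list shows (the fetch_llm_configs helper name is ours, not the SDK's):

    # Sketch: tolerate the admin resource that 0.1.25 removes.
    # `fetch_llm_configs` is a hypothetical helper, not part of llama-cloud.
    from typing import Any, Optional

    from llama_cloud.client import LlamaCloud


    def fetch_llm_configs(client: LlamaCloud) -> Optional[Any]:
        admin = getattr(client, "admin", None)  # present in 0.1.23, removed in 0.1.25
        if admin is None:
            return None  # this SDK version no longer exposes the endpoint
        return admin.get_llm_configs()

The two deleted files follow in full below.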
--- a/llama_cloud/resources/admin/client.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import urllib.parse
-from json.decoder import JSONDecodeError
-
-from ...core.api_error import ApiError
-from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
-from ...errors.unprocessable_entity_error import UnprocessableEntityError
-from ...types.http_validation_error import HttpValidationError
-from ...types.llm_configs_response import LlmConfigsResponse
-
-try:
-    import pydantic
-    if pydantic.__version__.startswith("1."):
-        raise ImportError
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
-
-class AdminClient:
-    def __init__(self, *, client_wrapper: SyncClientWrapper):
-        self._client_wrapper = client_wrapper
-
-    def get_llm_configs(self) -> LlmConfigsResponse:
-        """
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.admin.get_llm_configs()
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/admin/llm-configs"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(LlmConfigsResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-
-class AsyncAdminClient:
-    def __init__(self, *, client_wrapper: AsyncClientWrapper):
-        self._client_wrapper = client_wrapper
-
-    async def get_llm_configs(self) -> LlmConfigsResponse:
-        """
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.admin.get_llm_configs()
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/admin/llm-configs"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(LlmConfigsResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
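
Both the sync and async clients follow the same Fern-generated pattern: build the URL with urllib.parse.urljoin, parse 2xx bodies with pydantic.parse_obj_as, map 422 to UnprocessableEntityError, and raise ApiError for everything else. A sketch of how a caller pinned to 0.1.23 would have consumed the async variant, assuming the usual Fern layout in which UnprocessableEntityError derives from ApiError:

    # Sketch: consuming the async admin endpoint on llama-cloud 0.1.23.
    # Catching ApiError also covers UnprocessableEntityError, assuming the
    # usual Fern error hierarchy; on 0.1.25 `client.admin` no longer exists.
    import asyncio

    from llama_cloud.client import AsyncLlamaCloud
    from llama_cloud.core.api_error import ApiError


    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        try:
            configs = await client.admin.get_llm_configs()
            print(configs)
        except ApiError as err:  # any non-2xx response ends up here
            print(f"admin/llm-configs failed with status {err.status_code}")


    asyncio.run(main())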
--- a/llama_cloud/types/llm_config_result_llm_type.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import enum
-import typing
-
-T_Result = typing.TypeVar("T_Result")
-
-
-class LlmConfigResultLlmType(str, enum.Enum):
-    OPENAI = "openai"
-    ANTHROPIC = "anthropic"
-    GEMINI = "gemini"
-    AWS_BEDROCK = "aws_bedrock"
-    AZURE_OPENAI = "azure_openai"
-
-    def visit(
-        self,
-        openai: typing.Callable[[], T_Result],
-        anthropic: typing.Callable[[], T_Result],
-        gemini: typing.Callable[[], T_Result],
-        aws_bedrock: typing.Callable[[], T_Result],
-        azure_openai: typing.Callable[[], T_Result],
-    ) -> T_Result:
-        if self is LlmConfigResultLlmType.OPENAI:
-            return openai()
-        if self is LlmConfigResultLlmType.ANTHROPIC:
-            return anthropic()
-        if self is LlmConfigResultLlmType.GEMINI:
-            return gemini()
-        if self is LlmConfigResultLlmType.AWS_BEDROCK:
-            return aws_bedrock()
-        if self is LlmConfigResultLlmType.AZURE_OPENAI:
-            return azure_openai()
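
visit is Fern's exhaustive visitor for string enums: one zero-argument callback per member, dispatched by identity, so adding a member later would surface as a TypeError at every call site rather than a silent fallthrough. A small usage sketch against 0.1.23 (the returned labels are illustrative, not from the SDK):

    # Sketch: branching on the enum removed in 0.1.25 via its visitor.
    from llama_cloud.types.llm_config_result_llm_type import LlmConfigResultLlmType

    llm_type = LlmConfigResultLlmType.AWS_BEDROCK
    label = llm_type.visit(
        openai=lambda: "OpenAI",
        anthropic=lambda: "Anthropic",
        gemini=lambda: "Gemini",
        aws_bedrock=lambda: "AWS Bedrock",  # selected: self is AWS_BEDROCK
        azure_openai=lambda: "Azure OpenAI",
    )
    print(label)  # AWS Bedrock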