llama-cloud 0.0.8__py3-none-any.whl → 0.0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of llama-cloud has been flagged as a potentially problematic release.
- llama_cloud/__init__.py +22 -0
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +13 -1
- llama_cloud/resources/data_sinks/client.py +40 -8
- llama_cloud/resources/data_sources/client.py +48 -12
- llama_cloud/resources/data_sources/types/data_source_update_component_one.py +4 -0
- llama_cloud/resources/extraction/client.py +55 -38
- llama_cloud/resources/organizations/__init__.py +2 -0
- llama_cloud/resources/organizations/client.py +867 -0
- llama_cloud/resources/parsing/client.py +104 -0
- llama_cloud/resources/pipelines/client.py +358 -24
- llama_cloud/resources/projects/client.py +28 -8
- llama_cloud/types/__init__.py +20 -0
- llama_cloud/types/chat_data.py +38 -0
- llama_cloud/types/cloud_azure_ai_search_vector_store.py +1 -1
- llama_cloud/types/cloud_confluence_data_source.py +45 -0
- llama_cloud/types/cloud_jira_data_source.py +43 -0
- llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
- llama_cloud/types/configurable_data_source_names.py +8 -0
- llama_cloud/types/data_source_component_one.py +4 -0
- llama_cloud/types/data_source_create_component_one.py +4 -0
- llama_cloud/types/eval_dataset_job_record.py +1 -0
- llama_cloud/types/extraction_job.py +35 -0
- llama_cloud/types/extraction_schema.py +1 -2
- llama_cloud/types/llama_parse_parameters.py +5 -0
- llama_cloud/types/organization.py +38 -0
- llama_cloud/types/organization_create.py +35 -0
- llama_cloud/types/pipeline.py +0 -3
- llama_cloud/types/pipeline_create.py +0 -3
- llama_cloud/types/pipeline_data_source_component_one.py +4 -0
- llama_cloud/types/preset_retrieval_params.py +5 -0
- llama_cloud/types/project.py +1 -1
- llama_cloud/types/retrieval_mode.py +29 -0
- llama_cloud/types/user_organization.py +49 -0
- llama_cloud/types/user_organization_create.py +36 -0
- llama_cloud/types/user_organization_delete.py +36 -0
- {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/METADATA +2 -1
- {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/RECORD +40 -28
- {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/WHEEL +1 -1
- {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/LICENSE +0 -0
llama_cloud/resources/projects/client.py
CHANGED

@@ -34,11 +34,15 @@ class ProjectsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper

-    def list_projects(self, *, project_name: typing.Optional[str] = None) -> typing.List[Project]:
+    def list_projects(
+        self, *, organization_id: typing.Optional[str] = None, project_name: typing.Optional[str] = None
+    ) -> typing.List[Project]:
         """
         List projects or get one by name

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - project_name: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
@@ -51,7 +55,7 @@ class ProjectsClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
-            params=remove_none_from_dict({"project_name": project_name}),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_name": project_name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -65,11 +69,13 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def create_project(self, *, request: ProjectCreate) -> Project:
+    def create_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Create a new project.

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate
@@ -87,6 +93,7 @@ class ProjectsClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -101,12 +108,14 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def upsert_project(self, *, request: ProjectCreate) -> Project:
+    def upsert_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Upsert a project.
         Updates if a project with the same name already exists. Otherwise, creates a new project.

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate
@@ -124,6 +133,7 @@ class ProjectsClient:
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -620,11 +630,15 @@ class AsyncProjectsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper

-    async def list_projects(self, *, project_name: typing.Optional[str] = None) -> typing.List[Project]:
+    async def list_projects(
+        self, *, organization_id: typing.Optional[str] = None, project_name: typing.Optional[str] = None
+    ) -> typing.List[Project]:
         """
         List projects or get one by name

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - project_name: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -637,7 +651,7 @@ class AsyncProjectsClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
-            params=remove_none_from_dict({"project_name": project_name}),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_name": project_name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -651,11 +665,13 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def create_project(self, *, request: ProjectCreate) -> Project:
+    async def create_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Create a new project.

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate
@@ -673,6 +689,7 @@ class AsyncProjectsClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -687,12 +704,14 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def upsert_project(self, *, request: ProjectCreate) -> Project:
+    async def upsert_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Upsert a project.
         Updates if a project with the same name already exists. Otherwise, creates a new project.

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate
@@ -710,6 +729,7 @@ class AsyncProjectsClient:
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
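Taken together, these hunks thread an optional organization_id through all six project methods, sent as a query parameter. A usage sketch under assumptions: the constructor's token keyword is assumed, the ID values are placeholders, and ProjectCreate is assumed to accept a name.

from llama_cloud import ProjectCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="llx-...")  # token keyword assumed; value is a placeholder

# Omitting organization_id keeps the 0.0.8 behavior; passing it scopes the call.
projects = client.projects.list_projects(organization_id="org-123")
project = client.projects.upsert_project(
    organization_id="org-123",
    request=ProjectCreate(name="demo"),  # assumes ProjectCreate accepts a name
)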
llama_cloud/types/__init__.py
CHANGED
@@ -4,12 +4,15 @@ from .azure_open_ai_embedding import AzureOpenAiEmbedding
 from .base import Base
 from .base_prompt_template import BasePromptTemplate
 from .bedrock_embedding import BedrockEmbedding
+from .chat_data import ChatData
 from .chat_message import ChatMessage
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_chroma_vector_store import CloudChromaVectorStore
+from .cloud_confluence_data_source import CloudConfluenceDataSource
 from .cloud_document import CloudDocument
 from .cloud_document_create import CloudDocumentCreate
+from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
 from .cloud_pinecone_vector_store import CloudPineconeVectorStore
@@ -53,6 +56,7 @@ from .eval_llm_model_data import EvalLlmModelData
 from .eval_question import EvalQuestion
 from .eval_question_create import EvalQuestionCreate
 from .eval_question_result import EvalQuestionResult
+from .extraction_job import ExtractionJob
 from .extraction_result import ExtractionResult
 from .extraction_result_data_value import ExtractionResultDataValue
 from .extraction_schema import ExtractionSchema
@@ -86,6 +90,8 @@ from .metric_result import MetricResult
 from .node_parser import NodeParser
 from .object_type import ObjectType
 from .open_ai_embedding import OpenAiEmbedding
+from .organization import Organization
+from .organization_create import OrganizationCreate
 from .parser_languages import ParserLanguages
 from .parsing_history_item import ParsingHistoryItem
 from .parsing_job import ParsingJob
@@ -116,6 +122,7 @@ from .prompt_mixin_prompts import PromptMixinPrompts
 from .prompt_spec import PromptSpec
 from .pydantic_program_mode import PydanticProgramMode
 from .related_node_info import RelatedNodeInfo
+from .retrieval_mode import RetrievalMode
 from .retrieve_results import RetrieveResults
 from .sentence_splitter import SentenceSplitter
 from .simple_file_node_parser import SimpleFileNodeParser
@@ -127,6 +134,9 @@ from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
+from .user_organization import UserOrganization
+from .user_organization_create import UserOrganizationCreate
+from .user_organization_delete import UserOrganizationDelete
 from .validation_error import ValidationError
 from .validation_error_loc_item import ValidationErrorLocItem

@@ -135,12 +145,15 @@ __all__ = [
     "Base",
     "BasePromptTemplate",
     "BedrockEmbedding",
+    "ChatData",
     "ChatMessage",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudChromaVectorStore",
+    "CloudConfluenceDataSource",
     "CloudDocument",
     "CloudDocumentCreate",
+    "CloudJiraDataSource",
     "CloudNotionPageDataSource",
     "CloudOneDriveDataSource",
     "CloudPineconeVectorStore",
@@ -184,6 +197,7 @@ __all__ = [
     "EvalQuestion",
     "EvalQuestionCreate",
     "EvalQuestionResult",
+    "ExtractionJob",
     "ExtractionResult",
     "ExtractionResultDataValue",
     "ExtractionSchema",
@@ -217,6 +231,8 @@ __all__ = [
     "NodeParser",
     "ObjectType",
    "OpenAiEmbedding",
+    "Organization",
+    "OrganizationCreate",
     "ParserLanguages",
     "ParsingHistoryItem",
     "ParsingJob",
@@ -247,6 +263,7 @@ __all__ = [
     "PromptSpec",
     "PydanticProgramMode",
     "RelatedNodeInfo",
+    "RetrievalMode",
     "RetrieveResults",
     "SentenceSplitter",
     "SimpleFileNodeParser",
@@ -258,6 +275,9 @@ __all__ = [
     "TextNodeWithScore",
     "TokenTextSplitter",
     "TransformationCategoryNames",
+    "UserOrganization",
+    "UserOrganizationCreate",
+    "UserOrganizationDelete",
     "ValidationError",
     "ValidationErrorLocItem",
 ]
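Given these re-exports (and the +22 lines in the top-level llama_cloud/__init__.py from the summary above, presumably mirroring them), the new models should import straight from the package root:

from llama_cloud import (
    ChatData,
    CloudConfluenceDataSource,
    CloudJiraDataSource,
    ExtractionJob,
    Organization,
    OrganizationCreate,
    RetrievalMode,
    UserOrganization,
    UserOrganizationCreate,
    UserOrganizationDelete,
)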
llama_cloud/types/chat_data.py
ADDED

@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .preset_retrieval_params import PresetRetrievalParams
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ChatData(pydantic.BaseModel):
+    """
+    Base schema model for BaseComponent classes used in the platform.
+    Comes with special serialization logic for types used commonly in platform codebase.
+    """
+
+    retrieval_parameters: PresetRetrievalParams
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
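ChatData pairs a required PresetRetrievalParams with the usual frozen, alias-aware serialization. A minimal sketch, assuming PresetRetrievalParams (extended later in this diff) can be constructed with defaults:

from llama_cloud import ChatData, PresetRetrievalParams

chat_data = ChatData(retrieval_parameters=PresetRetrievalParams())
print(chat_data.json())  # by_alias/exclude_unset defaults come from the json() override above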
llama_cloud/types/cloud_azure_ai_search_vector_store.py
CHANGED

@@ -24,7 +24,7 @@ class CloudAzureAiSearchVectorStore(pydantic.BaseModel):
     search_service_endpoint: str
     search_service_api_version: typing.Optional[str]
     index_name: typing.Optional[str]
-    filterable_metadata_field_keys: typing.Optional[typing.
+    filterable_metadata_field_keys: typing.Optional[typing.Dict[str, typing.Any]]
     embedding_dimension: typing.Optional[int]
     class_name: typing.Optional[str]

llama_cloud/types/cloud_confluence_data_source.py
ADDED

@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CloudConfluenceDataSource(pydantic.BaseModel):
+    """
+    Base component object to capture class names.
+    """
+
+    server_url: str = pydantic.Field(description="The server URL of the Confluence instance.")
+    authentication_mechanism: str = pydantic.Field(
+        description="Type of Authentication for connecting to Confluence APIs."
+    )
+    user_name: typing.Optional[str] = pydantic.Field(description="The username to use for authentication.")
+    api_token: typing.Optional[str] = pydantic.Field(description="The API token to use for authentication.")
+    space_key: typing.Optional[str] = pydantic.Field(description="The space key to read from.")
+    page_ids: typing.Optional[str] = pydantic.Field(description="The page IDs of the Confluence to read from.")
+    cql: typing.Optional[str] = pydantic.Field(description="The CQL query to use for fetching pages.")
+    label: typing.Optional[str] = pydantic.Field(description="The label to use for fetching pages.")
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
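Every field of the new model is visible above: server_url and authentication_mechanism are required, the rest optional. A construction sketch with placeholder values (the accepted authentication_mechanism strings are not listed in this diff):

from llama_cloud import CloudConfluenceDataSource

confluence_source = CloudConfluenceDataSource(
    server_url="https://example.atlassian.net/wiki",
    authentication_mechanism="basic",  # placeholder value
    user_name="user@example.com",
    api_token="...",
    space_key="ENG",  # optional selectors: space_key, page_ids, cql, label
)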
llama_cloud/types/cloud_jira_data_source.py
ADDED

@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CloudJiraDataSource(pydantic.BaseModel):
+    """
+    Cloud Jira Data Source integrating JiraReader.
+    """
+
+    email: typing.Optional[str] = pydantic.Field(description="The email address to use for authentication.")
+    api_token: typing.Optional[str] = pydantic.Field(
+        description="The API/ Access Token used for Basic, PAT and OAuth2 authentication."
+    )
+    server_url: typing.Optional[str] = pydantic.Field(description="The server url for Jira Cloud.")
+    cloud_id: typing.Optional[str] = pydantic.Field(description="The cloud ID, used in case of OAuth2.")
+    authentication_mechanism: str = pydantic.Field(description="Type of Authentication for connecting to Jira APIs.")
+    query: str = pydantic.Field(description="JQL (Jira Query Language) query to search.")
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
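The model mirrors the Confluence one; only authentication_mechanism and query are required. A sketch with placeholder credentials (accepted authentication_mechanism values are likewise not listed here):

from llama_cloud import CloudJiraDataSource

jira_source = CloudJiraDataSource(
    email="user@example.com",
    api_token="...",  # used for Basic, PAT and OAuth2 auth per the field description
    server_url="https://example.atlassian.net",
    authentication_mechanism="basic",  # placeholder value
    query="project = ENG ORDER BY created DESC",  # JQL, per the field description
)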
llama_cloud/types/cloud_sharepoint_data_source.py
CHANGED

@@ -22,6 +22,7 @@ class CloudSharepointDataSource(pydantic.BaseModel):
     site_name: str = pydantic.Field(description="The name of the SharePoint site to download from.")
     folder_path: typing.Optional[str] = pydantic.Field(description="The path of the Sharepoint folder to read from.")
     folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the Sharepoint folder to read from.")
+    drive_name: typing.Optional[str] = pydantic.Field(description="The name of the Sharepoint drive to read from.")
     client_id: str = pydantic.Field(description="The client ID to use for authentication.")
     client_secret: str = pydantic.Field(description="The client secret to use for authentication.")
     tenant_id: str = pydantic.Field(description="The tenant ID to use for authentication.")
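Only the optional drive_name is new. The visible required fields make a construction sketch possible, though further required fields may exist above this hunk; values are placeholders:

from llama_cloud import CloudSharepointDataSource

sharepoint_source = CloudSharepointDataSource(
    site_name="engineering",
    drive_name="Documents",  # new in 0.0.10; optional
    client_id="...",
    client_secret="...",
    tenant_id="...",
)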
llama_cloud/types/configurable_data_source_names.py
CHANGED

@@ -17,6 +17,8 @@ class ConfigurableDataSourceNames(str, enum.Enum):
     MICROSOFT_SHAREPOINT = "MICROSOFT_SHAREPOINT"
     SLACK = "SLACK"
     NOTION_PAGE = "NOTION_PAGE"
+    CONFLUENCE = "CONFLUENCE"
+    JIRA = "JIRA"

     def visit(
         self,
@@ -26,6 +28,8 @@ class ConfigurableDataSourceNames(str, enum.Enum):
         microsoft_sharepoint: typing.Callable[[], T_Result],
         slack: typing.Callable[[], T_Result],
         notion_page: typing.Callable[[], T_Result],
+        confluence: typing.Callable[[], T_Result],
+        jira: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ConfigurableDataSourceNames.S_3:
             return s_3()
@@ -39,3 +43,7 @@ class ConfigurableDataSourceNames(str, enum.Enum):
             return slack()
         if self is ConfigurableDataSourceNames.NOTION_PAGE:
             return notion_page()
+        if self is ConfigurableDataSourceNames.CONFLUENCE:
+            return confluence()
+        if self is ConfigurableDataSourceNames.JIRA:
+            return jira()
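Because visit() takes one keyword callback per member, the two added members are a breaking change for any existing visit() caller, which must now also supply confluence and jira callbacks. The plain string-enum behavior is unchanged:

from llama_cloud.types.configurable_data_source_names import ConfigurableDataSourceNames

# The enum subclasses str, so members compare equal to their wire values.
assert ConfigurableDataSourceNames.CONFLUENCE == "CONFLUENCE"
assert ConfigurableDataSourceNames("JIRA") is ConfigurableDataSourceNames.JIRA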
llama_cloud/types/data_source_component_one.py
CHANGED

@@ -3,6 +3,8 @@
 import typing

 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_confluence_data_source import CloudConfluenceDataSource
+from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
 from .cloud_s_3_data_source import CloudS3DataSource
@@ -16,4 +18,6 @@ DataSourceComponentOne = typing.Union[
     CloudSharepointDataSource,
     CloudSlackDataSource,
     CloudNotionPageDataSource,
+    CloudConfluenceDataSource,
+    CloudJiraDataSource,
 ]
llama_cloud/types/data_source_create_component_one.py
CHANGED

@@ -3,6 +3,8 @@
 import typing

 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_confluence_data_source import CloudConfluenceDataSource
+from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
 from .cloud_s_3_data_source import CloudS3DataSource
@@ -16,4 +18,6 @@ DataSourceCreateComponentOne = typing.Union[
     CloudSharepointDataSource,
     CloudSlackDataSource,
     CloudNotionPageDataSource,
+    CloudConfluenceDataSource,
+    CloudJiraDataSource,
 ]
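Both unions gain the same two members, so functions typed against them accept the new sources. A type-level sketch:

from llama_cloud import CloudJiraDataSource
from llama_cloud.types.data_source_create_component_one import DataSourceCreateComponentOne

def describe(component: DataSourceCreateComponentOne) -> str:
    # In 0.0.8 a CloudJiraDataSource argument here would fail type checking.
    return type(component).__name__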
llama_cloud/types/eval_dataset_job_record.py
CHANGED

@@ -38,6 +38,7 @@ class EvalDatasetJobRecord(pydantic.BaseModel):
         description="The correlation ID for this job. Used for tracking the job across services."
     )
     parent_job_execution_id: typing.Optional[str] = pydantic.Field(description="The ID of the parent job execution.")
+    user_id: typing.Optional[str] = pydantic.Field(description="The ID of the user that created this job")
     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
     id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
     status: StatusEnum
llama_cloud/types/extraction_job.py
ADDED

@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .file import File
+from .status_enum import StatusEnum
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractionJob(pydantic.BaseModel):
+    id: str = pydantic.Field(description="The id of the extraction job")
+    status: StatusEnum = pydantic.Field(description="The status of the extraction job")
+    file: File = pydantic.Field(description="The file that the extract was extracted from")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
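ExtractionJob is a thin status record: an id, a StatusEnum, and the source File. A reading sketch, assuming the job was fetched via the extraction client (whose updated methods are not shown in this section):

from llama_cloud import ExtractionJob

def is_finished(job: ExtractionJob) -> bool:
    # Assumes StatusEnum subclasses str like ConfigurableDataSourceNames above;
    # "SUCCESS" is a guessed member value, not shown in this diff.
    return job.status == "SUCCESS"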
llama_cloud/types/extraction_schema.py
CHANGED

@@ -25,10 +25,9 @@ class ExtractionSchema(pydantic.BaseModel):
     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="The name of the extraction schema")
     project_id: str = pydantic.Field(description="The ID of the project that the extraction schema belongs to")
-    data_schema: typing.Dict[str, ExtractionSchemaDataSchemaValue] = pydantic.Field(
+    data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaDataSchemaValue]] = pydantic.Field(
         description="The schema of the data"
     )
-    openai_api_key: str = pydantic.Field(description="The API key for the OpenAI API")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
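Two migration notes fall out of this hunk: data_schema can now come back as None, and openai_api_key no longer exists on the model. A defensive-access sketch:

import typing

from llama_cloud import ExtractionSchema

def schema_field_names(schema: ExtractionSchema) -> typing.List[str]:
    return sorted((schema.data_schema or {}).keys())  # data_schema is Optional as of 0.0.10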
llama_cloud/types/llama_parse_parameters.py
CHANGED

@@ -33,6 +33,11 @@ class LlamaParseParameters(pydantic.BaseModel):
     page_separator: typing.Optional[str]
     bounding_box: typing.Optional[str]
     target_pages: typing.Optional[str]
+    use_vendor_multimodal_model: typing.Optional[str]
+    vendor_multimodal_model_name: typing.Optional[str]
+    vendor_multimodal_api_key: typing.Optional[str]
+    page_prefix: typing.Optional[str]
+    page_suffix: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
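All five new fields are optional, so they can be layered onto an otherwise default object (assuming, as the visible fields suggest, that the remaining fields are optional too). Note use_vendor_multimodal_model is typed Optional[str] in this release rather than bool, so the sketch passes a string; all values are placeholders:

from llama_cloud import LlamaParseParameters

parse_params = LlamaParseParameters(
    use_vendor_multimodal_model="true",  # str-typed in this release
    vendor_multimodal_model_name="gpt-4o",  # placeholder model name
    vendor_multimodal_api_key="sk-...",  # placeholder key
    page_prefix="--- page start ---\n",  # placeholder template
    page_suffix="\n--- page end ---",  # placeholder template
)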
llama_cloud/types/organization.py
ADDED

@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class Organization(pydantic.BaseModel):
+    """
+    Schema for an organization.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    name: str = pydantic.Field(description="A name for the organization.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/organization_create.py
ADDED

@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class OrganizationCreate(pydantic.BaseModel):
+    """
+    Schema for creating an organization.
+    """
+
+    name: str = pydantic.Field(description="A name for the organization.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
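OrganizationCreate carries only a name; Organization adds the server-filled id and timestamps. The new organizations client (+867 lines in llama_cloud/resources/organizations/client.py) is not shown in this section, so the commented call below uses a hypothetical method name:

from llama_cloud import OrganizationCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="llx-...")  # token keyword assumed; value is a placeholder
payload = OrganizationCreate(name="acme-inc")
# org = client.organizations.create_organization(request=payload)  # hypothetical method name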
llama_cloud/types/pipeline.py
CHANGED

@@ -43,9 +43,6 @@ class Pipeline(pydantic.BaseModel):
     eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
         description="Eval parameters for the pipeline."
     )
-    llama_parse_enabled: typing.Optional[bool] = pydantic.Field(
-        description="Whether to use LlamaParse during pipeline execution."
-    )
     llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
     )
llama_cloud/types/pipeline_create.py
CHANGED

@@ -40,9 +40,6 @@ class PipelineCreate(pydantic.BaseModel):
     eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
         description="Eval parameters for the pipeline."
     )
-    llama_parse_enabled: typing.Optional[bool] = pydantic.Field(
-        description="Whether to use LlamaParse during pipeline execution."
-    )
     llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
     )
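With llama_parse_enabled dropped from both Pipeline and PipelineCreate, LlamaParse behavior is now controlled solely through llama_parse_parameters. A migration sketch; PipelineCreate's required fields sit outside the visible hunks, so the name field is an assumption:

from llama_cloud import LlamaParseParameters, PipelineCreate

pipeline_req = PipelineCreate(
    name="docs-pipeline",  # assumed required field, not shown in this diff
    # llama_parse_enabled=True,  # removed in 0.0.10
    llama_parse_parameters=LlamaParseParameters(),  # configure parsing here instead
)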
llama_cloud/types/pipeline_data_source_component_one.py
CHANGED

@@ -3,6 +3,8 @@
 import typing

 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_confluence_data_source import CloudConfluenceDataSource
+from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
 from .cloud_s_3_data_source import CloudS3DataSource
@@ -16,4 +18,6 @@ PipelineDataSourceComponentOne = typing.Union[
     CloudSharepointDataSource,
     CloudSlackDataSource,
     CloudNotionPageDataSource,
+    CloudConfluenceDataSource,
+    CloudJiraDataSource,
 ]
llama_cloud/types/preset_retrieval_params.py
CHANGED

@@ -5,6 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .metadata_filters import MetadataFilters
+from .retrieval_mode import RetrievalMode

 try:
     import pydantic
@@ -28,6 +29,10 @@ class PresetRetrievalParams(pydantic.BaseModel):
         description="Alpha value for hybrid retrieval to determine the weights between dense and sparse retrieval. 0 is sparse retrieval and 1 is dense retrieval."
     )
     search_filters: typing.Optional[MetadataFilters] = pydantic.Field(description="Search filters for retrieval.")
+    files_top_k: typing.Optional[int] = pydantic.Field(
+        description="Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content)."
+    )
+    retrieval_mode: typing.Optional[RetrievalMode] = pydantic.Field(description="The retrieval mode for the query.")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
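files_top_k and retrieval_mode land together with the RetrievalMode import. The member name below is inferred from the files_top_k description ("files_via_metadata and files_via_content"); retrieval_mode.py itself (+29 lines) is not shown in this section, so verify the name against it:

from llama_cloud import PresetRetrievalParams, RetrievalMode

params = PresetRetrievalParams(
    retrieval_mode=RetrievalMode.FILES_VIA_CONTENT,  # inferred member name, not shown in this diff
    files_top_k=3,  # per its description, only used by the files_via_* modes
)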
llama_cloud/types/project.py
CHANGED

@@ -24,7 +24,7 @@ class Project(pydantic.BaseModel):
     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     ad_hoc_eval_dataset_id: typing.Optional[str]
-
+    organization_id: str = pydantic.Field(description="The Organization ID the project is under.")
     is_default: typing.Optional[bool] = pydantic.Field(
         description="Whether this project is the default project for the user."
    )