llama-cloud 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_cloud/__init__.py +34 -4
- llama_cloud/client.py +6 -0
- llama_cloud/resources/__init__.py +16 -1
- llama_cloud/resources/data_sinks/client.py +40 -8
- llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +2 -0
- llama_cloud/resources/data_sources/client.py +48 -12
- llama_cloud/resources/data_sources/types/data_source_update_component_one.py +6 -4
- llama_cloud/resources/extraction/__init__.py +5 -0
- llama_cloud/resources/extraction/client.py +632 -0
- llama_cloud/resources/extraction/types/__init__.py +5 -0
- llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py +7 -0
- llama_cloud/resources/organizations/__init__.py +2 -0
- llama_cloud/resources/organizations/client.py +786 -0
- llama_cloud/resources/pipelines/client.py +312 -12
- llama_cloud/resources/projects/client.py +28 -8
- llama_cloud/types/__init__.py +28 -4
- llama_cloud/types/azure_open_ai_embedding.py +3 -0
- llama_cloud/types/{cloud_google_drive_data_source.py → chat_params.py} +5 -6
- llama_cloud/types/cloud_azure_ai_search_vector_store.py +42 -0
- llama_cloud/types/cloud_jira_data_source.py +43 -0
- llama_cloud/types/{cloud_gcs_data_source.py → cloud_notion_page_data_source.py} +4 -6
- llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
- llama_cloud/types/cloud_slack_data_source.py +42 -0
- llama_cloud/types/configurable_data_sink_names.py +4 -0
- llama_cloud/types/configurable_data_source_names.py +12 -8
- llama_cloud/types/data_sink_component_one.py +2 -0
- llama_cloud/types/data_sink_create_component_one.py +2 -0
- llama_cloud/types/data_source_component_one.py +6 -4
- llama_cloud/types/data_source_create_component_one.py +6 -4
- llama_cloud/types/eval_dataset_job_record.py +1 -0
- llama_cloud/types/extraction_result.py +42 -0
- llama_cloud/types/extraction_result_data_value.py +5 -0
- llama_cloud/types/extraction_schema.py +43 -0
- llama_cloud/types/extraction_schema_data_schema_value.py +7 -0
- llama_cloud/types/organization.py +38 -0
- llama_cloud/types/organization_create.py +35 -0
- llama_cloud/types/pipeline_data_source_component_one.py +6 -4
- llama_cloud/types/preset_retrieval_params.py +5 -0
- llama_cloud/types/project.py +1 -1
- llama_cloud/types/retrieval_mode.py +29 -0
- llama_cloud/types/text_node.py +1 -0
- llama_cloud/types/user_organization.py +40 -0
- llama_cloud/types/user_organization_create.py +36 -0
- {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/METADATA +1 -1
- {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/RECORD +47 -29
- {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/LICENSE +0 -0
- {llama_cloud-0.0.7.dist-info → llama_cloud-0.0.9.dist-info}/WHEEL +0 -0
llama_cloud/resources/projects/client.py
CHANGED

@@ -34,11 +34,15 @@ class ProjectsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper

-    def list_projects(self, *, project_name: typing.Optional[str] = None) -> typing.List[Project]:
+    def list_projects(
+        self, *, organization_id: typing.Optional[str] = None, project_name: typing.Optional[str] = None
+    ) -> typing.List[Project]:
         """
         List projects or get one by name

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - project_name: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud

@@ -51,7 +55,7 @@ class ProjectsClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
-            params=remove_none_from_dict({"project_name": project_name}),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_name": project_name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )

@@ -65,11 +69,13 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def create_project(self, *, request: ProjectCreate) -> Project:
+    def create_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Create a new project.

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate

@@ -87,6 +93,7 @@ class ProjectsClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,

@@ -101,12 +108,14 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def upsert_project(self, *, request: ProjectCreate) -> Project:
+    def upsert_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Upsert a project.
         Updates if a project with the same name already exists. Otherwise, creates a new project.

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate

@@ -124,6 +133,7 @@ class ProjectsClient:
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,

@@ -620,11 +630,15 @@ class AsyncProjectsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper

-    async def list_projects(self, *, project_name: typing.Optional[str] = None) -> typing.List[Project]:
+    async def list_projects(
+        self, *, organization_id: typing.Optional[str] = None, project_name: typing.Optional[str] = None
+    ) -> typing.List[Project]:
         """
         List projects or get one by name

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - project_name: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -637,7 +651,7 @@ class AsyncProjectsClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
-            params=remove_none_from_dict({"project_name": project_name}),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_name": project_name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )

@@ -651,11 +665,13 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def create_project(self, *, request: ProjectCreate) -> Project:
+    async def create_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Create a new project.

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate

@@ -673,6 +689,7 @@ class AsyncProjectsClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,

@@ -687,12 +704,14 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def upsert_project(self, *, request: ProjectCreate) -> Project:
+    async def upsert_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Upsert a project.
         Updates if a project with the same name already exists. Otherwise, creates a new project.

         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate

@@ -710,6 +729,7 @@ class AsyncProjectsClient:
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
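For readers tracking the API change, here is a minimal usage sketch of the new optional organization_id parameter, assuming the client wiring shown in the generated docstrings (client.projects.*); the token and IDs below are placeholders.

from llama_cloud import ProjectCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_API_TOKEN")  # placeholder credentials

# organization_id is optional everywhere, so omitting it keeps the 0.0.7 behavior.
projects = client.projects.list_projects(organization_id="org_123")

# create_project and upsert_project now forward it as a query parameter.
project = client.projects.upsert_project(
    organization_id="org_123",
    request=ProjectCreate(name="my-project"),
)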
llama_cloud/types/__init__.py
CHANGED

@@ -5,18 +5,21 @@ from .base import Base
 from .base_prompt_template import BasePromptTemplate
 from .bedrock_embedding import BedrockEmbedding
 from .chat_message import ChatMessage
+from .chat_params import ChatParams
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_chroma_vector_store import CloudChromaVectorStore
 from .cloud_document import CloudDocument
 from .cloud_document_create import CloudDocumentCreate
-from .cloud_gcs_data_source import CloudGcsDataSource
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
+from .cloud_jira_data_source import CloudJiraDataSource
+from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
 from .cloud_pinecone_vector_store import CloudPineconeVectorStore
 from .cloud_postgres_vector_store import CloudPostgresVectorStore
 from .cloud_qdrant_vector_store import CloudQdrantVectorStore
 from .cloud_s_3_data_source import CloudS3DataSource
 from .cloud_sharepoint_data_source import CloudSharepointDataSource
+from .cloud_slack_data_source import CloudSlackDataSource
 from .cloud_weaviate_vector_store import CloudWeaviateVectorStore
 from .code_splitter import CodeSplitter
 from .cohere_embedding import CohereEmbedding

@@ -52,6 +55,10 @@ from .eval_llm_model_data import EvalLlmModelData
 from .eval_question import EvalQuestion
 from .eval_question_create import EvalQuestionCreate
 from .eval_question_result import EvalQuestionResult
+from .extraction_result import ExtractionResult
+from .extraction_result_data_value import ExtractionResultDataValue
+from .extraction_schema import ExtractionSchema
+from .extraction_schema_data_schema_value import ExtractionSchemaDataSchemaValue
 from .file import File
 from .file_resource_info_value import FileResourceInfoValue
 from .filter_condition import FilterCondition

@@ -81,6 +88,8 @@ from .metric_result import MetricResult
 from .node_parser import NodeParser
 from .object_type import ObjectType
 from .open_ai_embedding import OpenAiEmbedding
+from .organization import Organization
+from .organization_create import OrganizationCreate
 from .parser_languages import ParserLanguages
 from .parsing_history_item import ParsingHistoryItem
 from .parsing_job import ParsingJob

@@ -111,6 +120,7 @@ from .prompt_mixin_prompts import PromptMixinPrompts
 from .prompt_spec import PromptSpec
 from .pydantic_program_mode import PydanticProgramMode
 from .related_node_info import RelatedNodeInfo
+from .retrieval_mode import RetrievalMode
 from .retrieve_results import RetrieveResults
 from .sentence_splitter import SentenceSplitter
 from .simple_file_node_parser import SimpleFileNodeParser

@@ -122,6 +132,8 @@ from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
+from .user_organization import UserOrganization
+from .user_organization_create import UserOrganizationCreate
 from .validation_error import ValidationError
 from .validation_error_loc_item import ValidationErrorLocItem

@@ -131,18 +143,21 @@ __all__ = [
     "BasePromptTemplate",
     "BedrockEmbedding",
     "ChatMessage",
+    "ChatParams",
     "CloudAzStorageBlobDataSource",
+    "CloudAzureAiSearchVectorStore",
     "CloudChromaVectorStore",
     "CloudDocument",
     "CloudDocumentCreate",
-    "CloudGcsDataSource",
-    "CloudGoogleDriveDataSource",
+    "CloudJiraDataSource",
+    "CloudNotionPageDataSource",
     "CloudOneDriveDataSource",
     "CloudPineconeVectorStore",
     "CloudPostgresVectorStore",
     "CloudQdrantVectorStore",
     "CloudS3DataSource",
     "CloudSharepointDataSource",
+    "CloudSlackDataSource",
     "CloudWeaviateVectorStore",
     "CodeSplitter",
     "CohereEmbedding",

@@ -178,6 +193,10 @@ __all__ = [
     "EvalQuestion",
     "EvalQuestionCreate",
     "EvalQuestionResult",
+    "ExtractionResult",
+    "ExtractionResultDataValue",
+    "ExtractionSchema",
+    "ExtractionSchemaDataSchemaValue",
     "File",
     "FileResourceInfoValue",
     "FilterCondition",

@@ -207,6 +226,8 @@ __all__ = [
     "NodeParser",
     "ObjectType",
     "OpenAiEmbedding",
+    "Organization",
+    "OrganizationCreate",
     "ParserLanguages",
     "ParsingHistoryItem",
     "ParsingJob",

@@ -237,6 +258,7 @@ __all__ = [
     "PromptSpec",
     "PydanticProgramMode",
     "RelatedNodeInfo",
+    "RetrievalMode",
     "RetrieveResults",
     "SentenceSplitter",
     "SimpleFileNodeParser",

@@ -248,6 +270,8 @@ __all__ = [
     "TextNodeWithScore",
     "TokenTextSplitter",
     "TransformationCategoryNames",
+    "UserOrganization",
+    "UserOrganizationCreate",
     "ValidationError",
     "ValidationErrorLocItem",
 ]
llama_cloud/types/azure_open_ai_embedding.py
CHANGED

@@ -62,6 +62,9 @@ class AzureOpenAiEmbedding(pydantic.BaseModel):
     )
     azure_endpoint: typing.Optional[str] = pydantic.Field(description="The Azure endpoint to use.")
     azure_deployment: typing.Optional[str] = pydantic.Field(description="The Azure deployment to use.")
+    use_azure_ad: bool = pydantic.Field(
+        description="Indicates if Microsoft Entra ID (former Azure AD) is used for token authentication"
+    )
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
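A sketch of the regenerated model in use. Note that use_azure_ad is declared without a default, so it is a required field; this example assumes the remaining fields are optional, as the ones visible in this hunk are.

from llama_cloud import AzureOpenAiEmbedding

embedding = AzureOpenAiEmbedding(
    azure_endpoint="https://example.openai.azure.com",  # placeholder endpoint
    azure_deployment="my-embedding-deployment",         # placeholder deployment
    use_azure_ad=False,  # required on the 0.0.9 model; 0.0.7-style calls that omit it now fail validation
)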
llama_cloud/types/{cloud_google_drive_data_source.py → chat_params.py}
RENAMED

@@ -4,6 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from .chat_message import ChatMessage

 try:
     import pydantic

@@ -14,15 +15,13 @@ except ImportError:
     import pydantic  # type: ignore


-class CloudGoogleDriveDataSource(pydantic.BaseModel):
+class ChatParams(pydantic.BaseModel):
     """
-    Base component object to capture class names.
+    Base schema model for BaseComponent classes used in the platform.
+    Comes with special serialization logic for types used commonly in platform codebase.
     """

-
-    service_account_key: typing.Dict[str, typing.Any] = pydantic.Field(
-        description="The service account key JSON to use for authentication."
-    )
+    messages: typing.List[ChatMessage]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
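A minimal construction sketch for the new ChatParams model; the message content is illustrative, and the role string is assumed to coerce into the SDK's chat-role enum, as pydantic str-enums normally do.

from llama_cloud import ChatMessage, ChatParams

params = ChatParams(
    messages=[
        ChatMessage(role="user", content="Summarize the latest upload."),  # illustrative message
    ]
)
print(params.json())  # serialized with by_alias/exclude_unset, per the generated json()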
llama_cloud/types/cloud_azure_ai_search_vector_store.py
ADDED

@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CloudAzureAiSearchVectorStore(pydantic.BaseModel):
+    """
+    Cloud Azure AI Search Vector Store.
+    """
+
+    supports_nested_metadata_filters: typing.Optional[bool]
+    search_service_api_key: str
+    search_service_endpoint: str
+    search_service_api_version: typing.Optional[str]
+    index_name: typing.Optional[str]
+    filterable_metadata_field_keys: typing.Optional[typing.List[str]]
+    embedding_dimension: typing.Optional[int]
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
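A construction sketch for the new sink model; only the two search_service_* fields are required, and all values here are placeholders.

from llama_cloud import CloudAzureAiSearchVectorStore

sink = CloudAzureAiSearchVectorStore(
    search_service_api_key="YOUR_SEARCH_KEY",                      # placeholder
    search_service_endpoint="https://example.search.windows.net",  # placeholder
    index_name="my-index",     # optional
    embedding_dimension=1536,  # optional; a common OpenAI embedding width
)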
llama_cloud/types/cloud_jira_data_source.py
ADDED

@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CloudJiraDataSource(pydantic.BaseModel):
+    """
+    Cloud Jira Data Source integrating JiraReader.
+    """
+
+    email: typing.Optional[str] = pydantic.Field(description="The email address to use for authentication.")
+    api_token: typing.Optional[str] = pydantic.Field(
+        description="The API/ Access Token used for Basic, PAT and OAuth2 authentication."
+    )
+    server_url: typing.Optional[str] = pydantic.Field(description="The server url for Jira Cloud.")
+    cloud_id: typing.Optional[str] = pydantic.Field(description="The cloud ID, used in case of OAuth2.")
+    authentication_mechanism: str = pydantic.Field(description="Type of Authentication for connecting to Jira APIs.")
+    query: str = pydantic.Field(description="JQL (Jira Query Language) query to search.")
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
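A construction sketch for the new Jira source; authentication_mechanism and query are the only required fields, and the "basic" mechanism string is an assumption since the accepted values are not part of this diff.

from llama_cloud import CloudJiraDataSource

jira_source = CloudJiraDataSource(
    email="user@example.com",                   # placeholder
    api_token="YOUR_JIRA_API_TOKEN",            # placeholder
    server_url="https://example.atlassian.net",
    authentication_mechanism="basic",           # assumption: accepted values not shown in the diff
    query='project = "DOCS" AND updated >= -7d',
)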
llama_cloud/types/{cloud_gcs_data_source.py → cloud_notion_page_data_source.py}
RENAMED

@@ -14,16 +14,14 @@ except ImportError:
     import pydantic  # type: ignore


-class CloudGcsDataSource(pydantic.BaseModel):
+class CloudNotionPageDataSource(pydantic.BaseModel):
     """
     Base component object to capture class names.
     """

-
-
-    service_account_key: typing.Dict[str, typing.Any] = pydantic.Field(
-        description="The service account key JSON to use for authentication."
-    )
+    integration_token: str = pydantic.Field(description="The integration token to use for authentication.")
+    database_ids: typing.Optional[str] = pydantic.Field(description="The Notion Database Id to read content from.")
+    page_ids: typing.Optional[str] = pydantic.Field(description="The Page ID's of the Notion to read from.")
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
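A construction sketch for the renamed model; integration_token is the only required field. The plural names carry singular str types, so passing several IDs in one string is an assumption about the expected encoding.

from llama_cloud import CloudNotionPageDataSource

notion_source = CloudNotionPageDataSource(
    integration_token="secret_XXXX",  # placeholder Notion integration token
    page_ids="page-id-1,page-id-2",   # assumption: multiple IDs in one string
)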
llama_cloud/types/cloud_sharepoint_data_source.py
CHANGED

@@ -22,6 +22,7 @@ class CloudSharepointDataSource(pydantic.BaseModel):
     site_name: str = pydantic.Field(description="The name of the SharePoint site to download from.")
     folder_path: typing.Optional[str] = pydantic.Field(description="The path of the Sharepoint folder to read from.")
     folder_id: typing.Optional[str] = pydantic.Field(description="The ID of the Sharepoint folder to read from.")
+    drive_name: typing.Optional[str] = pydantic.Field(description="The name of the Sharepoint drive to read from.")
     client_id: str = pydantic.Field(description="The client ID to use for authentication.")
     client_secret: str = pydantic.Field(description="The client secret to use for authentication.")
     tenant_id: str = pydantic.Field(description="The tenant ID to use for authentication.")
llama_cloud/types/cloud_slack_data_source.py
ADDED

@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class CloudSlackDataSource(pydantic.BaseModel):
+    """
+    Base component object to capture class names.
+    """
+
+    slack_token: str = pydantic.Field(description="Slack Bot Token.")
+    channel_ids: typing.Optional[str] = pydantic.Field(description="Slack Channel.")
+    latest_date: typing.Optional[str] = pydantic.Field(description="Latest date.")
+    earliest_date: typing.Optional[str] = pydantic.Field(description="Earliest date.")
+    earliest_date_timestamp: typing.Optional[float] = pydantic.Field(description="Earliest date timestamp.")
+    latest_date_timestamp: typing.Optional[float] = pydantic.Field(description="Latest date timestamp.")
+    channel_patterns: typing.Optional[str] = pydantic.Field(description="Slack Channel name pattern.")
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
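A construction sketch for the new Slack source; slack_token is the only required field, and the date format is an assumption since the model only types these fields as strings.

from llama_cloud import CloudSlackDataSource

slack_source = CloudSlackDataSource(
    slack_token="xoxb-placeholder-token",  # placeholder bot token
    channel_ids="C0123456789",             # placeholder channel ID
    earliest_date="2024-01-01",            # assumption about the expected date string format
)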
llama_cloud/types/configurable_data_sink_names.py
CHANGED

@@ -16,6 +16,7 @@ class ConfigurableDataSinkNames(str, enum.Enum):
     POSTGRES = "POSTGRES"
     QDRANT = "QDRANT"
     WEAVIATE = "WEAVIATE"
+    AZUREAI_SEARCH = "AZUREAI_SEARCH"

     def visit(
         self,

@@ -24,6 +25,7 @@ class ConfigurableDataSinkNames(str, enum.Enum):
         postgres: typing.Callable[[], T_Result],
         qdrant: typing.Callable[[], T_Result],
         weaviate: typing.Callable[[], T_Result],
+        azureai_search: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ConfigurableDataSinkNames.CHROMA:
             return chroma()

@@ -35,3 +37,5 @@ class ConfigurableDataSinkNames(str, enum.Enum):
             return qdrant()
         if self is ConfigurableDataSinkNames.WEAVIATE:
             return weaviate()
+        if self is ConfigurableDataSinkNames.AZUREAI_SEARCH:
+            return azureai_search()
llama_cloud/types/configurable_data_source_names.py
CHANGED

@@ -13,29 +13,33 @@ class ConfigurableDataSourceNames(str, enum.Enum):

     S_3 = "S3"
     AZURE_STORAGE_BLOB = "AZURE_STORAGE_BLOB"
-    GCS = "GCS"
-    GOOGLE_DRIVE = "GOOGLE_DRIVE"
     MICROSOFT_ONEDRIVE = "MICROSOFT_ONEDRIVE"
     MICROSOFT_SHAREPOINT = "MICROSOFT_SHAREPOINT"
+    SLACK = "SLACK"
+    NOTION_PAGE = "NOTION_PAGE"
+    JIRA = "JIRA"

     def visit(
         self,
         s_3: typing.Callable[[], T_Result],
         azure_storage_blob: typing.Callable[[], T_Result],
-        gcs: typing.Callable[[], T_Result],
-        google_drive: typing.Callable[[], T_Result],
         microsoft_onedrive: typing.Callable[[], T_Result],
         microsoft_sharepoint: typing.Callable[[], T_Result],
+        slack: typing.Callable[[], T_Result],
+        notion_page: typing.Callable[[], T_Result],
+        jira: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ConfigurableDataSourceNames.S_3:
             return s_3()
         if self is ConfigurableDataSourceNames.AZURE_STORAGE_BLOB:
             return azure_storage_blob()
-        if self is ConfigurableDataSourceNames.GCS:
-            return gcs()
-        if self is ConfigurableDataSourceNames.GOOGLE_DRIVE:
-            return google_drive()
         if self is ConfigurableDataSourceNames.MICROSOFT_ONEDRIVE:
             return microsoft_onedrive()
         if self is ConfigurableDataSourceNames.MICROSOFT_SHAREPOINT:
             return microsoft_sharepoint()
+        if self is ConfigurableDataSourceNames.SLACK:
+            return slack()
+        if self is ConfigurableDataSourceNames.NOTION_PAGE:
+            return notion_page()
+        if self is ConfigurableDataSourceNames.JIRA:
+            return jira()
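The generated visit method is an exhaustive dispatch over the enum, so this change breaks existing callers: every member now needs a handler, and the gcs/google_drive handlers are gone. A sketch, assuming the enum is re-exported at the package root like the other generated types:

from llama_cloud import ConfigurableDataSourceNames

def describe(source: ConfigurableDataSourceNames) -> str:
    # One handler keyword per enum member; missing or stale keywords raise TypeError.
    return source.visit(
        s_3=lambda: "Amazon S3 bucket",
        azure_storage_blob=lambda: "Azure Storage blob container",
        microsoft_onedrive=lambda: "Microsoft OneDrive folder",
        microsoft_sharepoint=lambda: "Microsoft SharePoint site",
        slack=lambda: "Slack channels",
        notion_page=lambda: "Notion pages or databases",
        jira=lambda: "Jira issues matching a JQL query",
    )

print(describe(ConfigurableDataSourceNames.JIRA))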
llama_cloud/types/data_sink_component_one.py
CHANGED

@@ -2,6 +2,7 @@

 import typing

+from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_chroma_vector_store import CloudChromaVectorStore
 from .cloud_pinecone_vector_store import CloudPineconeVectorStore
 from .cloud_postgres_vector_store import CloudPostgresVectorStore

@@ -14,4 +15,5 @@ DataSinkComponentOne = typing.Union[
     CloudPostgresVectorStore,
     CloudQdrantVectorStore,
     CloudWeaviateVectorStore,
+    CloudAzureAiSearchVectorStore,
 ]
llama_cloud/types/data_sink_create_component_one.py
CHANGED

@@ -2,6 +2,7 @@

 import typing

+from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_chroma_vector_store import CloudChromaVectorStore
 from .cloud_pinecone_vector_store import CloudPineconeVectorStore
 from .cloud_postgres_vector_store import CloudPostgresVectorStore

@@ -14,4 +15,5 @@ DataSinkCreateComponentOne = typing.Union[
     CloudPostgresVectorStore,
     CloudQdrantVectorStore,
     CloudWeaviateVectorStore,
+    CloudAzureAiSearchVectorStore,
 ]
llama_cloud/types/data_source_component_one.py
CHANGED

@@ -3,17 +3,19 @@
 import typing

 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
-from .cloud_gcs_data_source import CloudGcsDataSource
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
+from .cloud_jira_data_source import CloudJiraDataSource
+from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
 from .cloud_s_3_data_source import CloudS3DataSource
 from .cloud_sharepoint_data_source import CloudSharepointDataSource
+from .cloud_slack_data_source import CloudSlackDataSource

 DataSourceComponentOne = typing.Union[
     CloudS3DataSource,
     CloudAzStorageBlobDataSource,
-    CloudGcsDataSource,
-    CloudGoogleDriveDataSource,
     CloudOneDriveDataSource,
     CloudSharepointDataSource,
+    CloudSlackDataSource,
+    CloudNotionPageDataSource,
+    CloudJiraDataSource,
 ]
llama_cloud/types/data_source_create_component_one.py
CHANGED

@@ -3,17 +3,19 @@
 import typing

 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
-from .cloud_gcs_data_source import CloudGcsDataSource
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
+from .cloud_jira_data_source import CloudJiraDataSource
+from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
 from .cloud_s_3_data_source import CloudS3DataSource
 from .cloud_sharepoint_data_source import CloudSharepointDataSource
+from .cloud_slack_data_source import CloudSlackDataSource

 DataSourceCreateComponentOne = typing.Union[
     CloudS3DataSource,
     CloudAzStorageBlobDataSource,
-    CloudGcsDataSource,
-    CloudGoogleDriveDataSource,
     CloudOneDriveDataSource,
     CloudSharepointDataSource,
+    CloudSlackDataSource,
+    CloudNotionPageDataSource,
+    CloudJiraDataSource,
 ]
llama_cloud/types/eval_dataset_job_record.py
CHANGED

@@ -38,6 +38,7 @@ class EvalDatasetJobRecord(pydantic.BaseModel):
         description="The correlation ID for this job. Used for tracking the job across services."
     )
     parent_job_execution_id: typing.Optional[str] = pydantic.Field(description="The ID of the parent job execution.")
+    user_id: typing.Optional[str] = pydantic.Field(description="The ID of the user that created this job")
     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
     id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
     status: StatusEnum
llama_cloud/types/extraction_result.py
ADDED

@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extraction_result_data_value import ExtractionResultDataValue
+from .file import File
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractionResult(pydantic.BaseModel):
+    """
+    Schema for an extraction result.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
+    schema_id: str = pydantic.Field(description="The id of the schema")
+    data: typing.Dict[str, ExtractionResultDataValue] = pydantic.Field(description="The data extracted from the file")
+    file: File = pydantic.Field(description="The file that the extract was extracted from")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
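A sketch of consuming the new model once a result has been fetched (the fetch itself goes through the new extraction client, whose method names are not part of this hunk). The result.file.name access assumes the existing File model, unchanged in this release, keeps its name field.

from llama_cloud import ExtractionResult

def summarize(result: ExtractionResult) -> str:
    # `data` maps schema field names to extracted values; `file` records the
    # source document they were pulled from.
    fields = ", ".join(f"{key}={value}" for key, value in result.data.items())
    return f"{result.file.name} [{result.schema_id}]: {fields}"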