athena-intelligence 0.1.122__py3-none-any.whl → 0.1.124__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- athena/__init__.py +26 -2
- athena/core/client_wrapper.py +1 -1
- athena/tools/client.py +125 -0
- athena/tools/structured_data_extractor/client.py +149 -29
- athena/types/__init__.py +24 -2
- athena/types/asset_node.py +42 -0
- athena/types/chunk.py +36 -0
- athena/types/chunk_content_item.py +58 -0
- athena/types/chunk_result.py +35 -0
- athena/types/chunk_result_chunk_id.py +5 -0
- athena/types/{structured_data_extractor_reponse.py → folder_response.py} +5 -6
- athena/types/image_url_content.py +33 -0
- athena/types/prompt_message.py +35 -0
- athena/types/structured_data_extractor_response.py +42 -0
- athena/types/text_content.py +33 -0
- athena/types/type.py +25 -0
- {athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.124.dist-info}/METADATA +1 -1
- {athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.124.dist-info}/RECORD +19 -9
- {athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.124.dist-info}/WHEEL +0 -0
athena/__init__.py
CHANGED
@@ -1,7 +1,14 @@
 # This file was auto-generated by Fern from our API Definition.
 
 from .types import (
+    AssetNode,
     AssetNotFoundError,
+    Chunk,
+    ChunkContentItem,
+    ChunkContentItem_ImageUrl,
+    ChunkContentItem_Text,
+    ChunkResult,
+    ChunkResultChunkId,
     CustomAgentResponse,
     DataFrameRequestOut,
     DataFrameRequestOutColumnsItem,
@@ -12,16 +19,21 @@ from .types import (
     DriveAgentResponse,
     FileChunkRequestOut,
     FileTooLargeError,
+    FolderResponse,
     GeneralAgentConfig,
     GeneralAgentConfigEnabledToolsItem,
     GeneralAgentRequest,
     GeneralAgentResponse,
+    ImageUrlContent,
     ParentFolderError,
+    PromptMessage,
     ResearchAgentResponse,
     SaveAssetRequestOut,
     SqlAgentResponse,
-
+    StructuredDataExtractorResponse,
+    TextContent,
     Tool,
+    Type,
 )
 from .errors import (
     BadRequestError,
@@ -39,9 +51,16 @@ from .tools import ToolsDataFrameRequestColumnsItem
 from .version import __version__
 
 __all__ = [
+    "AssetNode",
     "AssetNotFoundError",
     "AthenaEnvironment",
     "BadRequestError",
+    "Chunk",
+    "ChunkContentItem",
+    "ChunkContentItem_ImageUrl",
+    "ChunkContentItem_Text",
+    "ChunkResult",
+    "ChunkResultChunkId",
     "ContentTooLargeError",
     "CustomAgentResponse",
     "DataFrameRequestOut",
@@ -53,20 +72,25 @@ __all__ = [
     "DriveAgentResponse",
     "FileChunkRequestOut",
     "FileTooLargeError",
+    "FolderResponse",
     "GeneralAgentConfig",
     "GeneralAgentConfigEnabledToolsItem",
     "GeneralAgentRequest",
     "GeneralAgentResponse",
+    "ImageUrlContent",
     "InternalServerError",
     "NotFoundError",
     "ParentFolderError",
+    "PromptMessage",
     "QueryExecuteRequestDatabaseAssetIds",
     "ResearchAgentResponse",
     "SaveAssetRequestOut",
     "SqlAgentResponse",
-    "
+    "StructuredDataExtractorResponse",
+    "TextContent",
     "Tool",
     "ToolsDataFrameRequestColumnsItem",
+    "Type",
     "UnauthorizedError",
     "UnprocessableEntityError",
     "UnsupportedMediaTypeError",
athena/core/client_wrapper.py
CHANGED
@@ -17,7 +17,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "athena-intelligence",
-            "X-Fern-SDK-Version": "0.1.122",
+            "X-Fern-SDK-Version": "0.1.124",
         }
         headers["X-API-KEY"] = self.api_key
         return headers
athena/tools/client.py
CHANGED
@@ -20,6 +20,7 @@ from ..types.data_frame_request_out import DataFrameRequestOut
 from ..types.data_frame_unknown_format_error import DataFrameUnknownFormatError
 from ..types.file_chunk_request_out import FileChunkRequestOut
 from ..types.file_too_large_error import FileTooLargeError
+from ..types.folder_response import FolderResponse
 from ..types.parent_folder_error import ParentFolderError
 from ..types.save_asset_request_out import SaveAssetRequestOut
 from .calendar.client import AsyncCalendarClient, CalendarClient
@@ -94,6 +95,68 @@ class ToolsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def list_contents(
+        self,
+        *,
+        folder_id: typing.Optional[str] = None,
+        include_asset_details: typing.Optional[bool] = None,
+        include_system_files: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> FolderResponse:
+        """
+        List contents of a folder or entire workspace in a tree structure.
+
+        Parameters
+        ----------
+        folder_id : typing.Optional[str]
+
+        include_asset_details : typing.Optional[bool]
+
+        include_system_files : typing.Optional[bool]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        FolderResponse
+            Successful Response
+
+        Examples
+        --------
+        from athena.client import Athena
+
+        client = Athena(
+            api_key="YOUR_API_KEY",
+        )
+        client.tools.list_contents()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "api/v0/tools/contents",
+            method="GET",
+            params={
+                "folder_id": folder_id,
+                "include_asset_details": include_asset_details,
+                "include_system_files": include_system_files,
+            },
+            request_options=request_options,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic_v1.parse_obj_as(FolderResponse, _response.json())  # type: ignore
+        if _response.status_code == 400:
+            raise BadRequestError(pydantic_v1.parse_obj_as(ParentFolderError, _response.json()))  # type: ignore
+        if _response.status_code == 401:
+            raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 404:
+            raise NotFoundError(pydantic_v1.parse_obj_as(AssetNotFoundError, _response.json()))  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def data_frame(
         self,
         *,
@@ -347,6 +410,68 @@ class AsyncToolsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def list_contents(
+        self,
+        *,
+        folder_id: typing.Optional[str] = None,
+        include_asset_details: typing.Optional[bool] = None,
+        include_system_files: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None
+    ) -> FolderResponse:
+        """
+        List contents of a folder or entire workspace in a tree structure.
+
+        Parameters
+        ----------
+        folder_id : typing.Optional[str]
+
+        include_asset_details : typing.Optional[bool]
+
+        include_system_files : typing.Optional[bool]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        FolderResponse
+            Successful Response
+
+        Examples
+        --------
+        from athena.client import AsyncAthena
+
+        client = AsyncAthena(
+            api_key="YOUR_API_KEY",
+        )
+        await client.tools.list_contents()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "api/v0/tools/contents",
+            method="GET",
+            params={
+                "folder_id": folder_id,
+                "include_asset_details": include_asset_details,
+                "include_system_files": include_system_files,
+            },
+            request_options=request_options,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic_v1.parse_obj_as(FolderResponse, _response.json())  # type: ignore
+        if _response.status_code == 400:
+            raise BadRequestError(pydantic_v1.parse_obj_as(ParentFolderError, _response.json()))  # type: ignore
+        if _response.status_code == 401:
+            raise UnauthorizedError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        if _response.status_code == 404:
+            raise NotFoundError(pydantic_v1.parse_obj_as(AssetNotFoundError, _response.json()))  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def data_frame(
         self,
         *,
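The generated examples above show only the bare `list_contents()` call. A minimal sketch of what a caller might do with the returned `FolderResponse` (its `structure_tree_ascii` and `tree_data` fields are defined in the `folder_response.py` hunk further down); the API key is a placeholder:

from athena.client import Athena

client = Athena(api_key="YOUR_API_KEY")

# Fetch the workspace tree; omitting folder_id lists the whole workspace.
folder = client.tools.list_contents(include_asset_details=True)

# structure_tree_ascii is a ready-made text rendering of the tree.
print(folder.structure_tree_ascii)

# tree_data maps node ids to AssetNode objects that can be inspected directly.
for node_id, node in folder.tree_data.items():
    print(node_id, node.name, node.type)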
athena/tools/structured_data_extractor/client.py
CHANGED
@@ -8,7 +8,9 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ...core.pydantic_utilities import pydantic_v1
 from ...core.request_options import RequestOptions
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
-from ...types.
+from ...types.chunk import Chunk
+from ...types.prompt_message import PromptMessage
+from ...types.structured_data_extractor_response import StructuredDataExtractorResponse
 
 # this is used as the default value for optional parameters
 OMIT = typing.cast(typing.Any, ...)
@@ -21,58 +23,117 @@ class StructuredDataExtractorClient:
     def invoke(
         self,
         *,
-
+        chunks: typing.Sequence[Chunk],
         json_schema: typing.Dict[str, typing.Any],
-
+        chunk_messages: typing.Optional[typing.Sequence[PromptMessage]] = OMIT,
         reduce: typing.Optional[bool] = OMIT,
+        reduce_messages: typing.Optional[typing.Sequence[PromptMessage]] = OMIT,
         request_options: typing.Optional[RequestOptions] = None
-    ) ->
+    ) -> StructuredDataExtractorResponse:
         """
-
+        Extract structured data.
+
+        tl;dr:
+
+        - pass a valid JSON schema in `json_schema`
+        - pass the page chunks as a list of `Chunk` objects, by default: `{"type": "text", "content": "..."}`
+        - leave all other fields as default
+
+        Detailed configuration (only relevant for complex use cases):
+
+        The structured data extractor's architecture follows the map-reduce pattern,
+        where the asset is divided into chunks, the schema is extracted from each chunk,
+        and the chunks are then reduced to a single structured data object.
+
+        In some applications, you may not want to:
+
+        - map (if your input asset is small enough)
+        - reduce (if your output object is large enough that it will overflow the output length;
+          if you're extracting a long list of entities; if youre )
+          to extract all instances of the schema).
+
+        You can configure these behaviors with the `map` and `reduce` fields.
 
         Parameters
         ----------
-
-        The
+        chunks : typing.Sequence[Chunk]
+            The chunks from which to extract structured data.
 
         json_schema : typing.Dict[str, typing.Any]
-            The JSON schema to use for validation (version draft 2020-12).
+            The JSON schema to use for validation (version draft 2020-12). See the docs [here](https://json-schema.org/learn/getting-started-step-by-step).
 
-
-
+        chunk_messages : typing.Optional[typing.Sequence[PromptMessage]]
+            The prompt to use for the data extraction over *each individual chunk*. It must be a list of messages. The chunk content will be appended as a list of human messages.
 
         reduce : typing.Optional[bool]
             If `map`, whether to reduce the chunks to a single structured object (true) or return the full list (false). Use True unless you want to preserve duplicates from each page or expect the object to overflow the output context.
 
+        reduce_messages : typing.Optional[typing.Sequence[PromptMessage]]
+            The prompt to use for the reduce steps. It must be a list of messages. The two extraction attempts will be appended as a list of human messages.
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
 
         Returns
         -------
-
+        StructuredDataExtractorResponse
             Successful Response
 
         Examples
         --------
+        from athena import Chunk, ChunkContentItem_Text
         from athena.client import Athena
 
         client = Athena(
            api_key="YOUR_API_KEY",
         )
         client.tools.structured_data_extractor.invoke(
-
-
+            chunks=[
+                Chunk(
+                    chunk_id="1",
+                    content=[
+                        ChunkContentItem_Text(
+                            text="John Smith is a 35 year old developer. You can reach him at john.smith@example.com",
+                        )
+                    ],
+                ),
+                Chunk(
+                    chunk_id="2",
+                    content=[
+                        ChunkContentItem_Text(
+                            text="Jane Doe is a 25 year old developer. You can reach her at jane@example.com",
+                        )
+                    ],
+                ),
+            ],
+            json_schema={
+                "description": "A person",
+                "properties": {
+                    "age": {"type": "integer"},
+                    "email": {"type": "string"},
+                    "name": {"type": "string"},
+                },
+                "required": ["name"],
+                "title": "Person",
+                "type": "object",
+            },
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "api/v0/tools/structured-data-extractor/invoke",
             method="POST",
-            json={
+            json={
+                "chunk_messages": chunk_messages,
+                "chunks": chunks,
+                "json_schema": json_schema,
+                "reduce": reduce,
+                "reduce_messages": reduce_messages,
+            },
             request_options=request_options,
             omit=OMIT,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic_v1.parse_obj_as(
+            return pydantic_v1.parse_obj_as(StructuredDataExtractorResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         try:
@@ -89,58 +150,117 @@ class AsyncStructuredDataExtractorClient:
     async def invoke(
         self,
         *,
-
+        chunks: typing.Sequence[Chunk],
         json_schema: typing.Dict[str, typing.Any],
-
+        chunk_messages: typing.Optional[typing.Sequence[PromptMessage]] = OMIT,
         reduce: typing.Optional[bool] = OMIT,
+        reduce_messages: typing.Optional[typing.Sequence[PromptMessage]] = OMIT,
         request_options: typing.Optional[RequestOptions] = None
-    ) ->
+    ) -> StructuredDataExtractorResponse:
         """
-
+        Extract structured data.
+
+        tl;dr:
+
+        - pass a valid JSON schema in `json_schema`
+        - pass the page chunks as a list of `Chunk` objects, by default: `{"type": "text", "content": "..."}`
+        - leave all other fields as default
+
+        Detailed configuration (only relevant for complex use cases):
+
+        The structured data extractor's architecture follows the map-reduce pattern,
+        where the asset is divided into chunks, the schema is extracted from each chunk,
+        and the chunks are then reduced to a single structured data object.
+
+        In some applications, you may not want to:
+
+        - map (if your input asset is small enough)
+        - reduce (if your output object is large enough that it will overflow the output length;
+          if you're extracting a long list of entities; if youre )
+          to extract all instances of the schema).
+
+        You can configure these behaviors with the `map` and `reduce` fields.
 
         Parameters
         ----------
-
-        The
+        chunks : typing.Sequence[Chunk]
+            The chunks from which to extract structured data.
 
         json_schema : typing.Dict[str, typing.Any]
-            The JSON schema to use for validation (version draft 2020-12).
+            The JSON schema to use for validation (version draft 2020-12). See the docs [here](https://json-schema.org/learn/getting-started-step-by-step).
 
-
-
+        chunk_messages : typing.Optional[typing.Sequence[PromptMessage]]
+            The prompt to use for the data extraction over *each individual chunk*. It must be a list of messages. The chunk content will be appended as a list of human messages.
 
         reduce : typing.Optional[bool]
             If `map`, whether to reduce the chunks to a single structured object (true) or return the full list (false). Use True unless you want to preserve duplicates from each page or expect the object to overflow the output context.
 
+        reduce_messages : typing.Optional[typing.Sequence[PromptMessage]]
+            The prompt to use for the reduce steps. It must be a list of messages. The two extraction attempts will be appended as a list of human messages.
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
 
         Returns
         -------
-
+        StructuredDataExtractorResponse
             Successful Response
 
         Examples
         --------
+        from athena import Chunk, ChunkContentItem_Text
         from athena.client import AsyncAthena
 
         client = AsyncAthena(
             api_key="YOUR_API_KEY",
         )
         await client.tools.structured_data_extractor.invoke(
-
-
+            chunks=[
+                Chunk(
+                    chunk_id="1",
+                    content=[
+                        ChunkContentItem_Text(
+                            text="John Smith is a 35 year old developer. You can reach him at john.smith@example.com",
+                        )
+                    ],
+                ),
+                Chunk(
+                    chunk_id="2",
+                    content=[
+                        ChunkContentItem_Text(
+                            text="Jane Doe is a 25 year old developer. You can reach her at jane@example.com",
+                        )
+                    ],
+                ),
+            ],
+            json_schema={
+                "description": "A person",
+                "properties": {
+                    "age": {"type": "integer"},
+                    "email": {"type": "string"},
+                    "name": {"type": "string"},
+                },
+                "required": ["name"],
+                "title": "Person",
+                "type": "object",
+            },
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "api/v0/tools/structured-data-extractor/invoke",
             method="POST",
-            json={
+            json={
+                "chunk_messages": chunk_messages,
+                "chunks": chunks,
+                "json_schema": json_schema,
+                "reduce": reduce,
+                "reduce_messages": reduce_messages,
+            },
             request_options=request_options,
             omit=OMIT,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic_v1.parse_obj_as(
+            return pydantic_v1.parse_obj_as(StructuredDataExtractorResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         try:
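Beyond the generated `invoke` example, the new `chunk_messages`, `reduce`, and `reduce_messages` parameters let callers override the map and reduce prompts. A hedged sketch using only the types added in this release; the prompt wording and schema are illustrative, not part of the SDK:

from athena import Chunk, ChunkContentItem_Text, PromptMessage, Type
from athena.client import Athena

client = Athena(api_key="YOUR_API_KEY")

# Illustrative custom per-chunk prompt; the extractor appends the chunk
# content after these messages as human messages.
custom_prompt = [
    PromptMessage(type=Type.SYSTEM, content="Extract only facts that are explicitly stated."),
]

response = client.tools.structured_data_extractor.invoke(
    chunks=[
        Chunk(chunk_id="1", content=[ChunkContentItem_Text(text="Jane Doe, 25, jane@example.com")]),
    ],
    json_schema={"title": "Person", "type": "object", "properties": {"name": {"type": "string"}}},
    chunk_messages=custom_prompt,
    reduce=False,  # keep one extraction per chunk instead of a single merged object
)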
athena/types/__init__.py
CHANGED
@@ -1,6 +1,11 @@
 # This file was auto-generated by Fern from our API Definition.
 
+from .asset_node import AssetNode
 from .asset_not_found_error import AssetNotFoundError
+from .chunk import Chunk
+from .chunk_content_item import ChunkContentItem, ChunkContentItem_ImageUrl, ChunkContentItem_Text
+from .chunk_result import ChunkResult
+from .chunk_result_chunk_id import ChunkResultChunkId
 from .custom_agent_response import CustomAgentResponse
 from .data_frame_request_out import DataFrameRequestOut
 from .data_frame_request_out_columns_item import DataFrameRequestOutColumnsItem
@@ -11,19 +16,31 @@ from .document_chunk import DocumentChunk
 from .drive_agent_response import DriveAgentResponse
 from .file_chunk_request_out import FileChunkRequestOut
 from .file_too_large_error import FileTooLargeError
+from .folder_response import FolderResponse
 from .general_agent_config import GeneralAgentConfig
 from .general_agent_config_enabled_tools_item import GeneralAgentConfigEnabledToolsItem
 from .general_agent_request import GeneralAgentRequest
 from .general_agent_response import GeneralAgentResponse
+from .image_url_content import ImageUrlContent
 from .parent_folder_error import ParentFolderError
+from .prompt_message import PromptMessage
 from .research_agent_response import ResearchAgentResponse
 from .save_asset_request_out import SaveAssetRequestOut
 from .sql_agent_response import SqlAgentResponse
-from .
+from .structured_data_extractor_response import StructuredDataExtractorResponse
+from .text_content import TextContent
 from .tool import Tool
+from .type import Type
 
 __all__ = [
+    "AssetNode",
     "AssetNotFoundError",
+    "Chunk",
+    "ChunkContentItem",
+    "ChunkContentItem_ImageUrl",
+    "ChunkContentItem_Text",
+    "ChunkResult",
+    "ChunkResultChunkId",
     "CustomAgentResponse",
     "DataFrameRequestOut",
     "DataFrameRequestOutColumnsItem",
@@ -34,14 +51,19 @@ __all__ = [
     "DriveAgentResponse",
     "FileChunkRequestOut",
     "FileTooLargeError",
+    "FolderResponse",
     "GeneralAgentConfig",
     "GeneralAgentConfigEnabledToolsItem",
     "GeneralAgentRequest",
     "GeneralAgentResponse",
+    "ImageUrlContent",
     "ParentFolderError",
+    "PromptMessage",
     "ResearchAgentResponse",
     "SaveAssetRequestOut",
     "SqlAgentResponse",
-    "
+    "StructuredDataExtractorResponse",
+    "TextContent",
     "Tool",
+    "Type",
 ]
athena/types/asset_node.py
ADDED
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class AssetNode(pydantic_v1.BaseModel):
+    """
+    Model representing a node in the folder tree.
+    """
+
+    children: typing.Optional[typing.Dict[str, typing.Optional[AssetNode]]] = None
+    id: str
+    media_type: str
+    name: str
+    type: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
+
+
+AssetNode.update_forward_refs()
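Since `children` is an optional mapping of optional `AssetNode` values, a recursive walk has to guard against `None` at both levels. A small sketch, not part of the SDK:

from athena import AssetNode


def walk(node: AssetNode, depth: int = 0) -> None:
    # Print the node, then recurse into whichever children are present.
    print("  " * depth + f"{node.name} ({node.type})")
    for child in (node.children or {}).values():
        if child is not None:
            walk(child, depth + 1)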
athena/types/chunk.py
ADDED
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .chunk_content_item import ChunkContentItem
+
+
+class Chunk(pydantic_v1.BaseModel):
+    """
+    A chunk of content to extract data from.
+    """
+
+    chunk_id: str
+    content: typing.List[ChunkContentItem]
+    metadata: typing.Optional[typing.Dict[str, typing.Optional[str]]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/chunk_content_item.py
ADDED
@@ -0,0 +1,58 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class ChunkContentItem_Text(pydantic_v1.BaseModel):
+    text: str
+    type: typing.Literal["text"] = "text"
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
+
+
+class ChunkContentItem_ImageUrl(pydantic_v1.BaseModel):
+    image_url: typing.Dict[str, str]
+    type: typing.Literal["image_url"] = "image_url"
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
+
+
+ChunkContentItem = typing.Union[ChunkContentItem_Text, ChunkContentItem_ImageUrl]
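`ChunkContentItem` is a union discriminated on `type`, so a single `Chunk` can mix text and image parts. A sketch of building such a chunk; the key layout inside `image_url` follows the `Dict[str, str]` field above, and the "url" key is an assumption rather than something this diff specifies:

from athena import Chunk, ChunkContentItem_ImageUrl, ChunkContentItem_Text

# A chunk mixing a text part with an image part. The "url" key inside
# image_url is an assumed convention; the field is just Dict[str, str].
page = Chunk(
    chunk_id="page-1",
    content=[
        ChunkContentItem_Text(text="Invoice #42, issued to ACME Corp."),
        ChunkContentItem_ImageUrl(image_url={"url": "https://example.com/invoice-42.png"}),
    ],
    metadata={"source": "invoice-42.pdf"},
)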
athena/types/chunk_result.py
ADDED
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .chunk_result_chunk_id import ChunkResultChunkId
+
+
+class ChunkResult(pydantic_v1.BaseModel):
+    """
+    The result of a chunk extraction.
+    """
+
+    chunk_id: ChunkResultChunkId
+    chunk_result: typing.Optional[typing.Dict[str, typing.Any]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/{structured_data_extractor_reponse.py → folder_response.py}
CHANGED
@@ -5,17 +5,16 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .asset_node import AssetNode
 
 
-class
+class FolderResponse(pydantic_v1.BaseModel):
     """
-
+    Combined response with tree data and visualization.
     """
 
-
-
-        The extracted structured data. Guaranteed to match `json_schema`.
-    """
+    structure_tree_ascii: str
+    tree_data: typing.Dict[str, AssetNode]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
athena/types/image_url_content.py
ADDED
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class ImageUrlContent(pydantic_v1.BaseModel):
+    """
+    An image content item.
+    """
+
+    image_url: typing.Dict[str, str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/prompt_message.py
ADDED
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .type import Type
+
+
+class PromptMessage(pydantic_v1.BaseModel):
+    """
+    A message to use for the structured data extractor.
+    """
+
+    content: str
+    type: Type
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/structured_data_extractor_response.py
ADDED
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .chunk_result import ChunkResult
+
+
+class StructuredDataExtractorResponse(pydantic_v1.BaseModel):
+    """
+    The agent's response.
+    """
+
+    chunk_by_chunk_data: typing.Optional[typing.List[ChunkResult]] = pydantic_v1.Field(default=None)
+    """
+    The extracted structured data for each chunk. A list where each element is guaranteed to match `json_schema`.
+    """
+
+    reduced_data: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    If reduce is True, the reduced structured data, otherwise null. Guaranteed to match `json_schema`.
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
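Both response fields are optional: which one is populated depends on the `reduce` flag passed to `invoke`. A small sketch of handling either shape (the helper name is hypothetical):

from athena import StructuredDataExtractorResponse


def collect_results(response: StructuredDataExtractorResponse) -> list:
    # reduce=True puts the merged object in reduced_data; otherwise each
    # chunk's extraction is listed in chunk_by_chunk_data.
    if response.reduced_data is not None:
        return [response.reduced_data]
    return [r.chunk_result for r in (response.chunk_by_chunk_data or []) if r.chunk_result is not None]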
athena/types/text_content.py
ADDED
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class TextContent(pydantic_v1.BaseModel):
+    """
+    A text content item in a multimodal message content.
+    """
+
+    text: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/type.py
ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class Type(str, enum.Enum):
+    SYSTEM = "system"
+    HUMAN = "human"
+    USER = "user"
+
+    def visit(
+        self,
+        system: typing.Callable[[], T_Result],
+        human: typing.Callable[[], T_Result],
+        user: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is Type.SYSTEM:
+            return system()
+        if self is Type.HUMAN:
+            return human()
+        if self is Type.USER:
+            return user()
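The generated `visit` helper dispatches on the enum member so call sites can avoid if/else chains. A small usage sketch (the labels are illustrative):

from athena import Type


def role_label(message_type: Type) -> str:
    # visit() invokes exactly one callback, chosen by the enum member.
    return message_type.visit(
        system=lambda: "system prompt",
        human=lambda: "human turn",
        user=lambda: "user turn",
    )


print(role_label(Type.HUMAN))  # prints "human turn"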
{athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.124.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-athena/__init__.py,sha256=
+athena/__init__.py,sha256=Q5F_OwlFj6Y4zWtw4ncDPXp16HWBRrQn7Q4fJYBa5Pc,2538
 athena/agents/__init__.py,sha256=I6MO2O_hb6KLa8oDHbGNSAhcPE-dsrX6LMcAEhsg3PQ,160
 athena/agents/client.py,sha256=aI8rNhXBSVJ-hvjnIoCK9sKvHB0e95Zkn-3YpXOKFrY,6721
 athena/agents/drive/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -13,7 +13,7 @@ athena/base_client.py,sha256=-kVdOlIibBz48lxWratdQAzT7fTvZsORvOMF3KoPDPw,5647
 athena/client.py,sha256=4PUPrBPCMTFpHR1yuKVR5eC1AYBl_25SMf6ZH82JHB0,19039
 athena/core/__init__.py,sha256=UFXpYzcGxWQUucU1TkjOQ9mGWN3A5JohluOIWVYKU4I,973
 athena/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
-athena/core/client_wrapper.py,sha256=
+athena/core/client_wrapper.py,sha256=PsnlYk4Sr5_x1d3Z8hUp0g3i5pD1dqfmKXH-mHvU04Q,1806
 athena/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 athena/core/file.py,sha256=sy1RUGZ3aJYuw998bZytxxo6QdgKmlnlgBaMvwEKCGg,1480
 athena/core/http_client.py,sha256=Z4NuAsJD-51yqmoME17O5sxwx5orSp1wsnd6bPyKcgA,17768
@@ -39,17 +39,22 @@ athena/query/types/query_execute_request_database_asset_ids.py,sha256=aoVl5Xb34Q
 athena/tools/__init__.py,sha256=DREW2sa5Z-Rj8v1LQ-tNhflI-EO_oDc6NCmF5v0LWeU,288
 athena/tools/calendar/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 athena/tools/calendar/client.py,sha256=hKWzWyl1GwFG69oX3tektwNfy2sV5Lt6PRy9vTyPLOo,5283
-athena/tools/client.py,sha256
+athena/tools/client.py,sha256=-hjk99ABo74-qT8OMMqASl0c7oOkdhHSMFluBa9l5qA,25965
 athena/tools/email/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 athena/tools/email/client.py,sha256=epUkV5af3eilVgRR81SFZAf29JuhEWKMkdMuN6qDLUM,7593
 athena/tools/structured_data_extractor/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-athena/tools/structured_data_extractor/client.py,sha256=
+athena/tools/structured_data_extractor/client.py,sha256=Wr2r_kU8WTRiKA2qfCfqvmllddj3MjT1Vqr27La8gO8,11281
 athena/tools/tasks/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 athena/tools/tasks/client.py,sha256=5kT6ulh2YDIbNYiv-knBjtF-ST7p0dUvZyrd7t5O61s,2975
 athena/tools/types/__init__.py,sha256=cA-ZQm6veQAP3_vKu9KkZpISsQqgTBN_Z--FGY1c2iA,197
 athena/tools/types/tools_data_frame_request_columns_item.py,sha256=GA1FUlTV_CfSc-KToTAwFf4Exl0rr4fsweVZupztjw0,138
-athena/types/__init__.py,sha256=
+athena/types/__init__.py,sha256=IbCXHjNzSdiLn2HrPsxj90DLzYDp63WJk4FsCVoymK0,2639
+athena/types/asset_node.py,sha256=CiqYxuYCXhOs9XAvBrUaVVMZpf8gdocVTAUBmUS5l1g,1375
 athena/types/asset_not_found_error.py,sha256=ZcgqRuzvO4Z8vVVxwtDB-QtKhpVIVV3hqQuJeUoOoJE,1121
+athena/types/chunk.py,sha256=M4O7Sj3EMvkXioQneuKbptr1n5XNGCU9fVxYR12XG9o,1340
+athena/types/chunk_content_item.py,sha256=2B1mTc0a4h7jyKRiYwfC573fM4xijhNEgfd_FI-myj4,2251
+athena/types/chunk_result.py,sha256=b74rp4xNKm3r0R76N-VnoaKrEKeBzMWRGI2PVMyiXpc,1310
+athena/types/chunk_result_chunk_id.py,sha256=pzJ6yL6NdUtseoeU4Kw2jlxSTMCVew2TrjhR1MbCuFg,124
 athena/types/custom_agent_response.py,sha256=_Vm_fJq4cETtOawBW7p0cvH4Jmle26lHQZ73A8MdLX0,1263
 athena/types/data_frame_request_out.py,sha256=1CEBe-baDQi0uz_EgMw0TKGYXGj6KV44cL3ViRTZLKM,1669
 athena/types/data_frame_request_out_columns_item.py,sha256=9cjzciFv6C8n8Griytt_q_8ovkzHViS5tvUcMDfkfKE,143
@@ -60,17 +65,22 @@ athena/types/document_chunk.py,sha256=deXiiMA_U5EabUh1Fg2AB4ElYuc5OsTnrU7JwLApJm
 athena/types/drive_agent_response.py,sha256=UMKF43e5WScH0a9ITuxjwGWzAzvXAl1OsfRVeXSyfjk,1218
 athena/types/file_chunk_request_out.py,sha256=Ju0I_UpSitjQ-XqSIRvvg2XA6QtfCLZClPq5PUqmPNg,1258
 athena/types/file_too_large_error.py,sha256=AinkrcgR7lcTILAD8RX0x48P3GlSoAh1OihxMvSvRuo,1120
+athena/types/folder_response.py,sha256=qq0hLRvfJFeXUz7Cc8oeyCabG73ac2H4lM_j0QW38YY,1280
 athena/types/general_agent_config.py,sha256=FaswWVsDTsL5Fs9Tlx4zSK1S8OrsFnzruEt7l72XlGA,1457
 athena/types/general_agent_config_enabled_tools_item.py,sha256=6gYaU7uIDJbgygtBKLdYL-VbPxxbEcxwRsT8VaW5vN8,165
 athena/types/general_agent_request.py,sha256=NnUVtz8U1VoA1SJapbp163Wf_inEQVeFCYWJvM4P-qI,1449
 athena/types/general_agent_response.py,sha256=9BxqXzchSti5O0Ch_WJkvmkawkBhpH03QlZIbKdYbAY,1212
+athena/types/image_url_content.py,sha256=AivFiET-XA7guQ_rWEGOOafDuQBXTvO8-rMGmKucCss,1182
 athena/types/parent_folder_error.py,sha256=ZMF-i3mZY6Mu1n5uQ60Q3mIIfehlWuXtgFUkSYspkx8,1120
+athena/types/prompt_message.py,sha256=0z2qlWbqHCG2j7hvWBDvDpQrHLDCI3h8Z0kg8AOOgKs,1227
 athena/types/research_agent_response.py,sha256=-1mX4M0IEWDFH3alSZdtuhZHSerjWYJQkn74r3Dp26g,1235
 athena/types/save_asset_request_out.py,sha256=5bpBaUV3oeuL_hz4s07c-6MQHkn4cBsyxgT_SD5oi6I,1193
 athena/types/sql_agent_response.py,sha256=DmeG0HPZkPT_gTrtkroVZluGZIV9McB8wmME2iT8PB0,1347
-athena/types/
+athena/types/structured_data_extractor_response.py,sha256=RBTjR50PWs3NM0GUlENNHaqAMiOatf14Vmvrd94de8s,1647
+athena/types/text_content.py,sha256=uG2poNIkM6o7tFgf-eKzZk9kZHYImY3JdI-NkYiqWgU,1185
 athena/types/tool.py,sha256=6H2BFZiBgQOtYUAwSYBeGZKhwev17IEwnIjgmno6dZw,436
+athena/types/type.py,sha256=JaUIt4ogmO4XxCQ9c56fqKN5qANKkrnpuZGmdqOCIow,581
 athena/version.py,sha256=8aYAOJtVLaJLpRp6mTiEIhnl8gXA7yE0aDtZ-3mKQ4k,87
-athena_intelligence-0.1.
-athena_intelligence-0.1.
-athena_intelligence-0.1.
+athena_intelligence-0.1.124.dist-info/METADATA,sha256=lwtK__HdkgNj6iBeRlDN4JVY5z7XoZPv1ABXIwaBRT0,5274
+athena_intelligence-0.1.124.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+athena_intelligence-0.1.124.dist-info/RECORD,,
{athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.124.dist-info}/WHEEL
File without changes