athena-intelligence 0.1.122__py3-none-any.whl → 0.1.123__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- athena/__init__.py +22 -2
- athena/core/client_wrapper.py +1 -1
- athena/tools/structured_data_extractor/client.py +143 -29
- athena/types/__init__.py +20 -2
- athena/types/chunk.py +36 -0
- athena/types/chunk_content_item.py +58 -0
- athena/types/chunk_result.py +35 -0
- athena/types/chunk_result_chunk_id.py +5 -0
- athena/types/{structured_data_extractor_reponse.py → image_url_content.py} +3 -6
- athena/types/prompt_message.py +35 -0
- athena/types/structured_data_extractor_response.py +42 -0
- athena/types/text_content.py +33 -0
- athena/types/type.py +25 -0
- {athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.123.dist-info}/METADATA +1 -1
- {athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.123.dist-info}/RECORD +16 -8
- {athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.123.dist-info}/WHEEL +0 -0
athena/__init__.py
CHANGED
@@ -2,6 +2,12 @@

 from .types import (
     AssetNotFoundError,
+    Chunk,
+    ChunkContentItem,
+    ChunkContentItem_ImageUrl,
+    ChunkContentItem_Text,
+    ChunkResult,
+    ChunkResultChunkId,
     CustomAgentResponse,
     DataFrameRequestOut,
     DataFrameRequestOutColumnsItem,
@@ -16,12 +22,16 @@ from .types import (
     GeneralAgentConfigEnabledToolsItem,
     GeneralAgentRequest,
     GeneralAgentResponse,
+    ImageUrlContent,
     ParentFolderError,
+    PromptMessage,
     ResearchAgentResponse,
     SaveAssetRequestOut,
     SqlAgentResponse,
-
+    StructuredDataExtractorResponse,
+    TextContent,
     Tool,
+    Type,
 )
 from .errors import (
     BadRequestError,
@@ -42,6 +52,12 @@ __all__ = [
     "AssetNotFoundError",
     "AthenaEnvironment",
     "BadRequestError",
+    "Chunk",
+    "ChunkContentItem",
+    "ChunkContentItem_ImageUrl",
+    "ChunkContentItem_Text",
+    "ChunkResult",
+    "ChunkResultChunkId",
     "ContentTooLargeError",
     "CustomAgentResponse",
     "DataFrameRequestOut",
@@ -57,16 +73,20 @@ __all__ = [
     "GeneralAgentConfigEnabledToolsItem",
     "GeneralAgentRequest",
     "GeneralAgentResponse",
+    "ImageUrlContent",
     "InternalServerError",
     "NotFoundError",
     "ParentFolderError",
+    "PromptMessage",
     "QueryExecuteRequestDatabaseAssetIds",
     "ResearchAgentResponse",
     "SaveAssetRequestOut",
     "SqlAgentResponse",
-    "
+    "StructuredDataExtractorResponse",
+    "TextContent",
     "Tool",
     "ToolsDataFrameRequestColumnsItem",
+    "Type",
     "UnauthorizedError",
     "UnprocessableEntityError",
     "UnsupportedMediaTypeError",
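For orientation, the additions above mean the new structured-data-extractor types are re-exported from the package root in 0.1.123. A minimal illustrative sketch of importing them (not part of the diff itself):

    from athena import (
        Chunk,
        ChunkContentItem_ImageUrl,
        ChunkContentItem_Text,
        PromptMessage,
        StructuredDataExtractorResponse,
        Type,
    )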
athena/core/client_wrapper.py
CHANGED
@@ -17,7 +17,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "athena-intelligence",
-            "X-Fern-SDK-Version": "0.1.122",
+            "X-Fern-SDK-Version": "0.1.123",
         }
         headers["X-API-KEY"] = self.api_key
         return headers
athena/tools/structured_data_extractor/client.py
CHANGED
@@ -8,7 +8,9 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ...core.pydantic_utilities import pydantic_v1
 from ...core.request_options import RequestOptions
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
-from ...types.
+from ...types.chunk import Chunk
+from ...types.prompt_message import PromptMessage
+from ...types.structured_data_extractor_response import StructuredDataExtractorResponse

 # this is used as the default value for optional parameters
 OMIT = typing.cast(typing.Any, ...)
@@ -21,58 +23,114 @@ class StructuredDataExtractorClient:
     def invoke(
         self,
         *,
-
+        chunks: typing.Sequence[Chunk],
         json_schema: typing.Dict[str, typing.Any],
-
+        chunk_messages: typing.Optional[typing.Sequence[PromptMessage]] = OMIT,
         reduce: typing.Optional[bool] = OMIT,
+        reduce_messages: typing.Optional[typing.Sequence[PromptMessage]] = OMIT,
         request_options: typing.Optional[RequestOptions] = None
-    ) ->
+    ) -> StructuredDataExtractorResponse:
         """
-
+        Extract structured data.
+
+        tl;dr:
+
+        - pass a valid JSON schema in `json_schema`
+        - pass the page chunks as a list of `Chunk` objects, by default: {"type": "text", "content": "..."}
+        - leave all other fields as default
+
+        Detailed configuration (only relevant for complex use cases):
+
+        The structured data extractor's architecture follows the map-reduce pattern,
+        where the asset is divided into chunks, the schema is extracted from each chunk,
+        and the chunks are then reduced to a single structured data object.
+
+        In some applications, you may not want to: - map (if your input asset is small enough) - reduce (if your output object is large enough that it will overflow the output length;
+        if you're extracting a long list of entities; if youre )
+        to extract all instances of the schema).
+
+        You can configure these behaviors with the `map` and `reduce` fields.

         Parameters
         ----------
-
-        The
+        chunks : typing.Sequence[Chunk]
+            The chunks from which to extract structured data.

         json_schema : typing.Dict[str, typing.Any]
-            The JSON schema to use for validation (version draft 2020-12).
+            The JSON schema to use for validation (version draft 2020-12). See the docs [here](https://json-schema.org/learn/getting-started-step-by-step).

-
-
+        chunk_messages : typing.Optional[typing.Sequence[PromptMessage]]
+            The prompt to use for the data extraction over *each individual chunk*. It must be a list of messages. The chunk content will be appended as a list of human messages.

         reduce : typing.Optional[bool]
             If `map`, whether to reduce the chunks to a single structured object (true) or return the full list (false). Use True unless you want to preserve duplicates from each page or expect the object to overflow the output context.

+        reduce_messages : typing.Optional[typing.Sequence[PromptMessage]]
+            The prompt to use for the reduce steps. It must be a list of messages. The two extraction attempts will be appended as a list of human messages.
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

         Returns
         -------
-
+        StructuredDataExtractorResponse
             Successful Response

         Examples
         --------
+        from athena import Chunk, ChunkContentItem_Text
         from athena.client import Athena

         client = Athena(
             api_key="YOUR_API_KEY",
         )
         client.tools.structured_data_extractor.invoke(
-
-
+            chunks=[
+                Chunk(
+                    chunk_id="1",
+                    content=[
+                        ChunkContentItem_Text(
+                            text="John Smith is a 35 year old developer. You can reach him at john.smith@example.com",
+                        )
+                    ],
+                ),
+                Chunk(
+                    chunk_id="2",
+                    content=[
+                        ChunkContentItem_Text(
+                            text="Jane Doe is a 25 year old developer. You can reach her at jane@example.com",
+                        )
+                    ],
+                ),
+            ],
+            json_schema={
+                "description": "A person",
+                "properties": {
+                    "age": {"type": "integer"},
+                    "email": {"type": "string"},
+                    "name": {"type": "string"},
+                },
+                "required": ["name"],
+                "title": "Person",
+                "type": "object",
+            },
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "api/v0/tools/structured-data-extractor/invoke",
             method="POST",
-            json={
+            json={
+                "chunk_messages": chunk_messages,
+                "chunks": chunks,
+                "json_schema": json_schema,
+                "reduce": reduce,
+                "reduce_messages": reduce_messages,
+            },
             request_options=request_options,
             omit=OMIT,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic_v1.parse_obj_as(
+            return pydantic_v1.parse_obj_as(StructuredDataExtractorResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         try:
@@ -89,58 +147,114 @@ class AsyncStructuredDataExtractorClient:
     async def invoke(
         self,
         *,
-
+        chunks: typing.Sequence[Chunk],
         json_schema: typing.Dict[str, typing.Any],
-
+        chunk_messages: typing.Optional[typing.Sequence[PromptMessage]] = OMIT,
         reduce: typing.Optional[bool] = OMIT,
+        reduce_messages: typing.Optional[typing.Sequence[PromptMessage]] = OMIT,
         request_options: typing.Optional[RequestOptions] = None
-    ) ->
+    ) -> StructuredDataExtractorResponse:
         """
-
+        Extract structured data.
+
+        tl;dr:
+
+        - pass a valid JSON schema in `json_schema`
+        - pass the page chunks as a list of `Chunk` objects, by default: {"type": "text", "content": "..."}
+        - leave all other fields as default
+
+        Detailed configuration (only relevant for complex use cases):
+
+        The structured data extractor's architecture follows the map-reduce pattern,
+        where the asset is divided into chunks, the schema is extracted from each chunk,
+        and the chunks are then reduced to a single structured data object.
+
+        In some applications, you may not want to: - map (if your input asset is small enough) - reduce (if your output object is large enough that it will overflow the output length;
+        if you're extracting a long list of entities; if youre )
+        to extract all instances of the schema).
+
+        You can configure these behaviors with the `map` and `reduce` fields.

         Parameters
         ----------
-
-        The
+        chunks : typing.Sequence[Chunk]
+            The chunks from which to extract structured data.

         json_schema : typing.Dict[str, typing.Any]
-            The JSON schema to use for validation (version draft 2020-12).
+            The JSON schema to use for validation (version draft 2020-12). See the docs [here](https://json-schema.org/learn/getting-started-step-by-step).

-
-
+        chunk_messages : typing.Optional[typing.Sequence[PromptMessage]]
+            The prompt to use for the data extraction over *each individual chunk*. It must be a list of messages. The chunk content will be appended as a list of human messages.

         reduce : typing.Optional[bool]
             If `map`, whether to reduce the chunks to a single structured object (true) or return the full list (false). Use True unless you want to preserve duplicates from each page or expect the object to overflow the output context.

+        reduce_messages : typing.Optional[typing.Sequence[PromptMessage]]
+            The prompt to use for the reduce steps. It must be a list of messages. The two extraction attempts will be appended as a list of human messages.
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

         Returns
         -------
-
+        StructuredDataExtractorResponse
             Successful Response

         Examples
         --------
+        from athena import Chunk, ChunkContentItem_Text
         from athena.client import AsyncAthena

         client = AsyncAthena(
             api_key="YOUR_API_KEY",
         )
         await client.tools.structured_data_extractor.invoke(
-
-
+            chunks=[
+                Chunk(
+                    chunk_id="1",
+                    content=[
+                        ChunkContentItem_Text(
+                            text="John Smith is a 35 year old developer. You can reach him at john.smith@example.com",
+                        )
+                    ],
+                ),
+                Chunk(
+                    chunk_id="2",
+                    content=[
+                        ChunkContentItem_Text(
+                            text="Jane Doe is a 25 year old developer. You can reach her at jane@example.com",
+                        )
+                    ],
+                ),
+            ],
+            json_schema={
+                "description": "A person",
+                "properties": {
+                    "age": {"type": "integer"},
+                    "email": {"type": "string"},
+                    "name": {"type": "string"},
+                },
+                "required": ["name"],
+                "title": "Person",
+                "type": "object",
+            },
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "api/v0/tools/structured-data-extractor/invoke",
             method="POST",
-            json={
+            json={
+                "chunk_messages": chunk_messages,
+                "chunks": chunks,
+                "json_schema": json_schema,
+                "reduce": reduce,
+                "reduce_messages": reduce_messages,
+            },
             request_options=request_options,
             omit=OMIT,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic_v1.parse_obj_as(
+            return pydantic_v1.parse_obj_as(StructuredDataExtractorResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic_v1.parse_obj_as(typing.Any, _response.json()))  # type: ignore
         try:
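Taken together, the new signature is invoke(chunks, json_schema, chunk_messages=..., reduce=..., reduce_messages=..., request_options=...) and it returns a StructuredDataExtractorResponse with chunk_by_chunk_data and reduced_data. A hedged usage sketch building on the example above (the schema, prompt text, and API key are placeholders; reduce behaviour is as described in the docstrings):

    from athena import Chunk, ChunkContentItem_Text, PromptMessage, Type
    from athena.client import Athena

    client = Athena(api_key="YOUR_API_KEY")

    # Per-chunk system prompt; per the docstring, the chunk content is appended
    # as human messages after these.
    chunk_prompt = [PromptMessage(type=Type.SYSTEM, content="Extract one person per chunk.")]

    response = client.tools.structured_data_extractor.invoke(
        chunks=[
            Chunk(
                chunk_id="1",
                content=[ChunkContentItem_Text(text="John Smith is a 35 year old developer.")],
            ),
        ],
        json_schema={"title": "Person", "type": "object", "properties": {"name": {"type": "string"}}},
        chunk_messages=chunk_prompt,
        reduce=False,  # keep one result per chunk instead of reducing to a single object
    )

    # With reduce=False, reduced_data is null and the per-chunk results are in
    # chunk_by_chunk_data (a list of ChunkResult).
    for result in response.chunk_by_chunk_data or []:
        print(result.chunk_id, result.chunk_result)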
athena/types/__init__.py
CHANGED
@@ -1,6 +1,10 @@
 # This file was auto-generated by Fern from our API Definition.

 from .asset_not_found_error import AssetNotFoundError
+from .chunk import Chunk
+from .chunk_content_item import ChunkContentItem, ChunkContentItem_ImageUrl, ChunkContentItem_Text
+from .chunk_result import ChunkResult
+from .chunk_result_chunk_id import ChunkResultChunkId
 from .custom_agent_response import CustomAgentResponse
 from .data_frame_request_out import DataFrameRequestOut
 from .data_frame_request_out_columns_item import DataFrameRequestOutColumnsItem
@@ -15,15 +19,25 @@ from .general_agent_config import GeneralAgentConfig
 from .general_agent_config_enabled_tools_item import GeneralAgentConfigEnabledToolsItem
 from .general_agent_request import GeneralAgentRequest
 from .general_agent_response import GeneralAgentResponse
+from .image_url_content import ImageUrlContent
 from .parent_folder_error import ParentFolderError
+from .prompt_message import PromptMessage
 from .research_agent_response import ResearchAgentResponse
 from .save_asset_request_out import SaveAssetRequestOut
 from .sql_agent_response import SqlAgentResponse
-from .
+from .structured_data_extractor_response import StructuredDataExtractorResponse
+from .text_content import TextContent
 from .tool import Tool
+from .type import Type

 __all__ = [
     "AssetNotFoundError",
+    "Chunk",
+    "ChunkContentItem",
+    "ChunkContentItem_ImageUrl",
+    "ChunkContentItem_Text",
+    "ChunkResult",
+    "ChunkResultChunkId",
     "CustomAgentResponse",
     "DataFrameRequestOut",
     "DataFrameRequestOutColumnsItem",
@@ -38,10 +52,14 @@ __all__ = [
     "GeneralAgentConfigEnabledToolsItem",
     "GeneralAgentRequest",
     "GeneralAgentResponse",
+    "ImageUrlContent",
     "ParentFolderError",
+    "PromptMessage",
     "ResearchAgentResponse",
     "SaveAssetRequestOut",
     "SqlAgentResponse",
-    "
+    "StructuredDataExtractorResponse",
+    "TextContent",
     "Tool",
+    "Type",
 ]
athena/types/chunk.py
ADDED
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .chunk_content_item import ChunkContentItem
+
+
+class Chunk(pydantic_v1.BaseModel):
+    """
+    A chunk of content to extract data from.
+    """
+
+    chunk_id: str
+    content: typing.List[ChunkContentItem]
+    metadata: typing.Optional[typing.Dict[str, typing.Optional[str]]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/chunk_content_item.py
ADDED
@@ -0,0 +1,58 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class ChunkContentItem_Text(pydantic_v1.BaseModel):
+    text: str
+    type: typing.Literal["text"] = "text"
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
+
+
+class ChunkContentItem_ImageUrl(pydantic_v1.BaseModel):
+    image_url: typing.Dict[str, str]
+    type: typing.Literal["image_url"] = "image_url"
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
+
+
+ChunkContentItem = typing.Union[ChunkContentItem_Text, ChunkContentItem_ImageUrl]
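ChunkContentItem is a tagged union of ChunkContentItem_Text and ChunkContentItem_ImageUrl, so a single Chunk can mix text and image parts. A small illustrative sketch (the {"url": ...} shape of image_url is an assumption on my part; the SDK only types it as Dict[str, str]):

    from athena import Chunk, ChunkContentItem_ImageUrl, ChunkContentItem_Text

    page = Chunk(
        chunk_id="page-1",
        content=[
            ChunkContentItem_Text(text="Invoice total: $1,250.00"),
            # Assumed dict shape for the image payload.
            ChunkContentItem_ImageUrl(image_url={"url": "https://example.com/invoice-page-1.png"}),
        ],
        metadata={"source": "invoice.pdf"},
    )

    # Each union member serializes with its literal `type` tag ("text" / "image_url").
    print(page.json())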
athena/types/chunk_result.py
ADDED
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .chunk_result_chunk_id import ChunkResultChunkId
+
+
+class ChunkResult(pydantic_v1.BaseModel):
+    """
+    The result of a chunk extraction.
+    """
+
+    chunk_id: ChunkResultChunkId
+    chunk_result: typing.Optional[typing.Dict[str, typing.Any]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/{structured_data_extractor_reponse.py → image_url_content.py}
RENAMED
@@ -7,15 +7,12 @@ from ..core.datetime_utils import serialize_datetime
 from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1


-class
+class ImageUrlContent(pydantic_v1.BaseModel):
     """
-
+    An image content item.
     """

-
-    """
-    The extracted structured data. Guaranteed to match `json_schema`.
-    """
+    image_url: typing.Dict[str, str]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
athena/types/prompt_message.py
ADDED
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .type import Type
+
+
+class PromptMessage(pydantic_v1.BaseModel):
+    """
+    A message to use for the structured data extractor.
+    """
+
+    content: str
+    type: Type
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/structured_data_extractor_response.py
ADDED
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .chunk_result import ChunkResult
+
+
+class StructuredDataExtractorResponse(pydantic_v1.BaseModel):
+    """
+    The agent's response.
+    """
+
+    chunk_by_chunk_data: typing.Optional[typing.List[ChunkResult]] = pydantic_v1.Field(default=None)
+    """
+    The extracted structured data for each chunk. A list where each element is guaranteed to match `json_schema`.
+    """
+
+    reduced_data: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    If reduce is True, the reduced structured data, otherwise null. Guaranteed to match `json_schema`.
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/text_content.py
ADDED
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class TextContent(pydantic_v1.BaseModel):
+    """
+    A text content item in a multimodal message content.
+    """
+
+    text: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+        return deep_union_pydantic_dicts(
+            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+        )
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/type.py
ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class Type(str, enum.Enum):
+    SYSTEM = "system"
+    HUMAN = "human"
+    USER = "user"
+
+    def visit(
+        self,
+        system: typing.Callable[[], T_Result],
+        human: typing.Callable[[], T_Result],
+        user: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is Type.SYSTEM:
+            return system()
+        if self is Type.HUMAN:
+            return human()
+        if self is Type.USER:
+            return user()
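The new Type enum supplies the role for PromptMessage and ships a visit() helper for exhaustive dispatch over its three values. A brief illustrative sketch:

    from athena import PromptMessage, Type

    msg = PromptMessage(type=Type.HUMAN, content="Summarise the attached chunk.")

    # visit() dispatches on the enum value; each handler is a zero-argument callable.
    role_label = msg.type.visit(
        system=lambda: "system prompt",
        human=lambda: "human turn",
        user=lambda: "user turn",
    )
    print(role_label)  # -> "human turn"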
{athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.123.dist-info}/RECORD
RENAMED
@@ -1,4 +1,4 @@
-athena/__init__.py,sha256
+athena/__init__.py,sha256=-QY5Njk1mv2eVt9CMdJSaU_nMqoXzZ8d5kHjxKCs7jU,2464
 athena/agents/__init__.py,sha256=I6MO2O_hb6KLa8oDHbGNSAhcPE-dsrX6LMcAEhsg3PQ,160
 athena/agents/client.py,sha256=aI8rNhXBSVJ-hvjnIoCK9sKvHB0e95Zkn-3YpXOKFrY,6721
 athena/agents/drive/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -13,7 +13,7 @@ athena/base_client.py,sha256=-kVdOlIibBz48lxWratdQAzT7fTvZsORvOMF3KoPDPw,5647
 athena/client.py,sha256=4PUPrBPCMTFpHR1yuKVR5eC1AYBl_25SMf6ZH82JHB0,19039
 athena/core/__init__.py,sha256=UFXpYzcGxWQUucU1TkjOQ9mGWN3A5JohluOIWVYKU4I,973
 athena/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
-athena/core/client_wrapper.py,sha256=
+athena/core/client_wrapper.py,sha256=QpWDM_UvxCir-D4qH9GDRZGMoDC2fW-UpSPKEHT3S0o,1806
 athena/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 athena/core/file.py,sha256=sy1RUGZ3aJYuw998bZytxxo6QdgKmlnlgBaMvwEKCGg,1480
 athena/core/http_client.py,sha256=Z4NuAsJD-51yqmoME17O5sxwx5orSp1wsnd6bPyKcgA,17768
@@ -43,13 +43,17 @@ athena/tools/client.py,sha256=9ec2gnf3z_vhr3EqT_-ZksevTDtFP1jftPY4os0Ty3Q,21166
 athena/tools/email/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 athena/tools/email/client.py,sha256=epUkV5af3eilVgRR81SFZAf29JuhEWKMkdMuN6qDLUM,7593
 athena/tools/structured_data_extractor/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-athena/tools/structured_data_extractor/client.py,sha256=
+athena/tools/structured_data_extractor/client.py,sha256=0rEq5bftoWQJwCF9fcP3r8nUSY5hZLy_pOAucena9Go,11235
 athena/tools/tasks/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 athena/tools/tasks/client.py,sha256=5kT6ulh2YDIbNYiv-knBjtF-ST7p0dUvZyrd7t5O61s,2975
 athena/tools/types/__init__.py,sha256=cA-ZQm6veQAP3_vKu9KkZpISsQqgTBN_Z--FGY1c2iA,197
 athena/tools/types/tools_data_frame_request_columns_item.py,sha256=GA1FUlTV_CfSc-KToTAwFf4Exl0rr4fsweVZupztjw0,138
-athena/types/__init__.py,sha256
+athena/types/__init__.py,sha256=-VA36LN5Q1SLk5IUgw159TBfTtTmvbgAOfaL-3TRY7A,2522
 athena/types/asset_not_found_error.py,sha256=ZcgqRuzvO4Z8vVVxwtDB-QtKhpVIVV3hqQuJeUoOoJE,1121
+athena/types/chunk.py,sha256=M4O7Sj3EMvkXioQneuKbptr1n5XNGCU9fVxYR12XG9o,1340
+athena/types/chunk_content_item.py,sha256=2B1mTc0a4h7jyKRiYwfC573fM4xijhNEgfd_FI-myj4,2251
+athena/types/chunk_result.py,sha256=b74rp4xNKm3r0R76N-VnoaKrEKeBzMWRGI2PVMyiXpc,1310
+athena/types/chunk_result_chunk_id.py,sha256=pzJ6yL6NdUtseoeU4Kw2jlxSTMCVew2TrjhR1MbCuFg,124
 athena/types/custom_agent_response.py,sha256=_Vm_fJq4cETtOawBW7p0cvH4Jmle26lHQZ73A8MdLX0,1263
 athena/types/data_frame_request_out.py,sha256=1CEBe-baDQi0uz_EgMw0TKGYXGj6KV44cL3ViRTZLKM,1669
 athena/types/data_frame_request_out_columns_item.py,sha256=9cjzciFv6C8n8Griytt_q_8ovkzHViS5tvUcMDfkfKE,143
@@ -64,13 +68,17 @@ athena/types/general_agent_config.py,sha256=FaswWVsDTsL5Fs9Tlx4zSK1S8OrsFnzruEt7
 athena/types/general_agent_config_enabled_tools_item.py,sha256=6gYaU7uIDJbgygtBKLdYL-VbPxxbEcxwRsT8VaW5vN8,165
 athena/types/general_agent_request.py,sha256=NnUVtz8U1VoA1SJapbp163Wf_inEQVeFCYWJvM4P-qI,1449
 athena/types/general_agent_response.py,sha256=9BxqXzchSti5O0Ch_WJkvmkawkBhpH03QlZIbKdYbAY,1212
+athena/types/image_url_content.py,sha256=AivFiET-XA7guQ_rWEGOOafDuQBXTvO8-rMGmKucCss,1182
 athena/types/parent_folder_error.py,sha256=ZMF-i3mZY6Mu1n5uQ60Q3mIIfehlWuXtgFUkSYspkx8,1120
+athena/types/prompt_message.py,sha256=0z2qlWbqHCG2j7hvWBDvDpQrHLDCI3h8Z0kg8AOOgKs,1227
 athena/types/research_agent_response.py,sha256=-1mX4M0IEWDFH3alSZdtuhZHSerjWYJQkn74r3Dp26g,1235
 athena/types/save_asset_request_out.py,sha256=5bpBaUV3oeuL_hz4s07c-6MQHkn4cBsyxgT_SD5oi6I,1193
 athena/types/sql_agent_response.py,sha256=DmeG0HPZkPT_gTrtkroVZluGZIV9McB8wmME2iT8PB0,1347
-athena/types/
+athena/types/structured_data_extractor_response.py,sha256=RBTjR50PWs3NM0GUlENNHaqAMiOatf14Vmvrd94de8s,1647
+athena/types/text_content.py,sha256=uG2poNIkM6o7tFgf-eKzZk9kZHYImY3JdI-NkYiqWgU,1185
 athena/types/tool.py,sha256=6H2BFZiBgQOtYUAwSYBeGZKhwev17IEwnIjgmno6dZw,436
+athena/types/type.py,sha256=JaUIt4ogmO4XxCQ9c56fqKN5qANKkrnpuZGmdqOCIow,581
 athena/version.py,sha256=8aYAOJtVLaJLpRp6mTiEIhnl8gXA7yE0aDtZ-3mKQ4k,87
-athena_intelligence-0.1.
-athena_intelligence-0.1.
-athena_intelligence-0.1.
+athena_intelligence-0.1.123.dist-info/METADATA,sha256=Au7QjNP7MXi0igYTXg3ypVp_OHoNiFm-Q8c-E53dEGU,5274
+athena_intelligence-0.1.123.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+athena_intelligence-0.1.123.dist-info/RECORD,,

{athena_intelligence-0.1.122.dist-info → athena_intelligence-0.1.123.dist-info}/WHEEL
RENAMED
File without changes