athena-intelligence 0.1.39__py3-none-any.whl → 0.1.41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
athena/__init__.py CHANGED
@@ -2,49 +2,63 @@
 
 from .types import (
     Dataset,
+    Document,
+    ExcecuteToolFirstWorkflowOut,
+    FirecrawlScrapeUrlDataReponseDto,
+    FirecrawlScrapeUrlMetadata,
     GetDatasetsResponse,
     GetSnippetsResponse,
     HttpValidationError,
+    LangchainDocumentsRequestOut,
     MessageOut,
     MessageOutDto,
     Model,
     Report,
-    ScrapeMarkdownResult,
     Snippet,
     SqlResults,
     StatusEnum,
+    StructuredParseResult,
     Tools,
     UrlResult,
     ValidationError,
     ValidationErrorLocItem,
 )
 from .errors import UnprocessableEntityError
-from . import dataset, message, query, report, search, snippet
+from . import chain, dataset, message, query, report, search, snippet, tools
+from .chain import StructuredParseInParsingModel
 from .environment import AthenaEnvironment
 
 __all__ = [
     "AthenaEnvironment",
     "Dataset",
+    "Document",
+    "ExcecuteToolFirstWorkflowOut",
+    "FirecrawlScrapeUrlDataReponseDto",
+    "FirecrawlScrapeUrlMetadata",
     "GetDatasetsResponse",
     "GetSnippetsResponse",
     "HttpValidationError",
+    "LangchainDocumentsRequestOut",
    "MessageOut",
     "MessageOutDto",
     "Model",
     "Report",
-    "ScrapeMarkdownResult",
     "Snippet",
     "SqlResults",
     "StatusEnum",
+    "StructuredParseInParsingModel",
+    "StructuredParseResult",
     "Tools",
     "UnprocessableEntityError",
     "UrlResult",
     "ValidationError",
     "ValidationErrorLocItem",
+    "chain",
     "dataset",
     "message",
     "query",
     "report",
     "search",
     "snippet",
+    "tools",
 ]
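Read against the export list above, the widened public surface imports straight from the package root. A minimal sketch, using only names that appear in `__all__`:

    from athena import (
        Document,
        ExcecuteToolFirstWorkflowOut,
        FirecrawlScrapeUrlDataReponseDto,
        FirecrawlScrapeUrlMetadata,
        LangchainDocumentsRequestOut,
        StructuredParseInParsingModel,
        StructuredParseResult,
    )

Note that `ScrapeMarkdownResult` is no longer exported; imports of it will fail on 0.1.41.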
athena/base_client.py CHANGED
@@ -4,6 +4,7 @@ import typing
 
 import httpx
 
+from .chain.client import AsyncChainClient, ChainClient
 from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from .dataset.client import AsyncDatasetClient, DatasetClient
 from .environment import AthenaEnvironment
@@ -12,6 +13,7 @@ from .query.client import AsyncQueryClient, QueryClient
 from .report.client import AsyncReportClient, ReportClient
 from .search.client import AsyncSearchClient, SearchClient
 from .snippet.client import AsyncSnippetClient, SnippetClient
+from .tools.client import AsyncToolsClient, ToolsClient
 
 
 class BaseAthena:
@@ -58,6 +60,8 @@ class BaseAthena:
         self.report = ReportClient(client_wrapper=self._client_wrapper)
         self.query = QueryClient(client_wrapper=self._client_wrapper)
         self.search = SearchClient(client_wrapper=self._client_wrapper)
+        self.chain = ChainClient(client_wrapper=self._client_wrapper)
+        self.tools = ToolsClient(client_wrapper=self._client_wrapper)
 
 
 class AsyncBaseAthena:
@@ -104,6 +108,8 @@ class AsyncBaseAthena:
         self.report = AsyncReportClient(client_wrapper=self._client_wrapper)
         self.query = AsyncQueryClient(client_wrapper=self._client_wrapper)
         self.search = AsyncSearchClient(client_wrapper=self._client_wrapper)
+        self.chain = AsyncChainClient(client_wrapper=self._client_wrapper)
+        self.tools = AsyncToolsClient(client_wrapper=self._client_wrapper)
 
 
 def _get_base_url(*, base_url: typing.Optional[str] = None, environment: AthenaEnvironment) -> str:
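With the constructor changes above, both client variants now expose `chain` and `tools` namespaces. A minimal sketch, assuming the `Athena` wrapper from `athena.client` that the docstrings below use:

    from athena.client import Athena

    client = Athena(api_key="YOUR_API_KEY")
    client.chain  # ChainClient
    client.tools  # ToolsClient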
athena/chain/__init__.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import StructuredParseInParsingModel
+
+__all__ = ["StructuredParseInParsingModel"]
athena/chain/client.py ADDED
@@ -0,0 +1,167 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.remove_none_from_dict import remove_none_from_dict
+from ..core.request_options import RequestOptions
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+from ..types.structured_parse_result import StructuredParseResult
+from .types.structured_parse_in_parsing_model import StructuredParseInParsingModel
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ChainClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def structured_parse(
+        self,
+        *,
+        text_input: str,
+        custom_type_dict: typing.Dict[str, typing.Any],
+        parsing_model: typing.Optional[StructuredParseInParsingModel] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> StructuredParseResult:
+        """
+        Parameters:
+            - text_input: str. The text input to be parsed.
+
+            - custom_type_dict: typing.Dict[str, typing.Any]. A dictionary of field names and their default values.
+
+            - parsing_model: typing.Optional[StructuredParseInParsingModel]. The model to be used for parsing.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena import StructuredParseInParsingModel
+        from athena.client import Athena
+
+        client = Athena(api_key="YOUR_API_KEY", )
+        client.chain.structured_parse(text_input='Athena is an AI-native analytics platform and artificial employee built to accelerate analytics workflows
+        by offering enterprise teams co-pilot and auto-pilot modes. Athena learns your workflow as a co-pilot,
+        allowing you to hand over controls to her for autonomous execution with confidence.
+
+        Give me all of the modes Athena provides.', custom_type_dict={"modes": {}}, parsing_model=StructuredParseInParsingModel.GPT_4_TURBO, )
+        """
+        _request: typing.Dict[str, typing.Any] = {"text_input": text_input, "custom_type_dict": custom_type_dict}
+        if parsing_model is not OMIT:
+            _request["parsing_model"] = parsing_model
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/structured-parse"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(StructuredParseResult, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncChainClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def structured_parse(
+        self,
+        *,
+        text_input: str,
+        custom_type_dict: typing.Dict[str, typing.Any],
+        parsing_model: typing.Optional[StructuredParseInParsingModel] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> StructuredParseResult:
+        """
+        Parameters:
+            - text_input: str. The text input to be parsed.
+
+            - custom_type_dict: typing.Dict[str, typing.Any]. A dictionary of field names and their default values.
+
+            - parsing_model: typing.Optional[StructuredParseInParsingModel]. The model to be used for parsing.
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena import StructuredParseInParsingModel
+        from athena.client import AsyncAthena
+
+        client = AsyncAthena(api_key="YOUR_API_KEY", )
+        await client.chain.structured_parse(text_input='Athena is an AI-native analytics platform and artificial employee built to accelerate analytics workflows
+        by offering enterprise teams co-pilot and auto-pilot modes. Athena learns your workflow as a co-pilot,
+        allowing you to hand over controls to her for autonomous execution with confidence.
+
+        Give me all of the modes Athena provides.', custom_type_dict={"modes": {}}, parsing_model=StructuredParseInParsingModel.GPT_4_TURBO, )
+        """
+        _request: typing.Dict[str, typing.Any] = {"text_input": text_input, "custom_type_dict": custom_type_dict}
+        if parsing_model is not OMIT:
+            _request["parsing_model"] = parsing_model
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/structured-parse"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(StructuredParseResult, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
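The docstring example above wraps a multi-line prompt across display lines; a runnable equivalent (the prompt text is abridged here, everything else is copied from the docstring):

    from athena import StructuredParseInParsingModel
    from athena.client import Athena

    client = Athena(api_key="YOUR_API_KEY")
    parsed = client.chain.structured_parse(
        text_input=(
            "Athena is an AI-native analytics platform offering co-pilot "
            "and auto-pilot modes. Give me all of the modes Athena provides."
        ),
        custom_type_dict={"modes": {}},
        parsing_model=StructuredParseInParsingModel.GPT_4_TURBO,
    )
    print(parsed.result)  # StructuredParseResult.result is a Dict[str, Any]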
athena/chain/types/__init__.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .structured_parse_in_parsing_model import StructuredParseInParsingModel
+
+__all__ = ["StructuredParseInParsingModel"]
athena/chain/types/structured_parse_in_parsing_model.py ADDED
@@ -0,0 +1,53 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class StructuredParseInParsingModel(str, enum.Enum):
+    """
+    The model to be used for parsing.
+    """
+
+    GPT_4_TURBO = "gpt-4-turbo"
+    GPT_4_TURBO_PREVIEW = "gpt-4-turbo-preview"
+    GPT_4 = "gpt-4"
+    GPT_35_TURBO = "gpt-3.5-turbo"
+    MIXTRAL_SMALL_8_X_7_B_0211 = "mixtral-small-8x7b-0211"
+    MISTRAL_LARGE_0224 = "mistral-large-0224"
+    CLAUDE_3_OPUS_20240229 = "claude-3-opus-20240229"
+    CLAUDE_3_SONNET_20240229 = "claude-3-sonnet-20240229"
+    CLAUDE_3_HAIKU_20240307 = "claude-3-haiku-20240307"
+
+    def visit(
+        self,
+        gpt_4_turbo: typing.Callable[[], T_Result],
+        gpt_4_turbo_preview: typing.Callable[[], T_Result],
+        gpt_4: typing.Callable[[], T_Result],
+        gpt_35_turbo: typing.Callable[[], T_Result],
+        mixtral_small_8_x_7_b_0211: typing.Callable[[], T_Result],
+        mistral_large_0224: typing.Callable[[], T_Result],
+        claude_3_opus_20240229: typing.Callable[[], T_Result],
+        claude_3_sonnet_20240229: typing.Callable[[], T_Result],
+        claude_3_haiku_20240307: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is StructuredParseInParsingModel.GPT_4_TURBO:
+            return gpt_4_turbo()
+        if self is StructuredParseInParsingModel.GPT_4_TURBO_PREVIEW:
+            return gpt_4_turbo_preview()
+        if self is StructuredParseInParsingModel.GPT_4:
+            return gpt_4()
+        if self is StructuredParseInParsingModel.GPT_35_TURBO:
+            return gpt_35_turbo()
+        if self is StructuredParseInParsingModel.MIXTRAL_SMALL_8_X_7_B_0211:
+            return mixtral_small_8_x_7_b_0211()
+        if self is StructuredParseInParsingModel.MISTRAL_LARGE_0224:
+            return mistral_large_0224()
+        if self is StructuredParseInParsingModel.CLAUDE_3_OPUS_20240229:
+            return claude_3_opus_20240229()
+        if self is StructuredParseInParsingModel.CLAUDE_3_SONNET_20240229:
+            return claude_3_sonnet_20240229()
+        if self is StructuredParseInParsingModel.CLAUDE_3_HAIKU_20240307:
+            return claude_3_haiku_20240307()
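`visit` is an exhaustive dispatch over the enum members; one plausible use is mapping each model to its provider (the provider labels below are illustrative, not part of the SDK):

    model = StructuredParseInParsingModel.CLAUDE_3_HAIKU_20240307
    provider = model.visit(
        gpt_4_turbo=lambda: "openai",
        gpt_4_turbo_preview=lambda: "openai",
        gpt_4=lambda: "openai",
        gpt_35_turbo=lambda: "openai",
        mixtral_small_8_x_7_b_0211=lambda: "mistral",
        mistral_large_0224=lambda: "mistral",
        claude_3_opus_20240229=lambda: "anthropic",
        claude_3_sonnet_20240229=lambda: "anthropic",
        claude_3_haiku_20240307=lambda: "anthropic",
    )
    assert provider == "anthropic"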
athena/core/client_wrapper.py CHANGED
@@ -16,7 +16,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "athena-intelligence",
-            "X-Fern-SDK-Version": "0.1.39",
+            "X-Fern-SDK-Version": "0.1.41",
         }
         headers["X-API-KEY"] = self.api_key
         return headers
athena/search/client.py CHANGED
@@ -11,7 +11,6 @@ from ..core.remove_none_from_dict import remove_none_from_dict
 from ..core.request_options import RequestOptions
 from ..errors.unprocessable_entity_error import UnprocessableEntityError
 from ..types.http_validation_error import HttpValidationError
-from ..types.scrape_markdown_result import ScrapeMarkdownResult
 from ..types.url_result import UrlResult
 
 try:
@@ -60,9 +59,9 @@ class SearchClient:
             api_key="YOUR_API_KEY",
         )
         client.search.get_urls(
-            query="query",
-            num_urls=1,
-            tbs="tbs",
+            query="Dogs",
+            num_urls=10,
+            tbs="qdr:m",
         )
         """
         _request: typing.Dict[str, typing.Any] = {"query": query, "num_urls": num_urls, "tbs": tbs}
@@ -74,7 +73,7 @@ class SearchClient:
             _request["site"] = site
         _response = self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/get-urls"),
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/search/get-urls"),
             params=jsonable_encoder(
                 request_options.get("additional_query_parameters") if request_options is not None else None
             ),
@@ -108,60 +107,6 @@ class SearchClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_markdown(
-        self, *, url: str, request_options: typing.Optional[RequestOptions] = None
-    ) -> ScrapeMarkdownResult:
-        """
-        Parameters:
-            - url: str.
-
-            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
-        ---
-        from athena.client import Athena
-
-        client = Athena(
-            api_key="YOUR_API_KEY",
-        )
-        client.search.get_markdown(
-            url="url",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/get-markdown"),
-            params=jsonable_encoder(
-                request_options.get("additional_query_parameters") if request_options is not None else None
-            ),
-            json=jsonable_encoder({"url": url})
-            if request_options is None or request_options.get("additional_body_parameters") is None
-            else {
-                **jsonable_encoder({"url": url}),
-                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
-            },
-            headers=jsonable_encoder(
-                remove_none_from_dict(
-                    {
-                        **self._client_wrapper.get_headers(),
-                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
-                    }
-                )
-            ),
-            timeout=request_options.get("timeout_in_seconds")
-            if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else 60,
-            retries=0,
-            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ScrapeMarkdownResult, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
 
 class AsyncSearchClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -200,9 +145,9 @@ class AsyncSearchClient:
             api_key="YOUR_API_KEY",
         )
         await client.search.get_urls(
-            query="query",
-            num_urls=1,
-            tbs="tbs",
+            query="Dogs",
+            num_urls=10,
+            tbs="qdr:m",
        )
         """
         _request: typing.Dict[str, typing.Any] = {"query": query, "num_urls": num_urls, "tbs": tbs}
@@ -214,7 +159,7 @@ class AsyncSearchClient:
             _request["site"] = site
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/get-urls"),
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/search/get-urls"),
             params=jsonable_encoder(
                 request_options.get("additional_query_parameters") if request_options is not None else None
             ),
@@ -247,57 +192,3 @@
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def get_markdown(
-        self, *, url: str, request_options: typing.Optional[RequestOptions] = None
-    ) -> ScrapeMarkdownResult:
-        """
-        Parameters:
-            - url: str.
-
-            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
-        ---
-        from athena.client import AsyncAthena
-
-        client = AsyncAthena(
-            api_key="YOUR_API_KEY",
-        )
-        await client.search.get_markdown(
-            url="url",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/get-markdown"),
-            params=jsonable_encoder(
-                request_options.get("additional_query_parameters") if request_options is not None else None
-            ),
-            json=jsonable_encoder({"url": url})
-            if request_options is None or request_options.get("additional_body_parameters") is None
-            else {
-                **jsonable_encoder({"url": url}),
-                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
-            },
-            headers=jsonable_encoder(
-                remove_none_from_dict(
-                    {
-                        **self._client_wrapper.get_headers(),
-                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
-                    }
-                )
-            ),
-            timeout=request_options.get("timeout_in_seconds")
-            if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else 60,
-            retries=0,
-            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ScrapeMarkdownResult, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
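`search.get_markdown` is removed in 0.1.41. The nearest replacement visible in this diff is `tools.scrape_url`, whose response also carries a `markdown` field; a hedged migration sketch, assuming the new endpoint returns equivalent markdown (the diff itself does not confirm server-side behavior):

    # 0.1.39: markdown = client.search.get_markdown(url="https://example.com").markdown
    markdown = client.tools.scrape_url(url="https://example.com").markdown  # 0.1.41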
athena/tools/__init__.py ADDED
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
athena/tools/client.py ADDED
@@ -0,0 +1,444 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.remove_none_from_dict import remove_none_from_dict
+from ..core.request_options import RequestOptions
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.excecute_tool_first_workflow_out import ExcecuteToolFirstWorkflowOut
+from ..types.firecrawl_scrape_url_data_reponse_dto import FirecrawlScrapeUrlDataReponseDto
+from ..types.http_validation_error import HttpValidationError
+from ..types.langchain_documents_request_out import LangchainDocumentsRequestOut
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ToolsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def scrape_url(
+        self,
+        *,
+        url: str,
+        params: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> FirecrawlScrapeUrlDataReponseDto:
+        """
+        Parameters:
+            - url: str.
+
+            - params: typing.Optional[typing.Dict[str, typing.Any]].
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena.client import Athena
+
+        client = Athena(
+            api_key="YOUR_API_KEY",
+        )
+        client.tools.scrape_url(
+            url="https://athenaintelligence.ai",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"url": url}
+        if params is not OMIT:
+            _request["params"] = params
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/firecrawl/scrape-url"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(FirecrawlScrapeUrlDataReponseDto, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def langchain_documents(
+        self,
+        *,
+        document_id: str,
+        pagination_limit: typing.Optional[int] = OMIT,
+        pagination_offset: typing.Optional[int] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> LangchainDocumentsRequestOut:
+        """
+        Parameters:
+            - document_id: str.
+
+            - pagination_limit: typing.Optional[int].
+
+            - pagination_offset: typing.Optional[int].
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena.client import Athena
+
+        client = Athena(
+            api_key="YOUR_API_KEY",
+        )
+        client.tools.langchain_documents(
+            document_id="doc_9249292-d118-42d3-95b4-00eccfe0754f",
+            pagination_limit=250,
+            pagination_offset=0,
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"document_id": document_id}
+        if pagination_limit is not OMIT:
+            _request["pagination_limit"] = pagination_limit
+        if pagination_offset is not OMIT:
+            _request["pagination_offset"] = pagination_offset
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/file/langchain-documents"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(LangchainDocumentsRequestOut, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def tool_first_workflow(
+        self,
+        *,
+        model_name: str,
+        tool_name: str,
+        content: str,
+        tool_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ExcecuteToolFirstWorkflowOut:
+        """
+        Parameters:
+            - model_name: str.
+
+            - tool_name: str.
+
+            - content: str.
+
+            - tool_kwargs: typing.Optional[typing.Dict[str, typing.Any]].
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena.client import Athena
+
+        client = Athena(
+            api_key="YOUR_API_KEY",
+        )
+        client.tools.tool_first_workflow(
+            model_name="gpt-3.5-turbo",
+            tool_name="tavily_search",
+            content="summarize the website in one paragraph",
+            tool_kwargs={"query": "website: www.athenaintelligence.ai"},
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"model_name": model_name, "tool_name": tool_name, "content": content}
+        if tool_kwargs is not OMIT:
+            _request["tool_kwargs"] = tool_kwargs
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/first-agent"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExcecuteToolFirstWorkflowOut, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncToolsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def scrape_url(
+        self,
+        *,
+        url: str,
+        params: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> FirecrawlScrapeUrlDataReponseDto:
+        """
+        Parameters:
+            - url: str.
+
+            - params: typing.Optional[typing.Dict[str, typing.Any]].
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena.client import AsyncAthena
+
+        client = AsyncAthena(
+            api_key="YOUR_API_KEY",
+        )
+        await client.tools.scrape_url(
+            url="https://athenaintelligence.ai",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"url": url}
+        if params is not OMIT:
+            _request["params"] = params
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/firecrawl/scrape-url"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(FirecrawlScrapeUrlDataReponseDto, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def langchain_documents(
+        self,
+        *,
+        document_id: str,
+        pagination_limit: typing.Optional[int] = OMIT,
+        pagination_offset: typing.Optional[int] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> LangchainDocumentsRequestOut:
+        """
+        Parameters:
+            - document_id: str.
+
+            - pagination_limit: typing.Optional[int].
+
+            - pagination_offset: typing.Optional[int].
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena.client import AsyncAthena
+
+        client = AsyncAthena(
+            api_key="YOUR_API_KEY",
+        )
+        await client.tools.langchain_documents(
+            document_id="doc_9249292-d118-42d3-95b4-00eccfe0754f",
+            pagination_limit=250,
+            pagination_offset=0,
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"document_id": document_id}
+        if pagination_limit is not OMIT:
+            _request["pagination_limit"] = pagination_limit
+        if pagination_offset is not OMIT:
+            _request["pagination_offset"] = pagination_offset
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/file/langchain-documents"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(LangchainDocumentsRequestOut, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def tool_first_workflow(
+        self,
+        *,
+        model_name: str,
+        tool_name: str,
+        content: str,
+        tool_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ExcecuteToolFirstWorkflowOut:
+        """
+        Parameters:
+            - model_name: str.
+
+            - tool_name: str.
+
+            - content: str.
+
+            - tool_kwargs: typing.Optional[typing.Dict[str, typing.Any]].
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena.client import AsyncAthena
+
+        client = AsyncAthena(
+            api_key="YOUR_API_KEY",
+        )
+        await client.tools.tool_first_workflow(
+            model_name="gpt-3.5-turbo",
+            tool_name="tavily_search",
+            content="summarize the website in one paragraph",
+            tool_kwargs={"query": "website: www.athenaintelligence.ai"},
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"model_name": model_name, "tool_name": tool_name, "content": content}
+        if tool_kwargs is not OMIT:
+            _request["tool_kwargs"] = tool_kwargs
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/first-agent"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else 60,
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(ExcecuteToolFirstWorkflowOut, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
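Taken together, the three endpoints above cover page scraping, document chunk retrieval, and a tool-first agent run. A minimal sketch chaining two of them, with argument values copied from the docstrings:

    from athena.client import Athena

    client = Athena(api_key="YOUR_API_KEY")

    page = client.tools.scrape_url(url="https://athenaintelligence.ai")
    print(page.metadata.title, len(page.markdown))  # FirecrawlScrapeUrlDataReponseDto fields

    run = client.tools.tool_first_workflow(
        model_name="gpt-3.5-turbo",
        tool_name="tavily_search",
        content="summarize the website in one paragraph",
        tool_kwargs={"query": "website: www.athenaintelligence.ai"},
    )
    print(run.output_message)  # ExcecuteToolFirstWorkflowOut.output_message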
athena/types/__init__.py CHANGED
@@ -1,17 +1,22 @@
 # This file was auto-generated by Fern from our API Definition.
 
 from .dataset import Dataset
+from .document import Document
+from .excecute_tool_first_workflow_out import ExcecuteToolFirstWorkflowOut
+from .firecrawl_scrape_url_data_reponse_dto import FirecrawlScrapeUrlDataReponseDto
+from .firecrawl_scrape_url_metadata import FirecrawlScrapeUrlMetadata
 from .get_datasets_response import GetDatasetsResponse
 from .get_snippets_response import GetSnippetsResponse
 from .http_validation_error import HttpValidationError
+from .langchain_documents_request_out import LangchainDocumentsRequestOut
 from .message_out import MessageOut
 from .message_out_dto import MessageOutDto
 from .model import Model
 from .report import Report
-from .scrape_markdown_result import ScrapeMarkdownResult
 from .snippet import Snippet
 from .sql_results import SqlResults
 from .status_enum import StatusEnum
+from .structured_parse_result import StructuredParseResult
 from .tools import Tools
 from .url_result import UrlResult
 from .validation_error import ValidationError
@@ -19,17 +24,22 @@ from .validation_error_loc_item import ValidationErrorLocItem
 
 __all__ = [
     "Dataset",
+    "Document",
+    "ExcecuteToolFirstWorkflowOut",
+    "FirecrawlScrapeUrlDataReponseDto",
+    "FirecrawlScrapeUrlMetadata",
     "GetDatasetsResponse",
     "GetSnippetsResponse",
     "HttpValidationError",
+    "LangchainDocumentsRequestOut",
     "MessageOut",
     "MessageOutDto",
     "Model",
     "Report",
-    "ScrapeMarkdownResult",
     "Snippet",
     "SqlResults",
     "StatusEnum",
+    "StructuredParseResult",
     "Tools",
     "UrlResult",
     "ValidationError",
athena/types/document.py ADDED
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class Document(pydantic.BaseModel):
+    """
+    Class for storing a piece of text and associated metadata.
+    """
+
+    page_content: str
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = None
+    type: typing.Optional[typing.Literal["Document"]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
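`Document` follows the LangChain document shape (`page_content` plus optional `metadata`), which is what `LangchainDocumentsRequestOut` below returns a list of. A minimal construction sketch:

    doc = Document(page_content="hello world", metadata={"source": "example"})
    assert doc.page_content == "hello world"
    # Config.frozen = True makes instances immutable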
athena/types/scrape_markdown_result.py → athena/types/excecute_tool_first_workflow_out.py RENAMED
@@ -11,8 +11,8 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class ScrapeMarkdownResult(pydantic.BaseModel):
-    markdown: str
+class ExcecuteToolFirstWorkflowOut(pydantic.BaseModel):
+    output_message: str
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
athena/types/firecrawl_scrape_url_data_reponse_dto.py ADDED
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .firecrawl_scrape_url_metadata import FirecrawlScrapeUrlMetadata
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FirecrawlScrapeUrlDataReponseDto(pydantic.BaseModel):
+    content: str
+    markdown: str
+    metadata: FirecrawlScrapeUrlMetadata
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/firecrawl_scrape_url_metadata.py ADDED
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FirecrawlScrapeUrlMetadata(pydantic.BaseModel):
+    title: typing.Optional[str] = None
+    description: typing.Optional[str] = None
+    language: typing.Optional[str] = None
+    source_url: typing.Optional[str] = pydantic.Field(alias="sourceURL", default=None)
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        json_encoders = {dt.datetime: serialize_datetime}
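Because `allow_population_by_field_name` is set, the `sourceURL` alias and the `source_url` field name are interchangeable on input, while the overridden `dict()` serializes by alias. A minimal sketch:

    meta = FirecrawlScrapeUrlMetadata.parse_obj({"title": "Athena", "sourceURL": "https://athenaintelligence.ai"})
    assert meta.source_url == "https://athenaintelligence.ai"
    assert meta.dict()["sourceURL"] == "https://athenaintelligence.ai"  # dict() passes by_alias=True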
athena/types/langchain_documents_request_out.py ADDED
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .document import Document
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class LangchainDocumentsRequestOut(pydantic.BaseModel):
+    documents: typing.List[Document]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
athena/types/structured_parse_result.py ADDED
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class StructuredParseResult(pydantic.BaseModel):
+    result: typing.Dict[str, typing.Any]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
athena_intelligence-0.1.39.dist-info/METADATA → athena_intelligence-0.1.41.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: athena-intelligence
-Version: 0.1.39
+Version: 0.1.41
 Summary:
 Requires-Python: >=3.8,<4.0
 Classifier: Programming Language :: Python :: 3
athena_intelligence-0.1.39.dist-info/RECORD → athena_intelligence-0.1.41.dist-info/RECORD RENAMED
@@ -1,9 +1,13 @@
-athena/__init__.py,sha256=4F_he5MdULBjh5FEtmnAlXK61f8Y5fhP4-bLVzj1K3s,1013
-athena/base_client.py,sha256=9CD18sBT5meilMnX4WfnNBagwlyNWnc8NH0bSL9D0Ao,5014
+athena/__init__.py,sha256=tOoLwN-O8vyWKTVN97cDo4WCjAQrwDKc_EDh-p9fYO0,1455
+athena/base_client.py,sha256=tvX2FKc9AnNBm0k64njjUCOt7dPxBBn6BpClJpcPqgc,5416
+athena/chain/__init__.py,sha256=I1CBCogKCvJBIPdXXFQWdGJyFs6pdp0_dp6rnybd5mI,159
+athena/chain/client.py,sha256=SdlDtNr1QBbDgZGBAFW-GNT0FqPdhvAUcrQaHcGuUds,8333
+athena/chain/types/__init__.py,sha256=s4rY--H5yj6slggsUnRQNrKBHZ3QGE9jQWopTCQOFpg,187
+athena/chain/types/structured_parse_in_parsing_model.py,sha256=tr6DLP2v71IUScCOeLrUjUtXrAr3WlaW0sSD3ns2r-Q,2203
 athena/client.py,sha256=8QypiDlbZ0C1YsJh6GzhylLVCZXDQc1MCJTURo2_vvI,3576
 athena/core/__init__.py,sha256=RWfyDqkzWsf8e3VGc3NV60MovfJbg5XWzNFGB2DZ0hA,790
 athena/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
-athena/core/client_wrapper.py,sha256=IZ6Zo7IR39BxLeVgeprl1_Sh_0ewYHA8uloBT_m_axc,1198
+athena/core/client_wrapper.py,sha256=GqP993lqfPIGgs1ep0UgLKCBKNFa9R6tBOZ6dRC5GMA,1198
 athena/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 athena/core/file.py,sha256=sy1RUGZ3aJYuw998bZytxxo6QdgKmlnlgBaMvwEKCGg,1480
 athena/core/http_client.py,sha256=LI0yP3jUyE0Ue7oyBcI9nyo1pljOwh9Y5ycTeIpKwOg,4882
@@ -24,26 +28,33 @@ athena/query/client.py,sha256=UOx-Bq-xFFm-sTMTmJjWGrC6q_7vhVno3nYzmi81xwI,6243
 athena/report/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 athena/report/client.py,sha256=sGJDrgk_E1SPleRYNhvspmsz-G3FQwMW-3alFzZPquE,6528
 athena/search/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-athena/search/client.py,sha256=DLfHjgflIXAff20yswQK1h4BNxvY4SjZzhsywpQOM-g,12391
+athena/search/client.py,sha256=zhE86fKD61gHQ1w45DYfWVHIZ1APGK6yUrWAHYi0OVM,7515
 athena/snippet/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 athena/snippet/client.py,sha256=D0rSpm6ql9cnUj-mMe3z8OHRgRQQuk3bBW2CZSRnyp4,6087
-athena/types/__init__.py,sha256=WkQQXOAvm87RuRDk2_W9uJs9y7LWGejK1ZC666NTHXA,1054
+athena/tools/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
+athena/tools/client.py,sha256=b3BvnUTBBmlDAw3RfffWSYvKGVnukDaPCbSmGsSFGC0,19332
+athena/types/__init__.py,sha256=Nbrzcoh-fM9lFISWhmZxAmD1m-8Q2dA4qJCbPhSHOuk,1553
 athena/types/dataset.py,sha256=70OJPxKBAYu7xthGEgrUolSdyLqiyh6X49INw1oN0sA,1014
+athena/types/document.py,sha256=CAByS_smWjl-edYLCCc4fkdhZYXSVUKQqEamjbhpgU0,1081
+athena/types/excecute_tool_first_workflow_out.py,sha256=tGL6pNN4uhL_knWf-SQc-Z1IPJFNVsAmYtDI-VjXS2s,895
+athena/types/firecrawl_scrape_url_data_reponse_dto.py,sha256=LbJY-SC_WNQG3nbswG8NTfVl_u_tpV1HO7Y3MWCk5gc,1021
+athena/types/firecrawl_scrape_url_metadata.py,sha256=sqHTtq5_5vujsMixJBDJULPK7MrvxEpB2wGPwC-bTdM,1128
 athena/types/get_datasets_response.py,sha256=BCdT8yTLfOsXeyFadlyoas4zzseFWGPAdGpkgkOuaD8,989
 athena/types/get_snippets_response.py,sha256=Lpn7bHJLpPQozN93unCV-8eByAAfz1MhQWR3G3Z1vl4,989
 athena/types/http_validation_error.py,sha256=Fcv_CTMMrLvCeTHjF0n5xf5tskMDgt-J6H9gp654eQw,973
+athena/types/langchain_documents_request_out.py,sha256=P5Qq7BkCWILORC3yOkvt8pssdYgd_vnTTANNtrWF-l8,939
 athena/types/message_out.py,sha256=uvZY_Podv2XccEk8CICug9I_S2hFJTSzCBwcHiauW7A,865
 athena/types/message_out_dto.py,sha256=qgRibRbDNOWVnVGP7Rribh9WdoCT2CSiPUXeIWECqq4,1051
 athena/types/model.py,sha256=XbXkKXbmnfZ8bPTAn1xnWGjqKK1SVOLdxf1RGk5ON5k,2545
 athena/types/report.py,sha256=QVaqVfHMAV3s9_V2CqjIEMcRrbJhD8zmi82vrk2A8x0,946
-athena/types/scrape_markdown_result.py,sha256=uRpIxoLV9oyLdbJeehm3zmZk_qXZeYqYYcS2SeQmwbA,881
 athena/types/snippet.py,sha256=POIVJNV9iQxiVegB_qwQx-PZPPSyoIPhyxTsueNVUGA,1126
 athena/types/sql_results.py,sha256=pNH32nyf1bzoYJs3FgHctLdLO02oOjyGgLkHACACB6k,900
 athena/types/status_enum.py,sha256=0UZbhdAx215GHC-U53RS98mYHtn1N3On4VBe4j02Qtc,672
+athena/types/structured_parse_result.py,sha256=7I-w06OmtxXFY01k7FXFSNPe5PpM3z54xNUbs62lSv0,905
 athena/types/tools.py,sha256=mhRkKAwlsDud-fFOhsx2T3hBD-FAtuCnGHyU9cLPcGU,1422
 athena/types/url_result.py,sha256=zajsW46qJnD6GPimb5kHkUncjqBfzHUlGOcKuUGMX-E,893
 athena/types/validation_error.py,sha256=2JhGNJouo8QpfrMBoT_JCwYSn1nFN2Nnq0p9uPLDH-U,992
 athena/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPXjdtN9EB7HrLVo6EP0,128
-athena_intelligence-0.1.39.dist-info/METADATA,sha256=d8EmTn45t-7O63xpZzW5vbZbpxhagqY9Frj1559JYWw,4738
-athena_intelligence-0.1.39.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
-athena_intelligence-0.1.39.dist-info/RECORD,,
+athena_intelligence-0.1.41.dist-info/METADATA,sha256=x6Bd7D06Ob6esJAQDGRuRWFkTtlIuixQTWFFuVc3Xvk,4738
+athena_intelligence-0.1.41.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+athena_intelligence-0.1.41.dist-info/RECORD,,