athena-intelligence 0.1.71__py3-none-any.whl → 0.1.73__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- athena/__init__.py +12 -0
- athena/core/client_wrapper.py +1 -1
- athena/tools/client.py +305 -0
- athena/types/__init__.py +12 -0
- athena/types/convert_pdf_to_sheet_out.py +26 -0
- athena/types/filter_model.py +28 -0
- athena/types/filter_operator.py +73 -0
- athena/types/llm_model.py +8 -0
- athena/types/model.py +4 -0
- athena/types/query_model.py +34 -0
- athena/types/semantic_query_out.py +25 -0
- athena/types/time_dimension_model.py +29 -0
- {athena_intelligence-0.1.71.dist-info → athena_intelligence-0.1.73.dist-info}/METADATA +1 -1
- {athena_intelligence-0.1.71.dist-info → athena_intelligence-0.1.73.dist-info}/RECORD +15 -9
- {athena_intelligence-0.1.71.dist-info → athena_intelligence-0.1.73.dist-info}/WHEEL +0 -0
athena/__init__.py
CHANGED
@@ -1,10 +1,13 @@
|
|
1
1
|
# This file was auto-generated by Fern from our API Definition.
|
2
2
|
|
3
3
|
from .types import (
|
4
|
+
ConvertPdfToSheetOut,
|
4
5
|
Dataset,
|
5
6
|
Document,
|
6
7
|
ExcecuteToolFirstWorkflowOut,
|
7
8
|
FileDataResponse,
|
9
|
+
FilterModel,
|
10
|
+
FilterOperator,
|
8
11
|
FirecrawlScrapeUrlDataReponseDto,
|
9
12
|
FirecrawlScrapeUrlMetadata,
|
10
13
|
GetDatasetsResponse,
|
@@ -18,12 +21,15 @@ from .types import (
|
|
18
21
|
MessageOutDto,
|
19
22
|
Model,
|
20
23
|
PublishFormats,
|
24
|
+
QueryModel,
|
21
25
|
Report,
|
22
26
|
ResearcherOut,
|
27
|
+
SemanticQueryOut,
|
23
28
|
Snippet,
|
24
29
|
SqlResults,
|
25
30
|
StatusEnum,
|
26
31
|
StructuredParseResult,
|
32
|
+
TimeDimensionModel,
|
27
33
|
Tools,
|
28
34
|
UploadDocumentsOut,
|
29
35
|
UrlResult,
|
@@ -38,10 +44,13 @@ from .version import __version__
|
|
38
44
|
|
39
45
|
__all__ = [
|
40
46
|
"AthenaEnvironment",
|
47
|
+
"ConvertPdfToSheetOut",
|
41
48
|
"Dataset",
|
42
49
|
"Document",
|
43
50
|
"ExcecuteToolFirstWorkflowOut",
|
44
51
|
"FileDataResponse",
|
52
|
+
"FilterModel",
|
53
|
+
"FilterOperator",
|
45
54
|
"FirecrawlScrapeUrlDataReponseDto",
|
46
55
|
"FirecrawlScrapeUrlMetadata",
|
47
56
|
"GetDatasetsResponse",
|
@@ -55,12 +64,15 @@ __all__ = [
|
|
55
64
|
"MessageOutDto",
|
56
65
|
"Model",
|
57
66
|
"PublishFormats",
|
67
|
+
"QueryModel",
|
58
68
|
"Report",
|
59
69
|
"ResearcherOut",
|
70
|
+
"SemanticQueryOut",
|
60
71
|
"Snippet",
|
61
72
|
"SqlResults",
|
62
73
|
"StatusEnum",
|
63
74
|
"StructuredParseResult",
|
75
|
+
"TimeDimensionModel",
|
64
76
|
"Tools",
|
65
77
|
"UnprocessableEntityError",
|
66
78
|
"UploadDocumentsOut",
|
athena/core/client_wrapper.py
CHANGED
@@ -17,7 +17,7 @@ class BaseClientWrapper:
|
|
17
17
|
headers: typing.Dict[str, str] = {
|
18
18
|
"X-Fern-Language": "Python",
|
19
19
|
"X-Fern-SDK-Name": "athena-intelligence",
|
20
|
-
"X-Fern-SDK-Version": "0.1.
|
20
|
+
"X-Fern-SDK-Version": "0.1.73",
|
21
21
|
}
|
22
22
|
headers["X-API-KEY"] = self.api_key
|
23
23
|
return headers
|
athena/tools/client.py
CHANGED
@@ -11,13 +11,16 @@ from ..core.pydantic_utilities import pydantic_v1
|
|
11
11
|
from ..core.remove_none_from_dict import remove_none_from_dict
|
12
12
|
from ..core.request_options import RequestOptions
|
13
13
|
from ..errors.unprocessable_entity_error import UnprocessableEntityError
|
14
|
+
from ..types.convert_pdf_to_sheet_out import ConvertPdfToSheetOut
|
14
15
|
from ..types.excecute_tool_first_workflow_out import ExcecuteToolFirstWorkflowOut
|
15
16
|
from ..types.firecrawl_scrape_url_data_reponse_dto import FirecrawlScrapeUrlDataReponseDto
|
16
17
|
from ..types.http_validation_error import HttpValidationError
|
17
18
|
from ..types.langchain_documents_request_out import LangchainDocumentsRequestOut
|
18
19
|
from ..types.llm_model import LlmModel
|
19
20
|
from ..types.publish_formats import PublishFormats
|
21
|
+
from ..types.query_model import QueryModel
|
20
22
|
from ..types.researcher_out import ResearcherOut
|
23
|
+
from ..types.semantic_query_out import SemanticQueryOut
|
21
24
|
|
22
25
|
# this is used as the default value for optional parameters
|
23
26
|
OMIT = typing.cast(typing.Any, ...)
|
@@ -250,6 +253,8 @@ class ToolsClient:
|
|
250
253
|
max_sections: int,
|
251
254
|
guidelines: typing.Sequence[str],
|
252
255
|
publish_formats: typing.Optional[PublishFormats] = OMIT,
|
256
|
+
source: typing.Optional[str] = OMIT,
|
257
|
+
athena_document_ids: typing.Optional[typing.Sequence[str]] = OMIT,
|
253
258
|
request_options: typing.Optional[RequestOptions] = None,
|
254
259
|
) -> ResearcherOut:
|
255
260
|
"""
|
@@ -262,6 +267,10 @@ class ToolsClient:
|
|
262
267
|
|
263
268
|
- publish_formats: typing.Optional[PublishFormats].
|
264
269
|
|
270
|
+
- source: typing.Optional[str].
|
271
|
+
|
272
|
+
- athena_document_ids: typing.Optional[typing.Sequence[str]].
|
273
|
+
|
265
274
|
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
|
266
275
|
---
|
267
276
|
from athena import PublishFormats
|
@@ -283,6 +292,8 @@ class ToolsClient:
|
|
283
292
|
pdf=True,
|
284
293
|
docx=False,
|
285
294
|
),
|
295
|
+
source="web",
|
296
|
+
athena_document_ids=["doc_1", "doc_2"],
|
286
297
|
)
|
287
298
|
"""
|
288
299
|
_request: typing.Dict[str, typing.Any] = {
|
@@ -292,6 +303,10 @@ class ToolsClient:
|
|
292
303
|
}
|
293
304
|
if publish_formats is not OMIT:
|
294
305
|
_request["publish_formats"] = publish_formats
|
306
|
+
if source is not OMIT:
|
307
|
+
_request["source"] = source
|
308
|
+
if athena_document_ids is not OMIT:
|
309
|
+
_request["athena_document_ids"] = athena_document_ids
|
295
310
|
_response = self._client_wrapper.httpx_client.request(
|
296
311
|
method="POST",
|
297
312
|
url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/researcher"),
|
@@ -330,6 +345,145 @@ class ToolsClient:
|
|
330
345
|
raise ApiError(status_code=_response.status_code, body=_response.text)
|
331
346
|
raise ApiError(status_code=_response.status_code, body=_response_json)
|
332
347
|
|
348
|
+
def convert_pdf_to_sheet(
|
349
|
+
self, *, document_id: str, request_options: typing.Optional[RequestOptions] = None
|
350
|
+
) -> ConvertPdfToSheetOut:
|
351
|
+
"""
|
352
|
+
Parameters:
|
353
|
+
- document_id: str.
|
354
|
+
|
355
|
+
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
|
356
|
+
---
|
357
|
+
from athena.client import Athena
|
358
|
+
|
359
|
+
client = Athena(
|
360
|
+
api_key="YOUR_API_KEY",
|
361
|
+
)
|
362
|
+
client.tools.convert_pdf_to_sheet(
|
363
|
+
document_id="doc_9249292-d118-42d3-95b4-00eccfe0754f",
|
364
|
+
)
|
365
|
+
"""
|
366
|
+
_response = self._client_wrapper.httpx_client.request(
|
367
|
+
method="POST",
|
368
|
+
url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/convert-pdf-to-sheet"),
|
369
|
+
params=jsonable_encoder(
|
370
|
+
request_options.get("additional_query_parameters") if request_options is not None else None
|
371
|
+
),
|
372
|
+
json=jsonable_encoder({"document_id": document_id})
|
373
|
+
if request_options is None or request_options.get("additional_body_parameters") is None
|
374
|
+
else {
|
375
|
+
**jsonable_encoder({"document_id": document_id}),
|
376
|
+
**(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
|
377
|
+
},
|
378
|
+
headers=jsonable_encoder(
|
379
|
+
remove_none_from_dict(
|
380
|
+
{
|
381
|
+
**self._client_wrapper.get_headers(),
|
382
|
+
**(request_options.get("additional_headers", {}) if request_options is not None else {}),
|
383
|
+
}
|
384
|
+
)
|
385
|
+
),
|
386
|
+
timeout=request_options.get("timeout_in_seconds")
|
387
|
+
if request_options is not None and request_options.get("timeout_in_seconds") is not None
|
388
|
+
else self._client_wrapper.get_timeout(),
|
389
|
+
retries=0,
|
390
|
+
max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
|
391
|
+
)
|
392
|
+
if 200 <= _response.status_code < 300:
|
393
|
+
return pydantic_v1.parse_obj_as(ConvertPdfToSheetOut, _response.json()) # type: ignore
|
394
|
+
if _response.status_code == 422:
|
395
|
+
raise UnprocessableEntityError(
|
396
|
+
pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
|
397
|
+
)
|
398
|
+
try:
|
399
|
+
_response_json = _response.json()
|
400
|
+
except JSONDecodeError:
|
401
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
402
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
403
|
+
|
404
|
+
def semantic_query(
|
405
|
+
self,
|
406
|
+
*,
|
407
|
+
query: QueryModel,
|
408
|
+
table_name: typing.Optional[str] = OMIT,
|
409
|
+
request_options: typing.Optional[RequestOptions] = None,
|
410
|
+
) -> SemanticQueryOut:
|
411
|
+
"""
|
412
|
+
Parameters:
|
413
|
+
- query: QueryModel.
|
414
|
+
|
415
|
+
- table_name: typing.Optional[str].
|
416
|
+
|
417
|
+
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
|
418
|
+
---
|
419
|
+
from athena import FilterModel, FilterOperator, QueryModel, TimeDimensionModel
|
420
|
+
from athena.client import Athena
|
421
|
+
|
422
|
+
client = Athena(
|
423
|
+
api_key="YOUR_API_KEY",
|
424
|
+
)
|
425
|
+
client.tools.semantic_query(
|
426
|
+
query=QueryModel(
|
427
|
+
measures=["count"],
|
428
|
+
time_dimensions=[
|
429
|
+
TimeDimensionModel(
|
430
|
+
dimension="dimension",
|
431
|
+
granularity="granularity",
|
432
|
+
date_range=["dateRange"],
|
433
|
+
)
|
434
|
+
],
|
435
|
+
dimensions=["manufacturer"],
|
436
|
+
filters=[
|
437
|
+
FilterModel(
|
438
|
+
dimension="manufacturer",
|
439
|
+
operator=FilterOperator.EQUALS,
|
440
|
+
values=["values"],
|
441
|
+
)
|
442
|
+
],
|
443
|
+
),
|
444
|
+
)
|
445
|
+
"""
|
446
|
+
_request: typing.Dict[str, typing.Any] = {"query": query}
|
447
|
+
if table_name is not OMIT:
|
448
|
+
_request["table_name"] = table_name
|
449
|
+
_response = self._client_wrapper.httpx_client.request(
|
450
|
+
method="POST",
|
451
|
+
url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/strict-semantic-query"),
|
452
|
+
params=jsonable_encoder(
|
453
|
+
request_options.get("additional_query_parameters") if request_options is not None else None
|
454
|
+
),
|
455
|
+
json=jsonable_encoder(_request)
|
456
|
+
if request_options is None or request_options.get("additional_body_parameters") is None
|
457
|
+
else {
|
458
|
+
**jsonable_encoder(_request),
|
459
|
+
**(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
|
460
|
+
},
|
461
|
+
headers=jsonable_encoder(
|
462
|
+
remove_none_from_dict(
|
463
|
+
{
|
464
|
+
**self._client_wrapper.get_headers(),
|
465
|
+
**(request_options.get("additional_headers", {}) if request_options is not None else {}),
|
466
|
+
}
|
467
|
+
)
|
468
|
+
),
|
469
|
+
timeout=request_options.get("timeout_in_seconds")
|
470
|
+
if request_options is not None and request_options.get("timeout_in_seconds") is not None
|
471
|
+
else self._client_wrapper.get_timeout(),
|
472
|
+
retries=0,
|
473
|
+
max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
|
474
|
+
)
|
475
|
+
if 200 <= _response.status_code < 300:
|
476
|
+
return pydantic_v1.parse_obj_as(SemanticQueryOut, _response.json()) # type: ignore
|
477
|
+
if _response.status_code == 422:
|
478
|
+
raise UnprocessableEntityError(
|
479
|
+
pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
|
480
|
+
)
|
481
|
+
try:
|
482
|
+
_response_json = _response.json()
|
483
|
+
except JSONDecodeError:
|
484
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
485
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
486
|
+
|
333
487
|
|
334
488
|
class AsyncToolsClient:
|
335
489
|
def __init__(self, *, client_wrapper: AsyncClientWrapper):
|
@@ -558,6 +712,8 @@ class AsyncToolsClient:
|
|
558
712
|
max_sections: int,
|
559
713
|
guidelines: typing.Sequence[str],
|
560
714
|
publish_formats: typing.Optional[PublishFormats] = OMIT,
|
715
|
+
source: typing.Optional[str] = OMIT,
|
716
|
+
athena_document_ids: typing.Optional[typing.Sequence[str]] = OMIT,
|
561
717
|
request_options: typing.Optional[RequestOptions] = None,
|
562
718
|
) -> ResearcherOut:
|
563
719
|
"""
|
@@ -570,6 +726,10 @@ class AsyncToolsClient:
|
|
570
726
|
|
571
727
|
- publish_formats: typing.Optional[PublishFormats].
|
572
728
|
|
729
|
+
- source: typing.Optional[str].
|
730
|
+
|
731
|
+
- athena_document_ids: typing.Optional[typing.Sequence[str]].
|
732
|
+
|
573
733
|
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
|
574
734
|
---
|
575
735
|
from athena import PublishFormats
|
@@ -591,6 +751,8 @@ class AsyncToolsClient:
|
|
591
751
|
pdf=True,
|
592
752
|
docx=False,
|
593
753
|
),
|
754
|
+
source="web",
|
755
|
+
athena_document_ids=["doc_1", "doc_2"],
|
594
756
|
)
|
595
757
|
"""
|
596
758
|
_request: typing.Dict[str, typing.Any] = {
|
@@ -600,6 +762,10 @@ class AsyncToolsClient:
|
|
600
762
|
}
|
601
763
|
if publish_formats is not OMIT:
|
602
764
|
_request["publish_formats"] = publish_formats
|
765
|
+
if source is not OMIT:
|
766
|
+
_request["source"] = source
|
767
|
+
if athena_document_ids is not OMIT:
|
768
|
+
_request["athena_document_ids"] = athena_document_ids
|
603
769
|
_response = await self._client_wrapper.httpx_client.request(
|
604
770
|
method="POST",
|
605
771
|
url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/researcher"),
|
@@ -637,3 +803,142 @@ class AsyncToolsClient:
|
|
637
803
|
except JSONDecodeError:
|
638
804
|
raise ApiError(status_code=_response.status_code, body=_response.text)
|
639
805
|
raise ApiError(status_code=_response.status_code, body=_response_json)
|
806
|
+
|
807
|
+
async def convert_pdf_to_sheet(
|
808
|
+
self, *, document_id: str, request_options: typing.Optional[RequestOptions] = None
|
809
|
+
) -> ConvertPdfToSheetOut:
|
810
|
+
"""
|
811
|
+
Parameters:
|
812
|
+
- document_id: str.
|
813
|
+
|
814
|
+
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
|
815
|
+
---
|
816
|
+
from athena.client import AsyncAthena
|
817
|
+
|
818
|
+
client = AsyncAthena(
|
819
|
+
api_key="YOUR_API_KEY",
|
820
|
+
)
|
821
|
+
await client.tools.convert_pdf_to_sheet(
|
822
|
+
document_id="doc_9249292-d118-42d3-95b4-00eccfe0754f",
|
823
|
+
)
|
824
|
+
"""
|
825
|
+
_response = await self._client_wrapper.httpx_client.request(
|
826
|
+
method="POST",
|
827
|
+
url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/convert-pdf-to-sheet"),
|
828
|
+
params=jsonable_encoder(
|
829
|
+
request_options.get("additional_query_parameters") if request_options is not None else None
|
830
|
+
),
|
831
|
+
json=jsonable_encoder({"document_id": document_id})
|
832
|
+
if request_options is None or request_options.get("additional_body_parameters") is None
|
833
|
+
else {
|
834
|
+
**jsonable_encoder({"document_id": document_id}),
|
835
|
+
**(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
|
836
|
+
},
|
837
|
+
headers=jsonable_encoder(
|
838
|
+
remove_none_from_dict(
|
839
|
+
{
|
840
|
+
**self._client_wrapper.get_headers(),
|
841
|
+
**(request_options.get("additional_headers", {}) if request_options is not None else {}),
|
842
|
+
}
|
843
|
+
)
|
844
|
+
),
|
845
|
+
timeout=request_options.get("timeout_in_seconds")
|
846
|
+
if request_options is not None and request_options.get("timeout_in_seconds") is not None
|
847
|
+
else self._client_wrapper.get_timeout(),
|
848
|
+
retries=0,
|
849
|
+
max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
|
850
|
+
)
|
851
|
+
if 200 <= _response.status_code < 300:
|
852
|
+
return pydantic_v1.parse_obj_as(ConvertPdfToSheetOut, _response.json()) # type: ignore
|
853
|
+
if _response.status_code == 422:
|
854
|
+
raise UnprocessableEntityError(
|
855
|
+
pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
|
856
|
+
)
|
857
|
+
try:
|
858
|
+
_response_json = _response.json()
|
859
|
+
except JSONDecodeError:
|
860
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
861
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
862
|
+
|
863
|
+
async def semantic_query(
|
864
|
+
self,
|
865
|
+
*,
|
866
|
+
query: QueryModel,
|
867
|
+
table_name: typing.Optional[str] = OMIT,
|
868
|
+
request_options: typing.Optional[RequestOptions] = None,
|
869
|
+
) -> SemanticQueryOut:
|
870
|
+
"""
|
871
|
+
Parameters:
|
872
|
+
- query: QueryModel.
|
873
|
+
|
874
|
+
- table_name: typing.Optional[str].
|
875
|
+
|
876
|
+
- request_options: typing.Optional[RequestOptions]. Request-specific configuration.
|
877
|
+
---
|
878
|
+
from athena import FilterModel, FilterOperator, QueryModel, TimeDimensionModel
|
879
|
+
from athena.client import AsyncAthena
|
880
|
+
|
881
|
+
client = AsyncAthena(
|
882
|
+
api_key="YOUR_API_KEY",
|
883
|
+
)
|
884
|
+
await client.tools.semantic_query(
|
885
|
+
query=QueryModel(
|
886
|
+
measures=["count"],
|
887
|
+
time_dimensions=[
|
888
|
+
TimeDimensionModel(
|
889
|
+
dimension="dimension",
|
890
|
+
granularity="granularity",
|
891
|
+
date_range=["dateRange"],
|
892
|
+
)
|
893
|
+
],
|
894
|
+
dimensions=["manufacturer"],
|
895
|
+
filters=[
|
896
|
+
FilterModel(
|
897
|
+
dimension="manufacturer",
|
898
|
+
operator=FilterOperator.EQUALS,
|
899
|
+
values=["values"],
|
900
|
+
)
|
901
|
+
],
|
902
|
+
),
|
903
|
+
)
|
904
|
+
"""
|
905
|
+
_request: typing.Dict[str, typing.Any] = {"query": query}
|
906
|
+
if table_name is not OMIT:
|
907
|
+
_request["table_name"] = table_name
|
908
|
+
_response = await self._client_wrapper.httpx_client.request(
|
909
|
+
method="POST",
|
910
|
+
url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/strict-semantic-query"),
|
911
|
+
params=jsonable_encoder(
|
912
|
+
request_options.get("additional_query_parameters") if request_options is not None else None
|
913
|
+
),
|
914
|
+
json=jsonable_encoder(_request)
|
915
|
+
if request_options is None or request_options.get("additional_body_parameters") is None
|
916
|
+
else {
|
917
|
+
**jsonable_encoder(_request),
|
918
|
+
**(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
|
919
|
+
},
|
920
|
+
headers=jsonable_encoder(
|
921
|
+
remove_none_from_dict(
|
922
|
+
{
|
923
|
+
**self._client_wrapper.get_headers(),
|
924
|
+
**(request_options.get("additional_headers", {}) if request_options is not None else {}),
|
925
|
+
}
|
926
|
+
)
|
927
|
+
),
|
928
|
+
timeout=request_options.get("timeout_in_seconds")
|
929
|
+
if request_options is not None and request_options.get("timeout_in_seconds") is not None
|
930
|
+
else self._client_wrapper.get_timeout(),
|
931
|
+
retries=0,
|
932
|
+
max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
|
933
|
+
)
|
934
|
+
if 200 <= _response.status_code < 300:
|
935
|
+
return pydantic_v1.parse_obj_as(SemanticQueryOut, _response.json()) # type: ignore
|
936
|
+
if _response.status_code == 422:
|
937
|
+
raise UnprocessableEntityError(
|
938
|
+
pydantic_v1.parse_obj_as(HttpValidationError, _response.json()) # type: ignore
|
939
|
+
)
|
940
|
+
try:
|
941
|
+
_response_json = _response.json()
|
942
|
+
except JSONDecodeError:
|
943
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
944
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
athena/types/__init__.py
CHANGED
@@ -1,9 +1,12 @@
|
|
1
1
|
# This file was auto-generated by Fern from our API Definition.
|
2
2
|
|
3
|
+
from .convert_pdf_to_sheet_out import ConvertPdfToSheetOut
|
3
4
|
from .dataset import Dataset
|
4
5
|
from .document import Document
|
5
6
|
from .excecute_tool_first_workflow_out import ExcecuteToolFirstWorkflowOut
|
6
7
|
from .file_data_response import FileDataResponse
|
8
|
+
from .filter_model import FilterModel
|
9
|
+
from .filter_operator import FilterOperator
|
7
10
|
from .firecrawl_scrape_url_data_reponse_dto import FirecrawlScrapeUrlDataReponseDto
|
8
11
|
from .firecrawl_scrape_url_metadata import FirecrawlScrapeUrlMetadata
|
9
12
|
from .get_datasets_response import GetDatasetsResponse
|
@@ -17,12 +20,15 @@ from .message_out import MessageOut
|
|
17
20
|
from .message_out_dto import MessageOutDto
|
18
21
|
from .model import Model
|
19
22
|
from .publish_formats import PublishFormats
|
23
|
+
from .query_model import QueryModel
|
20
24
|
from .report import Report
|
21
25
|
from .researcher_out import ResearcherOut
|
26
|
+
from .semantic_query_out import SemanticQueryOut
|
22
27
|
from .snippet import Snippet
|
23
28
|
from .sql_results import SqlResults
|
24
29
|
from .status_enum import StatusEnum
|
25
30
|
from .structured_parse_result import StructuredParseResult
|
31
|
+
from .time_dimension_model import TimeDimensionModel
|
26
32
|
from .tools import Tools
|
27
33
|
from .upload_documents_out import UploadDocumentsOut
|
28
34
|
from .url_result import UrlResult
|
@@ -31,10 +37,13 @@ from .validation_error_loc_item import ValidationErrorLocItem
|
|
31
37
|
from .workflow_status_out import WorkflowStatusOut
|
32
38
|
|
33
39
|
__all__ = [
|
40
|
+
"ConvertPdfToSheetOut",
|
34
41
|
"Dataset",
|
35
42
|
"Document",
|
36
43
|
"ExcecuteToolFirstWorkflowOut",
|
37
44
|
"FileDataResponse",
|
45
|
+
"FilterModel",
|
46
|
+
"FilterOperator",
|
38
47
|
"FirecrawlScrapeUrlDataReponseDto",
|
39
48
|
"FirecrawlScrapeUrlMetadata",
|
40
49
|
"GetDatasetsResponse",
|
@@ -48,12 +57,15 @@ __all__ = [
|
|
48
57
|
"MessageOutDto",
|
49
58
|
"Model",
|
50
59
|
"PublishFormats",
|
60
|
+
"QueryModel",
|
51
61
|
"Report",
|
52
62
|
"ResearcherOut",
|
63
|
+
"SemanticQueryOut",
|
53
64
|
"Snippet",
|
54
65
|
"SqlResults",
|
55
66
|
"StatusEnum",
|
56
67
|
"StructuredParseResult",
|
68
|
+
"TimeDimensionModel",
|
57
69
|
"Tools",
|
58
70
|
"UploadDocumentsOut",
|
59
71
|
"UrlResult",
|
@@ -0,0 +1,26 @@
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
2
|
+
|
3
|
+
import datetime as dt
|
4
|
+
import typing
|
5
|
+
|
6
|
+
from ..core.datetime_utils import serialize_datetime
|
7
|
+
from ..core.pydantic_utilities import pydantic_v1
|
8
|
+
|
9
|
+
|
10
|
+
class ConvertPdfToSheetOut(pydantic_v1.BaseModel):
|
11
|
+
document_id: str
|
12
|
+
new_document_id: str
|
13
|
+
|
14
|
+
def json(self, **kwargs: typing.Any) -> str:
|
15
|
+
kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
|
16
|
+
return super().json(**kwargs_with_defaults)
|
17
|
+
|
18
|
+
def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
|
19
|
+
kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
|
20
|
+
return super().dict(**kwargs_with_defaults)
|
21
|
+
|
22
|
+
class Config:
|
23
|
+
frozen = True
|
24
|
+
smart_union = True
|
25
|
+
extra = pydantic_v1.Extra.allow
|
26
|
+
json_encoders = {dt.datetime: serialize_datetime}
|
@@ -0,0 +1,28 @@
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
2
|
+
|
3
|
+
import datetime as dt
|
4
|
+
import typing
|
5
|
+
|
6
|
+
from ..core.datetime_utils import serialize_datetime
|
7
|
+
from ..core.pydantic_utilities import pydantic_v1
|
8
|
+
from .filter_operator import FilterOperator
|
9
|
+
|
10
|
+
|
11
|
+
class FilterModel(pydantic_v1.BaseModel):
|
12
|
+
dimension: str
|
13
|
+
operator: FilterOperator
|
14
|
+
values: typing.List[str]
|
15
|
+
|
16
|
+
def json(self, **kwargs: typing.Any) -> str:
|
17
|
+
kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
|
18
|
+
return super().json(**kwargs_with_defaults)
|
19
|
+
|
20
|
+
def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
|
21
|
+
kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
|
22
|
+
return super().dict(**kwargs_with_defaults)
|
23
|
+
|
24
|
+
class Config:
|
25
|
+
frozen = True
|
26
|
+
smart_union = True
|
27
|
+
extra = pydantic_v1.Extra.allow
|
28
|
+
json_encoders = {dt.datetime: serialize_datetime}
|
@@ -0,0 +1,73 @@
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
2
|
+
|
3
|
+
import enum
|
4
|
+
import typing
|
5
|
+
|
6
|
+
T_Result = typing.TypeVar("T_Result")
|
7
|
+
|
8
|
+
|
9
|
+
class FilterOperator(str, enum.Enum):
|
10
|
+
"""
|
11
|
+
An enumeration.
|
12
|
+
"""
|
13
|
+
|
14
|
+
EQUALS = "equals"
|
15
|
+
NOT_EQUALS = "notEquals"
|
16
|
+
CONTAINS = "contains"
|
17
|
+
NOT_CONTAINS = "notContains"
|
18
|
+
STARTS_WITH = "startsWith"
|
19
|
+
ENDS_WITH = "endsWith"
|
20
|
+
GT = "gt"
|
21
|
+
GTE = "gte"
|
22
|
+
LT = "lt"
|
23
|
+
LTE = "lte"
|
24
|
+
SET = "set"
|
25
|
+
NOT_SET = "notSet"
|
26
|
+
IN = "in"
|
27
|
+
NOT_IN = "notIn"
|
28
|
+
|
29
|
+
def visit(
|
30
|
+
self,
|
31
|
+
equals: typing.Callable[[], T_Result],
|
32
|
+
not_equals: typing.Callable[[], T_Result],
|
33
|
+
contains: typing.Callable[[], T_Result],
|
34
|
+
not_contains: typing.Callable[[], T_Result],
|
35
|
+
starts_with: typing.Callable[[], T_Result],
|
36
|
+
ends_with: typing.Callable[[], T_Result],
|
37
|
+
gt: typing.Callable[[], T_Result],
|
38
|
+
gte: typing.Callable[[], T_Result],
|
39
|
+
lt: typing.Callable[[], T_Result],
|
40
|
+
lte: typing.Callable[[], T_Result],
|
41
|
+
set_: typing.Callable[[], T_Result],
|
42
|
+
not_set: typing.Callable[[], T_Result],
|
43
|
+
in_: typing.Callable[[], T_Result],
|
44
|
+
not_in: typing.Callable[[], T_Result],
|
45
|
+
) -> T_Result:
|
46
|
+
if self is FilterOperator.EQUALS:
|
47
|
+
return equals()
|
48
|
+
if self is FilterOperator.NOT_EQUALS:
|
49
|
+
return not_equals()
|
50
|
+
if self is FilterOperator.CONTAINS:
|
51
|
+
return contains()
|
52
|
+
if self is FilterOperator.NOT_CONTAINS:
|
53
|
+
return not_contains()
|
54
|
+
if self is FilterOperator.STARTS_WITH:
|
55
|
+
return starts_with()
|
56
|
+
if self is FilterOperator.ENDS_WITH:
|
57
|
+
return ends_with()
|
58
|
+
if self is FilterOperator.GT:
|
59
|
+
return gt()
|
60
|
+
if self is FilterOperator.GTE:
|
61
|
+
return gte()
|
62
|
+
if self is FilterOperator.LT:
|
63
|
+
return lt()
|
64
|
+
if self is FilterOperator.LTE:
|
65
|
+
return lte()
|
66
|
+
if self is FilterOperator.SET:
|
67
|
+
return set_()
|
68
|
+
if self is FilterOperator.NOT_SET:
|
69
|
+
return not_set()
|
70
|
+
if self is FilterOperator.IN:
|
71
|
+
return in_()
|
72
|
+
if self is FilterOperator.NOT_IN:
|
73
|
+
return not_in()
|
athena/types/llm_model.py
CHANGED
@@ -14,6 +14,7 @@ class LlmModel(str, enum.Enum):
|
|
14
14
|
GPT_35_TURBO = "gpt-3.5-turbo"
|
15
15
|
GPT_4_TURBO = "gpt-4-turbo"
|
16
16
|
GPT_4_TURBO_PREVIEW = "gpt-4-turbo-preview"
|
17
|
+
GPT_4_O_MINI = "gpt-4o-mini"
|
17
18
|
GPT_4_O = "gpt-4o"
|
18
19
|
GPT_4 = "gpt-4"
|
19
20
|
MIXTRAL_SMALL_8_X_7_B_0211 = "mixtral-small-8x7b-0211"
|
@@ -21,6 +22,7 @@ class LlmModel(str, enum.Enum):
|
|
21
22
|
MIXTRAL_8_X_22_B_INSTRUCT = "mixtral-8x22b-instruct"
|
22
23
|
LLAMA_V_38_B_INSTRUCT = "llama-v3-8b-instruct"
|
23
24
|
LLAMA_V_370_B_INSTRUCT = "llama-v3-70b-instruct"
|
25
|
+
CLAUDE_35_SONNET_20240620 = "claude-3-5-sonnet-20240620"
|
24
26
|
CLAUDE_3_OPUS_20240229 = "claude-3-opus-20240229"
|
25
27
|
CLAUDE_3_SONNET_20240229 = "claude-3-sonnet-20240229"
|
26
28
|
CLAUDE_3_HAIKU_20240307 = "claude-3-haiku-20240307"
|
@@ -39,6 +41,7 @@ class LlmModel(str, enum.Enum):
|
|
39
41
|
gpt_35_turbo: typing.Callable[[], T_Result],
|
40
42
|
gpt_4_turbo: typing.Callable[[], T_Result],
|
41
43
|
gpt_4_turbo_preview: typing.Callable[[], T_Result],
|
44
|
+
gpt_4_o_mini: typing.Callable[[], T_Result],
|
42
45
|
gpt_4_o: typing.Callable[[], T_Result],
|
43
46
|
gpt_4: typing.Callable[[], T_Result],
|
44
47
|
mixtral_small_8_x_7_b_0211: typing.Callable[[], T_Result],
|
@@ -46,6 +49,7 @@ class LlmModel(str, enum.Enum):
|
|
46
49
|
mixtral_8_x_22_b_instruct: typing.Callable[[], T_Result],
|
47
50
|
llama_v_38_b_instruct: typing.Callable[[], T_Result],
|
48
51
|
llama_v_370_b_instruct: typing.Callable[[], T_Result],
|
52
|
+
claude_35_sonnet_20240620: typing.Callable[[], T_Result],
|
49
53
|
claude_3_opus_20240229: typing.Callable[[], T_Result],
|
50
54
|
claude_3_sonnet_20240229: typing.Callable[[], T_Result],
|
51
55
|
claude_3_haiku_20240307: typing.Callable[[], T_Result],
|
@@ -65,6 +69,8 @@ class LlmModel(str, enum.Enum):
|
|
65
69
|
return gpt_4_turbo()
|
66
70
|
if self is LlmModel.GPT_4_TURBO_PREVIEW:
|
67
71
|
return gpt_4_turbo_preview()
|
72
|
+
if self is LlmModel.GPT_4_O_MINI:
|
73
|
+
return gpt_4_o_mini()
|
68
74
|
if self is LlmModel.GPT_4_O:
|
69
75
|
return gpt_4_o()
|
70
76
|
if self is LlmModel.GPT_4:
|
@@ -79,6 +85,8 @@ class LlmModel(str, enum.Enum):
|
|
79
85
|
return llama_v_38_b_instruct()
|
80
86
|
if self is LlmModel.LLAMA_V_370_B_INSTRUCT:
|
81
87
|
return llama_v_370_b_instruct()
|
88
|
+
if self is LlmModel.CLAUDE_35_SONNET_20240620:
|
89
|
+
return claude_35_sonnet_20240620()
|
82
90
|
if self is LlmModel.CLAUDE_3_OPUS_20240229:
|
83
91
|
return claude_3_opus_20240229()
|
84
92
|
if self is LlmModel.CLAUDE_3_SONNET_20240229:
|
athena/types/model.py
CHANGED
@@ -16,6 +16,7 @@ class Model(str, enum.Enum):
|
|
16
16
|
GPT_4_TURBO_PREVIEW = "gpt-4-turbo-preview"
|
17
17
|
GPT_4 = "gpt-4"
|
18
18
|
GPT_4_O = "gpt-4o"
|
19
|
+
GPT_4_O_MINI = "gpt-4o-mini"
|
19
20
|
MIXTRAL_SMALL_8_X_7_B_0211 = "mixtral-small-8x7b-0211"
|
20
21
|
MISTRAL_LARGE_0224 = "mistral-large-0224"
|
21
22
|
MIXTRAL_8_X_22_B_INSTRUCT = "mixtral-8x22b-instruct"
|
@@ -34,6 +35,7 @@ class Model(str, enum.Enum):
|
|
34
35
|
gpt_4_turbo_preview: typing.Callable[[], T_Result],
|
35
36
|
gpt_4: typing.Callable[[], T_Result],
|
36
37
|
gpt_4_o: typing.Callable[[], T_Result],
|
38
|
+
gpt_4_o_mini: typing.Callable[[], T_Result],
|
37
39
|
mixtral_small_8_x_7_b_0211: typing.Callable[[], T_Result],
|
38
40
|
mistral_large_0224: typing.Callable[[], T_Result],
|
39
41
|
mixtral_8_x_22_b_instruct: typing.Callable[[], T_Result],
|
@@ -55,6 +57,8 @@ class Model(str, enum.Enum):
|
|
55
57
|
return gpt_4()
|
56
58
|
if self is Model.GPT_4_O:
|
57
59
|
return gpt_4_o()
|
60
|
+
if self is Model.GPT_4_O_MINI:
|
61
|
+
return gpt_4_o_mini()
|
58
62
|
if self is Model.MIXTRAL_SMALL_8_X_7_B_0211:
|
59
63
|
return mixtral_small_8_x_7_b_0211()
|
60
64
|
if self is Model.MISTRAL_LARGE_0224:
|
@@ -0,0 +1,34 @@
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
2
|
+
|
3
|
+
import datetime as dt
|
4
|
+
import typing
|
5
|
+
|
6
|
+
from ..core.datetime_utils import serialize_datetime
|
7
|
+
from ..core.pydantic_utilities import pydantic_v1
|
8
|
+
from .filter_model import FilterModel
|
9
|
+
from .time_dimension_model import TimeDimensionModel
|
10
|
+
|
11
|
+
|
12
|
+
class QueryModel(pydantic_v1.BaseModel):
    """A semantic-layer query: which measures, dimensions, time dimensions
    and filters to evaluate.

    Serialization uses camelCase aliases (``timeDimensions``) and omits
    unset fields, matching the wire format of the API.
    """

    measures: typing.Optional[typing.List[str]] = None
    time_dimensions: typing.Optional[typing.List[TimeDimensionModel]] = pydantic_v1.Field(
        alias="timeDimensions", default=None
    )
    dimensions: typing.Optional[typing.List[str]] = None
    filters: typing.Optional[typing.List[FilterModel]] = None

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset output."""
        # Caller-supplied kwargs take precedence over the defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True}
        merged.update(kwargs)
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict, defaulting to by-alias / exclude-unset output."""
        merged: typing.Any = {"by_alias": True, "exclude_unset": True}
        merged.update(kwargs)
        return super().dict(**merged)

    class Config:
        frozen = True
        smart_union = True
        allow_population_by_field_name = True
        populate_by_name = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
|
@@ -0,0 +1,25 @@
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
2
|
+
|
3
|
+
import datetime as dt
|
4
|
+
import typing
|
5
|
+
|
6
|
+
from ..core.datetime_utils import serialize_datetime
|
7
|
+
from ..core.pydantic_utilities import pydantic_v1
|
8
|
+
|
9
|
+
|
10
|
+
class SemanticQueryOut(pydantic_v1.BaseModel):
    """Response wrapper for a semantic query: the raw result payload
    under ``output``.
    """

    output: typing.Dict[str, typing.Any]

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset output."""
        # Caller-supplied kwargs take precedence over the defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True}
        merged.update(kwargs)
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict, defaulting to by-alias / exclude-unset output."""
        merged: typing.Any = {"by_alias": True, "exclude_unset": True}
        merged.update(kwargs)
        return super().dict(**merged)

    class Config:
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
|
@@ -0,0 +1,29 @@
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
2
|
+
|
3
|
+
import datetime as dt
|
4
|
+
import typing
|
5
|
+
|
6
|
+
from ..core.datetime_utils import serialize_datetime
|
7
|
+
from ..core.pydantic_utilities import pydantic_v1
|
8
|
+
|
9
|
+
|
10
|
+
class TimeDimensionModel(pydantic_v1.BaseModel):
    """A time dimension for a semantic query: the dimension name, its
    granularity, and the date range to cover.

    ``date_range`` serializes as ``dateRange`` on the wire.
    """

    dimension: str
    granularity: str
    date_range: typing.List[str] = pydantic_v1.Field(alias="dateRange")

    def json(self, **kwargs: typing.Any) -> str:
        """Serialize to JSON, defaulting to by-alias / exclude-unset output."""
        # Caller-supplied kwargs take precedence over the defaults.
        merged: typing.Any = {"by_alias": True, "exclude_unset": True}
        merged.update(kwargs)
        return super().json(**merged)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """Serialize to a dict, defaulting to by-alias / exclude-unset output."""
        merged: typing.Any = {"by_alias": True, "exclude_unset": True}
        merged.update(kwargs)
        return super().dict(**merged)

    class Config:
        frozen = True
        smart_union = True
        allow_population_by_field_name = True
        populate_by_name = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
|
@@ -1,11 +1,11 @@
|
|
1
|
-
athena/__init__.py,sha256=
|
1
|
+
athena/__init__.py,sha256=tMr1ZzM-Ij6aKBOrYAfa6mtPEpFfu_q-8-APaTJyfwY,2075
|
2
2
|
athena/base_client.py,sha256=RjB7CwjedSRf4V5BH07pFx6yByX_YQFXrAXIyWDHJ_s,7089
|
3
3
|
athena/chain/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
4
4
|
athena/chain/client.py,sha256=2vSu7d4RvgbGc7jbWpKkCs5dU-ryCIJ1i0I1EsoCEdQ,16177
|
5
5
|
athena/client.py,sha256=8QypiDlbZ0C1YsJh6GzhylLVCZXDQc1MCJTURo2_vvI,3576
|
6
6
|
athena/core/__init__.py,sha256=1pNSKkwyQvMl_F0wohBqmoQAITptg3zlvCwsoSSzy7c,853
|
7
7
|
athena/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
|
8
|
-
athena/core/client_wrapper.py,sha256=
|
8
|
+
athena/core/client_wrapper.py,sha256=RZH8zFwKwqqz6W-8cASwgcI1lQVRFTFm8kU8JZSfSAY,1495
|
9
9
|
athena/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
|
10
10
|
athena/core/file.py,sha256=sy1RUGZ3aJYuw998bZytxxo6QdgKmlnlgBaMvwEKCGg,1480
|
11
11
|
athena/core/http_client.py,sha256=5ok6hqgZDJhg57EHvMnr0BBaHdG50QxFPKaCZ9aVWTc,5059
|
@@ -31,12 +31,15 @@ athena/search/client.py,sha256=j0DYo1WWFMlrssybtQAH71O889eRJdDHheADms5Q9yE,7640
|
|
31
31
|
athena/snippet/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
32
32
|
athena/snippet/client.py,sha256=EE2ADdtSvk_c3-NkVMfwS1r29-y7YhozPoqXc4DPj8k,11323
|
33
33
|
athena/tools/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
34
|
-
athena/tools/client.py,sha256=
|
35
|
-
athena/types/__init__.py,sha256=
|
34
|
+
athena/tools/client.py,sha256=VLLcpLliclTJHvHp1Ul9LhpRFjZ6mHYuRIjX6Lwdcz0,40895
|
35
|
+
athena/types/__init__.py,sha256=ZlCYghXU0eCOkeDLOC1jQgz1N8iRYAbcK6XGNKKOI74,2515
|
36
|
+
athena/types/convert_pdf_to_sheet_out.py,sha256=cI1KZpYZEiYEoVXfpZVHI5ibgjy3cj8PcNiA9ukqOBo,889
|
36
37
|
athena/types/dataset.py,sha256=ShFYop4Pj-pscWrjWZQFboUmK5TDX3NzP0xNRZimpp8,994
|
37
38
|
athena/types/document.py,sha256=evK_-wGk07kB8y5xyPMFCgqDbItuxCAawdUN20b6zFg,1061
|
38
39
|
athena/types/excecute_tool_first_workflow_out.py,sha256=T4GxP3yzTY3XkumdpUdXbn8Tx_iNc1exed8N2SnwV2w,875
|
39
40
|
athena/types/file_data_response.py,sha256=DVkcuaZYDAI-2Ih4xWU5tVsS0cMPoyDOEyeiG6i2xI8,1171
|
41
|
+
athena/types/filter_model.py,sha256=ygqXB329xyeXZEd1R9Ea3GYtGoy2oftEP4KPYB17oOE,955
|
42
|
+
athena/types/filter_operator.py,sha256=8E4hKamq97tR3_Qp4YNDYRDPx4QHODciqGLENkwtf3o,2200
|
40
43
|
athena/types/firecrawl_scrape_url_data_reponse_dto.py,sha256=-MkjjhzRTpuyoypLmiGtvH01TjeoVQxpX-HsALUSFUM,1001
|
41
44
|
athena/types/firecrawl_scrape_url_metadata.py,sha256=kIKb0mMGxw7-49GSsagfx6AperguHDKOvODGPjFtOxU,1143
|
42
45
|
athena/types/get_datasets_response.py,sha256=kbv8BI2nEo34-HJZV33dPhKWKrA1FiIS_OUkUYJj1ZQ,969
|
@@ -44,18 +47,21 @@ athena/types/get_snippet_out.py,sha256=AkkF6YJcYysiQVnOvhRerHMsHkBTu1BP9tYZC8wET
|
|
44
47
|
athena/types/get_snippets_response.py,sha256=pgwYqmddU5shKeVaE4RQSFN9SLsVAeQp3sqIkQlvzoU,969
|
45
48
|
athena/types/http_validation_error.py,sha256=u4t-1U0DI0u3Zj_Oz7AmGmpL4sqBzoS_5nZHImWQbmM,953
|
46
49
|
athena/types/langchain_documents_request_out.py,sha256=O1v7mcgt0ryaY4e8YODpAHYJKyUY7jYFBc0s93A1sgo,892
|
47
|
-
athena/types/llm_model.py,sha256=
|
50
|
+
athena/types/llm_model.py,sha256=YFj5S6Wcp7CV75YChhz0CTMFTlvmf_q6vVwhmLxwYvw,5087
|
48
51
|
athena/types/map_reduce_chain_out.py,sha256=6R-fuxHaww60dhUAuwrdZPp5lV-DyFZh9SGLCc6fp8E,950
|
49
52
|
athena/types/message_out.py,sha256=HJZizmFH7crD3OHm0fdTy3189F2gv5qR8aaUbTTfWFI,845
|
50
53
|
athena/types/message_out_dto.py,sha256=1G8srlYaIYmoYRstLKm97xZGxK87DK57CiO9hYnt3gQ,1031
|
51
|
-
athena/types/model.py,sha256=
|
54
|
+
athena/types/model.py,sha256=uUaC03YGJxMcgQzBf30jVC0NsLCICDxSYS4un0VYlME,3284
|
52
55
|
athena/types/publish_formats.py,sha256=1_F5vyEwDtxshFG0S2gNx05V8jZHFEK6ZoZkjIJVhyQ,885
|
56
|
+
athena/types/query_model.py,sha256=ZRuFxbgYi6X9nLNQvmrOV594OaSpKmqdGBNdrmj8pZ4,1318
|
53
57
|
athena/types/report.py,sha256=km2CgCbHBXQQbPai1y5sGlsQpO7WAlUVvdsRC_7f4KI,926
|
54
58
|
athena/types/researcher_out.py,sha256=v9Sx2Nm3ptwScV-JoSX0z-oKhmjEZTmWMUOKsTcQ6jQ,879
|
59
|
+
athena/types/semantic_query_out.py,sha256=4-nnE15GJboHB1bE4Z6VqF-AzrvpiDuJA9cJYaUMbDU,880
|
55
60
|
athena/types/snippet.py,sha256=Mrc92_hBABJQjCSToAA-FgwhvO-Jn8Kjm-lYI6aMlUY,1106
|
56
61
|
athena/types/sql_results.py,sha256=ExPFds4vZ425AxGt0jhykbPhOjkplZPGQwVKb0LHg_g,880
|
57
62
|
athena/types/status_enum.py,sha256=0UZbhdAx215GHC-U53RS98mYHtn1N3On4VBe4j02Qtc,672
|
58
63
|
athena/types/structured_parse_result.py,sha256=fph7KrT_X_2BKDCOFN1UEufeaMmpSEvT0Oi6aM-e3kU,885
|
64
|
+
athena/types/time_dimension_model.py,sha256=hnPBry6ZEgzSYAPtGWuMRzShSeHPEBv2HU37g0W48ro,1031
|
59
65
|
athena/types/tools.py,sha256=W0ekZrKpwlf66HJC7kGLWYJE3C1agJRnmMbvfA4M93o,1577
|
60
66
|
athena/types/upload_documents_out.py,sha256=3FJ0QIKl6zGmswAUpgkrVGP2nLdH3AloXrShg4Mh9lk,986
|
61
67
|
athena/types/url_result.py,sha256=lIgnQeyKy_UfFFPe7HMrrRzb-SK089RxcKcKN9Q3DNQ,873
|
@@ -67,6 +73,6 @@ athena/upload/client.py,sha256=e5h10wZ7lGBasJ6X907x7nXHRhX600mLSkdw2qz6pmY,6385
|
|
67
73
|
athena/version.py,sha256=8aYAOJtVLaJLpRp6mTiEIhnl8gXA7yE0aDtZ-3mKQ4k,87
|
68
74
|
athena/workflow/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
69
75
|
athena/workflow/client.py,sha256=uY9IS_v2GDQ-g2nbatpTUP1aT1oHbG_E8WAor8JzxPI,6249
|
70
|
-
athena_intelligence-0.1.
|
71
|
-
athena_intelligence-0.1.
|
72
|
-
athena_intelligence-0.1.
|
76
|
+
athena_intelligence-0.1.73.dist-info/METADATA,sha256=rZDMXobCN5lrkflrEMkBzcmb4rLEryAzRHwL7if-nlU,4738
|
77
|
+
athena_intelligence-0.1.73.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
|
78
|
+
athena_intelligence-0.1.73.dist-info/RECORD,,
|
File without changes
|