athena-intelligence 0.1.45__py3-none-any.whl → 0.1.50__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- athena/__init__.py +12 -3
- athena/base_client.py +27 -6
- athena/chain/__init__.py +0 -3
- athena/chain/client.py +42 -44
- athena/core/__init__.py +2 -0
- athena/core/client_wrapper.py +14 -6
- athena/core/http_client.py +8 -3
- athena/core/jsonable_encoder.py +7 -11
- athena/core/pydantic_utilities.py +12 -0
- athena/dataset/client.py +15 -15
- athena/message/client.py +33 -25
- athena/query/client.py +15 -15
- athena/report/client.py +15 -15
- athena/search/client.py +15 -15
- athena/snippet/client.py +15 -15
- athena/tasks/__init__.py +2 -0
- athena/tasks/client.py +191 -0
- athena/tools/client.py +178 -25
- athena/types/__init__.py +8 -0
- athena/types/dataset.py +3 -6
- athena/types/document.py +31 -0
- athena/types/excecute_tool_first_workflow_out.py +3 -6
- athena/types/firecrawl_scrape_url_data_reponse_dto.py +3 -6
- athena/types/firecrawl_scrape_url_metadata.py +5 -7
- athena/types/get_datasets_response.py +3 -6
- athena/types/get_snippets_response.py +3 -6
- athena/types/http_validation_error.py +3 -6
- athena/types/langchain_documents_request_out.py +26 -0
- athena/types/llm_model.py +93 -0
- athena/types/message_out.py +3 -6
- athena/types/message_out_dto.py +3 -6
- athena/types/model.py +0 -4
- athena/types/plan_execute_out.py +32 -0
- athena/types/report.py +3 -6
- athena/types/snippet.py +3 -6
- athena/types/sql_results.py +3 -6
- athena/types/structured_parse_result.py +3 -6
- athena/types/url_result.py +3 -6
- athena/types/validation_error.py +3 -6
- athena/version.py +4 -0
- {athena_intelligence-0.1.45.dist-info → athena_intelligence-0.1.50.dist-info}/METADATA +1 -1
- athena_intelligence-0.1.50.dist-info/RECORD +65 -0
- athena/chain/types/__init__.py +0 -5
- athena/chain/types/structured_parse_in_parsing_model.py +0 -53
- athena_intelligence-0.1.45.dist-info/RECORD +0 -59
- {athena_intelligence-0.1.45.dist-info → athena_intelligence-0.1.50.dist-info}/WHEEL +0 -0
athena/tools/client.py
CHANGED
@@ -7,19 +7,16 @@ from json.decoder import JSONDecodeError
 from ..core.api_error import ApiError
 from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pydantic_utilities import pydantic_v1
 from ..core.remove_none_from_dict import remove_none_from_dict
 from ..core.request_options import RequestOptions
 from ..errors.unprocessable_entity_error import UnprocessableEntityError
 from ..types.excecute_tool_first_workflow_out import ExcecuteToolFirstWorkflowOut
 from ..types.firecrawl_scrape_url_data_reponse_dto import FirecrawlScrapeUrlDataReponseDto
 from ..types.http_validation_error import HttpValidationError
+from ..types.langchain_documents_request_out import LangchainDocumentsRequestOut
 from ..types.tool_models import ToolModels

-try:
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
 # this is used as the default value for optional parameters
 OMIT = typing.cast(typing.Any, ...)

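In 0.1.45 every generated module carried its own pydantic v1/v2 fallback; 0.1.50 centralizes it in the new athena/core/pydantic_utilities.py (+12 lines in the file list above). That module's body is not shown in this diff, so the following is only a plausible sketch inferred from the try/except block it replaces; the shipped module may differ:

    # hypothetical reconstruction of athena/core/pydantic_utilities.py
    try:
        import pydantic.v1 as pydantic_v1  # pydantic >= 2 installed: use its bundled v1 API
    except ImportError:
        import pydantic as pydantic_v1  # type: ignore  # pydantic 1.x installed

    __all__ = ["pydantic_v1"]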
@@ -56,8 +53,82 @@ class ToolsClient:
         if params is not OMIT:
             _request["params"] = params
         _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/firecrawl/scrape-url"),
+            method="POST",
+            url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/firecrawl/scrape-url"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else self._client_wrapper.get_timeout(),
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic_v1.parse_obj_as(FirecrawlScrapeUrlDataReponseDto, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(
+                pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+            )
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def langchain_documents(
+        self,
+        *,
+        document_id: str,
+        pagination_limit: typing.Optional[int] = OMIT,
+        pagination_offset: typing.Optional[int] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> LangchainDocumentsRequestOut:
+        """
+        Parameters:
+            - document_id: str.
+
+            - pagination_limit: typing.Optional[int].
+
+            - pagination_offset: typing.Optional[int].
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena.client import Athena
+
+        client = Athena(
+            api_key="YOUR_API_KEY",
+        )
+        client.tools.langchain_documents(
+            document_id="doc_9249292-d118-42d3-95b4-00eccfe0754f",
+            pagination_limit=250,
+            pagination_offset=0,
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"document_id": document_id}
+        if pagination_limit is not OMIT:
+            _request["pagination_limit"] = pagination_limit
+        if pagination_offset is not OMIT:
+            _request["pagination_offset"] = pagination_offset
+        _response = self._client_wrapper.httpx_client.request(
+            method="POST",
+            url=urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/file/langchain-documents"
+            ),
             params=jsonable_encoder(
                 request_options.get("additional_query_parameters") if request_options is not None else None
             ),
@@ -77,14 +148,16 @@ class ToolsClient:
             ),
             timeout=request_options.get("timeout_in_seconds")
             if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else
+            else self._client_wrapper.get_timeout(),
             retries=0,
             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
         )
         if 200 <= _response.status_code < 300:
-            return
+            return pydantic_v1.parse_obj_as(LangchainDocumentsRequestOut, _response.json())  # type: ignore
         if _response.status_code == 422:
-            raise UnprocessableEntityError(
+            raise UnprocessableEntityError(
+                pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+            )
         try:
             _response_json = _response.json()
         except JSONDecodeError:
@@ -131,8 +204,8 @@ class ToolsClient:
         if tool_kwargs is not OMIT:
             _request["tool_kwargs"] = tool_kwargs
         _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/first-agent"),
+            method="POST",
+            url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/first-agent"),
             params=jsonable_encoder(
                 request_options.get("additional_query_parameters") if request_options is not None else None
             ),
@@ -152,14 +225,16 @@ class ToolsClient:
             ),
             timeout=request_options.get("timeout_in_seconds")
             if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else
+            else self._client_wrapper.get_timeout(),
             retries=0,
             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
         )
         if 200 <= _response.status_code < 300:
-            return
+            return pydantic_v1.parse_obj_as(ExcecuteToolFirstWorkflowOut, _response.json())  # type: ignore
         if _response.status_code == 422:
-            raise UnprocessableEntityError(
+            raise UnprocessableEntityError(
+                pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+            )
         try:
             _response_json = _response.json()
         except JSONDecodeError:
@@ -199,8 +274,82 @@ class AsyncToolsClient:
         if params is not OMIT:
             _request["params"] = params
         _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/firecrawl/scrape-url"),
+            method="POST",
+            url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/firecrawl/scrape-url"),
+            params=jsonable_encoder(
+                request_options.get("additional_query_parameters") if request_options is not None else None
+            ),
+            json=jsonable_encoder(_request)
+            if request_options is None or request_options.get("additional_body_parameters") is None
+            else {
+                **jsonable_encoder(_request),
+                **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+            },
+            headers=jsonable_encoder(
+                remove_none_from_dict(
+                    {
+                        **self._client_wrapper.get_headers(),
+                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                    }
+                )
+            ),
+            timeout=request_options.get("timeout_in_seconds")
+            if request_options is not None and request_options.get("timeout_in_seconds") is not None
+            else self._client_wrapper.get_timeout(),
+            retries=0,
+            max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic_v1.parse_obj_as(FirecrawlScrapeUrlDataReponseDto, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(
+                pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+            )
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def langchain_documents(
+        self,
+        *,
+        document_id: str,
+        pagination_limit: typing.Optional[int] = OMIT,
+        pagination_offset: typing.Optional[int] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> LangchainDocumentsRequestOut:
+        """
+        Parameters:
+            - document_id: str.
+
+            - pagination_limit: typing.Optional[int].
+
+            - pagination_offset: typing.Optional[int].
+
+            - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+        ---
+        from athena.client import AsyncAthena
+
+        client = AsyncAthena(
+            api_key="YOUR_API_KEY",
+        )
+        await client.tools.langchain_documents(
+            document_id="doc_9249292-d118-42d3-95b4-00eccfe0754f",
+            pagination_limit=250,
+            pagination_offset=0,
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"document_id": document_id}
+        if pagination_limit is not OMIT:
+            _request["pagination_limit"] = pagination_limit
+        if pagination_offset is not OMIT:
+            _request["pagination_offset"] = pagination_offset
+        _response = await self._client_wrapper.httpx_client.request(
+            method="POST",
+            url=urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/file/langchain-documents"
+            ),
             params=jsonable_encoder(
                 request_options.get("additional_query_parameters") if request_options is not None else None
             ),
@@ -220,14 +369,16 @@ class AsyncToolsClient:
             ),
             timeout=request_options.get("timeout_in_seconds")
             if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else
+            else self._client_wrapper.get_timeout(),
             retries=0,
             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
         )
         if 200 <= _response.status_code < 300:
-            return
+            return pydantic_v1.parse_obj_as(LangchainDocumentsRequestOut, _response.json())  # type: ignore
         if _response.status_code == 422:
-            raise UnprocessableEntityError(
+            raise UnprocessableEntityError(
+                pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+            )
         try:
             _response_json = _response.json()
         except JSONDecodeError:
@@ -274,8 +425,8 @@ class AsyncToolsClient:
         if tool_kwargs is not OMIT:
             _request["tool_kwargs"] = tool_kwargs
         _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/first-agent"),
+            method="POST",
+            url=urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v0/tools/first-agent"),
             params=jsonable_encoder(
                 request_options.get("additional_query_parameters") if request_options is not None else None
             ),
@@ -295,14 +446,16 @@ class AsyncToolsClient:
             ),
             timeout=request_options.get("timeout_in_seconds")
             if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else
+            else self._client_wrapper.get_timeout(),
             retries=0,
             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
         )
         if 200 <= _response.status_code < 300:
-            return
+            return pydantic_v1.parse_obj_as(ExcecuteToolFirstWorkflowOut, _response.json())  # type: ignore
         if _response.status_code == 422:
-            raise UnprocessableEntityError(
+            raise UnprocessableEntityError(
+                pydantic_v1.parse_obj_as(HttpValidationError, _response.json())  # type: ignore
+            )
         try:
             _response_json = _response.json()
         except JSONDecodeError:
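Every rewritten call site above threads the same RequestOptions keys through the request (additional_query_parameters, additional_body_parameters, additional_headers, timeout_in_seconds, max_retries) and raises UnprocessableEntityError on a 422. A hedged usage sketch; it assumes the parsed HttpValidationError passed to the error's constructor is reachable via a body attribute, as the ApiError calls above suggest:

    from athena.client import Athena
    from athena.errors.unprocessable_entity_error import UnprocessableEntityError

    client = Athena(api_key="YOUR_API_KEY")
    try:
        out = client.tools.langchain_documents(
            document_id="doc_9249292-d118-42d3-95b4-00eccfe0754f",
            # per-request overrides read by the generated code above
            request_options={"timeout_in_seconds": 30, "max_retries": 2},
        )
    except UnprocessableEntityError as err:
        print(err.body)  # assumption: the HttpValidationError handed to the constructor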
athena/types/__init__.py
CHANGED
@@ -1,15 +1,19 @@
 # This file was auto-generated by Fern from our API Definition.

 from .dataset import Dataset
+from .document import Document
 from .excecute_tool_first_workflow_out import ExcecuteToolFirstWorkflowOut
 from .firecrawl_scrape_url_data_reponse_dto import FirecrawlScrapeUrlDataReponseDto
 from .firecrawl_scrape_url_metadata import FirecrawlScrapeUrlMetadata
 from .get_datasets_response import GetDatasetsResponse
 from .get_snippets_response import GetSnippetsResponse
 from .http_validation_error import HttpValidationError
+from .langchain_documents_request_out import LangchainDocumentsRequestOut
+from .llm_model import LlmModel
 from .message_out import MessageOut
 from .message_out_dto import MessageOutDto
 from .model import Model
+from .plan_execute_out import PlanExecuteOut
 from .report import Report
 from .snippet import Snippet
 from .sql_results import SqlResults
@@ -23,15 +27,19 @@ from .validation_error_loc_item import ValidationErrorLocItem

 __all__ = [
     "Dataset",
+    "Document",
     "ExcecuteToolFirstWorkflowOut",
     "FirecrawlScrapeUrlDataReponseDto",
     "FirecrawlScrapeUrlMetadata",
     "GetDatasetsResponse",
     "GetSnippetsResponse",
     "HttpValidationError",
+    "LangchainDocumentsRequestOut",
+    "LlmModel",
     "MessageOut",
     "MessageOutDto",
     "Model",
+    "PlanExecuteOut",
     "Report",
     "Snippet",
     "SqlResults",
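The net effect for SDK users is four new public names importable from athena.types:

    from athena.types import Document, LangchainDocumentsRequestOut, LlmModel, PlanExecuteOut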
athena/types/dataset.py
CHANGED
@@ -4,14 +4,10 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1

-try:
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore

-
-class Dataset(pydantic.BaseModel):
+class Dataset(pydantic_v1.BaseModel):
     id: str
     name: typing.Optional[str] = None
     description: typing.Optional[str] = None
@@ -29,4 +25,5 @@ class Dataset(pydantic.BaseModel):
     class Config:
         frozen = True
         smart_union = True
+        extra = pydantic_v1.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
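The `extra = pydantic_v1.Extra.allow` line added to every model's Config is the behavioral change worth noting: fields the SDK schema doesn't declare are now kept on the parsed model instead of being silently dropped (pydantic v1's default is Extra.ignore). A minimal sketch of the new behavior, with a hypothetical payload:

    from athena.types import Dataset

    # "new_server_field" is not declared on Dataset, but Extra.allow retains it
    ds = Dataset.parse_obj({"id": "ds_123", "new_server_field": 42})
    assert ds.dict()["new_server_field"] == 42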
athena/types/document.py
ADDED
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class Document(pydantic_v1.BaseModel):
+    """
+    Class for storing a piece of text and associated metadata.
+    """
+
+    page_content: str
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = None
+    type: typing.Optional[typing.Literal["Document"]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
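Document mirrors LangChain's document shape: required page_content plus free-form metadata. A small usage sketch of the generated model:

    from athena.types import Document

    doc = Document(page_content="Athena is an AI data analyst.", metadata={"source": "notes.txt"})
    print(doc.json())  # the json()/dict() overrides default to by_alias=True, exclude_unset=True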
athena/types/excecute_tool_first_workflow_out.py
CHANGED
@@ -4,14 +4,10 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1

-try:
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore

-
-class ExcecuteToolFirstWorkflowOut(pydantic.BaseModel):
+class ExcecuteToolFirstWorkflowOut(pydantic_v1.BaseModel):
     output_message: str

     def json(self, **kwargs: typing.Any) -> str:
@@ -25,4 +21,5 @@ class ExcecuteToolFirstWorkflowOut(pydantic.BaseModel):
     class Config:
         frozen = True
         smart_union = True
+        extra = pydantic_v1.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
athena/types/firecrawl_scrape_url_data_reponse_dto.py
CHANGED
@@ -4,15 +4,11 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
 from .firecrawl_scrape_url_metadata import FirecrawlScrapeUrlMetadata

-try:
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore

-
-class FirecrawlScrapeUrlDataReponseDto(pydantic.BaseModel):
+class FirecrawlScrapeUrlDataReponseDto(pydantic_v1.BaseModel):
     content: str
     markdown: str
     metadata: FirecrawlScrapeUrlMetadata
@@ -28,4 +24,5 @@ class FirecrawlScrapeUrlDataReponseDto(pydantic.BaseModel):
     class Config:
         frozen = True
         smart_union = True
+        extra = pydantic_v1.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
athena/types/firecrawl_scrape_url_metadata.py
CHANGED
@@ -4,18 +4,14 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1

-try:
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore

-
-class FirecrawlScrapeUrlMetadata(pydantic.BaseModel):
+class FirecrawlScrapeUrlMetadata(pydantic_v1.BaseModel):
     title: typing.Optional[str] = None
     description: typing.Optional[str] = None
     language: typing.Optional[str] = None
-    source_url: typing.Optional[str] =
+    source_url: typing.Optional[str] = pydantic_v1.Field(alias="sourceURL", default=None)

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -29,4 +25,6 @@
         frozen = True
         smart_union = True
         allow_population_by_field_name = True
+        populate_by_name = True
+        extra = pydantic_v1.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
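With the sourceURL alias plus allow_population_by_field_name (and the pydantic-v2-style populate_by_name spelling added alongside it), the model accepts either key and always serializes with the wire name:

    from athena.types import FirecrawlScrapeUrlMetadata

    m1 = FirecrawlScrapeUrlMetadata.parse_obj({"sourceURL": "https://example.com"})  # wire name
    m2 = FirecrawlScrapeUrlMetadata(source_url="https://example.com")  # python field name
    assert m1.source_url == m2.source_url
    assert '"sourceURL"' in m2.json()  # json() defaults to by_alias=True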
athena/types/get_datasets_response.py
CHANGED
@@ -4,15 +4,11 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
 from .dataset import Dataset

-try:
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore

-
-class GetDatasetsResponse(pydantic.BaseModel):
+class GetDatasetsResponse(pydantic_v1.BaseModel):
     datasets: typing.List[Dataset]
     total: int
     page: int
@@ -30,4 +26,5 @@ class GetDatasetsResponse(pydantic.BaseModel):
     class Config:
         frozen = True
         smart_union = True
+        extra = pydantic_v1.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
athena/types/get_snippets_response.py
CHANGED
@@ -4,15 +4,11 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
 from .snippet import Snippet

-try:
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore

-
-class GetSnippetsResponse(pydantic.BaseModel):
+class GetSnippetsResponse(pydantic_v1.BaseModel):
     snippets: typing.List[Snippet]
     total: int
     page: int
@@ -30,4 +26,5 @@ class GetSnippetsResponse(pydantic.BaseModel):
     class Config:
         frozen = True
         smart_union = True
+        extra = pydantic_v1.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
athena/types/http_validation_error.py
CHANGED
@@ -4,15 +4,11 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
 from .validation_error import ValidationError

-try:
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore

-
-class HttpValidationError(pydantic.BaseModel):
+class HttpValidationError(pydantic_v1.BaseModel):
     detail: typing.Optional[typing.List[ValidationError]] = None

     def json(self, **kwargs: typing.Any) -> str:
@@ -26,4 +22,5 @@ class HttpValidationError(pydantic.BaseModel):
     class Config:
         frozen = True
         smart_union = True
+        extra = pydantic_v1.Extra.allow
         json_encoders = {dt.datetime: serialize_datetime}
athena/types/langchain_documents_request_out.py
ADDED
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .document import Document
+
+
+class LangchainDocumentsRequestOut(pydantic_v1.BaseModel):
+    documents: typing.List[Document]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
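This wrapper gives the langchain_documents endpoints a typed return. A self-contained sketch of consuming it, reusing the id from the docstring example above:

    from athena.client import Athena

    client = Athena(api_key="YOUR_API_KEY")
    out = client.tools.langchain_documents(
        document_id="doc_9249292-d118-42d3-95b4-00eccfe0754f",
    )
    for doc in out.documents:  # each item is an athena.types.Document
        print(doc.page_content[:80], doc.metadata)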
athena/types/llm_model.py
ADDED
@@ -0,0 +1,93 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class LlmModel(str, enum.Enum):
+    """
+    An enumeration.
+    """
+
+    GPT_35_TURBO = "gpt-3.5-turbo"
+    GPT_4_TURBO = "gpt-4-turbo"
+    GPT_4_TURBO_PREVIEW = "gpt-4-turbo-preview"
+    GPT_4 = "gpt-4"
+    MIXTRAL_SMALL_8_X_7_B_0211 = "mixtral-small-8x7b-0211"
+    MISTRAL_LARGE_0224 = "mistral-large-0224"
+    MIXTRAL_8_X_22_B_INSTRUCT = "mixtral-8x22b-instruct"
+    LLAMA_V_38_B_INSTRUCT = "llama-v3-8b-instruct"
+    LLAMA_V_370_B_INSTRUCT = "llama-v3-70b-instruct"
+    CLAUDE_3_OPUS_20240229 = "claude-3-opus-20240229"
+    CLAUDE_3_SONNET_20240229 = "claude-3-sonnet-20240229"
+    CLAUDE_3_HAIKU_20240307 = "claude-3-haiku-20240307"
+    GROQ_MIXTRAL_8_X_7_B_32768 = "groq-mixtral-8x7b-32768"
+    GROQ_LLAMA_38_B_8192 = "groq-llama3-8b-8192"
+    GROQ_LLAMA_370_B_8192 = "groq-llama3-70b-8192"
+    GROQ_GEMMA_7_B_IT = "groq-gemma-7b-it"
+    GOOGLE_GEMINI_10_PRO_LATEST = "google-gemini-1.0-pro-latest"
+    DATABRICKS_DBRX = "databricks-dbrx"
+    GOOGLE_GEMINI_15_PRO_LATEST = "google-gemini-1.5-pro-latest"
+
+    def visit(
+        self,
+        gpt_35_turbo: typing.Callable[[], T_Result],
+        gpt_4_turbo: typing.Callable[[], T_Result],
+        gpt_4_turbo_preview: typing.Callable[[], T_Result],
+        gpt_4: typing.Callable[[], T_Result],
+        mixtral_small_8_x_7_b_0211: typing.Callable[[], T_Result],
+        mistral_large_0224: typing.Callable[[], T_Result],
+        mixtral_8_x_22_b_instruct: typing.Callable[[], T_Result],
+        llama_v_38_b_instruct: typing.Callable[[], T_Result],
+        llama_v_370_b_instruct: typing.Callable[[], T_Result],
+        claude_3_opus_20240229: typing.Callable[[], T_Result],
+        claude_3_sonnet_20240229: typing.Callable[[], T_Result],
+        claude_3_haiku_20240307: typing.Callable[[], T_Result],
+        groq_mixtral_8_x_7_b_32768: typing.Callable[[], T_Result],
+        groq_llama_38_b_8192: typing.Callable[[], T_Result],
+        groq_llama_370_b_8192: typing.Callable[[], T_Result],
+        groq_gemma_7_b_it: typing.Callable[[], T_Result],
+        google_gemini_10_pro_latest: typing.Callable[[], T_Result],
+        databricks_dbrx: typing.Callable[[], T_Result],
+        google_gemini_15_pro_latest: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is LlmModel.GPT_35_TURBO:
+            return gpt_35_turbo()
+        if self is LlmModel.GPT_4_TURBO:
+            return gpt_4_turbo()
+        if self is LlmModel.GPT_4_TURBO_PREVIEW:
+            return gpt_4_turbo_preview()
+        if self is LlmModel.GPT_4:
+            return gpt_4()
+        if self is LlmModel.MIXTRAL_SMALL_8_X_7_B_0211:
+            return mixtral_small_8_x_7_b_0211()
+        if self is LlmModel.MISTRAL_LARGE_0224:
+            return mistral_large_0224()
+        if self is LlmModel.MIXTRAL_8_X_22_B_INSTRUCT:
+            return mixtral_8_x_22_b_instruct()
+        if self is LlmModel.LLAMA_V_38_B_INSTRUCT:
+            return llama_v_38_b_instruct()
+        if self is LlmModel.LLAMA_V_370_B_INSTRUCT:
+            return llama_v_370_b_instruct()
+        if self is LlmModel.CLAUDE_3_OPUS_20240229:
+            return claude_3_opus_20240229()
+        if self is LlmModel.CLAUDE_3_SONNET_20240229:
+            return claude_3_sonnet_20240229()
+        if self is LlmModel.CLAUDE_3_HAIKU_20240307:
+            return claude_3_haiku_20240307()
+        if self is LlmModel.GROQ_MIXTRAL_8_X_7_B_32768:
+            return groq_mixtral_8_x_7_b_32768()
+        if self is LlmModel.GROQ_LLAMA_38_B_8192:
+            return groq_llama_38_b_8192()
+        if self is LlmModel.GROQ_LLAMA_370_B_8192:
+            return groq_llama_370_b_8192()
+        if self is LlmModel.GROQ_GEMMA_7_B_IT:
+            return groq_gemma_7_b_it()
+        if self is LlmModel.GOOGLE_GEMINI_10_PRO_LATEST:
+            return google_gemini_10_pro_latest()
+        if self is LlmModel.DATABRICKS_DBRX:
+            return databricks_dbrx()
+        if self is LlmModel.GOOGLE_GEMINI_15_PRO_LATEST:
+            return google_gemini_15_pro_latest()
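LlmModel subclasses str, so members compare equal to their wire values and round-trip from raw strings; the generated visit() helper additionally forces exhaustive handling by taking one callback per member. A short sketch of the plain-enum usage:

    from athena.types import LlmModel

    model = LlmModel("claude-3-opus-20240229")  # construct from the wire value
    assert model is LlmModel.CLAUDE_3_OPUS_20240229
    assert model == "claude-3-opus-20240229"  # str subclass: equal to its own value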