exa-py 1.12.1__tar.gz → 1.13.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of exa-py might be problematic.
- {exa_py-1.12.1 → exa_py-1.13.1}/PKG-INFO +5 -16
- {exa_py-1.12.1 → exa_py-1.13.1}/README.md +4 -15
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/api.py +136 -31
- {exa_py-1.12.1 → exa_py-1.13.1}/pyproject.toml +2 -2
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/__init__.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/py.typed +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/utils.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/__init__.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/_generator/pydantic/BaseModel.jinja2 +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/client.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/core/__init__.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/core/base.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/enrichments/__init__.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/enrichments/client.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/items/__init__.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/items/client.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/searches/__init__.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/searches/client.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/types.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/webhooks/__init__.py +0 -0
- {exa_py-1.12.1 → exa_py-1.13.1}/exa_py/websets/webhooks/client.py +0 -0
--- exa_py-1.12.1/PKG-INFO
+++ exa_py-1.13.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: exa-py
-Version: 1.12.1
+Version: 1.13.1
 Summary: Python SDK for Exa API.
 License: MIT
 Author: Exa AI
@@ -50,9 +50,6 @@ exa = Exa(api_key="your-api-key")
 # basic search
 results = exa.search("This is a Exa query:")
 
-# autoprompted search
-results = exa.search("autopromptable query", use_autoprompt=True)
-
 # keyword search (non-neural)
 results = exa.search("Google-style query", type="keyword")
 
@@ -65,13 +62,9 @@ exa = Exa(api_key="your-api-key")
 # search and get text contents
 results = exa.search_and_contents("This is a Exa query:")
 
-# search and get highlights
-results = exa.search_and_contents("This is a Exa query:", highlights=True)
-
 # search and get contents with contents options
 results = exa.search_and_contents("This is a Exa query:",
-    text={"include_html_tags": True, "max_characters": 1000}
-    highlights={"highlights_per_url": 2, "num_sentences": 1, "query": "This is the highlight query:"})
+    text={"include_html_tags": True, "max_characters": 1000})
 
 # find similar documents
 results = exa.find_similar("https://example.com")
@@ -80,18 +73,14 @@ exa = Exa(api_key="your-api-key")
 results = exa.find_similar("https://example.com", exclude_source_domain=True)
 
 # find similar with contents
-results = exa.find_similar_and_contents("https://example.com", text=True
+results = exa.find_similar_and_contents("https://example.com", text=True)
 
 # get text contents
-results = exa.get_contents(["
-
-# get highlights
-results = exa.get_contents(["urls"], highlights=True)
+results = exa.get_contents(["tesla.com"])
 
 # get contents with contents options
 results = exa.get_contents(["urls"],
-    text={"include_html_tags": True, "max_characters": 1000}
-    highlights={"highlights_per_url": 2, "num_sentences": 1, "query": "This is the highlight query:"})
+    text={"include_html_tags": True, "max_characters": 1000})
 
 # basic answer
 response = exa.answer("This is a query to answer a question")
--- exa_py-1.12.1/README.md
+++ exa_py-1.13.1/README.md
@@ -27,9 +27,6 @@ exa = Exa(api_key="your-api-key")
 # basic search
 results = exa.search("This is a Exa query:")
 
-# autoprompted search
-results = exa.search("autopromptable query", use_autoprompt=True)
-
 # keyword search (non-neural)
 results = exa.search("Google-style query", type="keyword")
 
@@ -42,13 +39,9 @@ exa = Exa(api_key="your-api-key")
 # search and get text contents
 results = exa.search_and_contents("This is a Exa query:")
 
-# search and get highlights
-results = exa.search_and_contents("This is a Exa query:", highlights=True)
-
 # search and get contents with contents options
 results = exa.search_and_contents("This is a Exa query:",
-    text={"include_html_tags": True, "max_characters": 1000}
-    highlights={"highlights_per_url": 2, "num_sentences": 1, "query": "This is the highlight query:"})
+    text={"include_html_tags": True, "max_characters": 1000})
 
 # find similar documents
 results = exa.find_similar("https://example.com")
@@ -57,18 +50,14 @@ exa = Exa(api_key="your-api-key")
 results = exa.find_similar("https://example.com", exclude_source_domain=True)
 
 # find similar with contents
-results = exa.find_similar_and_contents("https://example.com", text=True
+results = exa.find_similar_and_contents("https://example.com", text=True)
 
 # get text contents
-results = exa.get_contents(["
-
-# get highlights
-results = exa.get_contents(["urls"], highlights=True)
+results = exa.get_contents(["tesla.com"])
 
 # get contents with contents options
 results = exa.get_contents(["urls"],
-    text={"include_html_tags": True, "max_characters": 1000}
-    highlights={"highlights_per_url": 2, "num_sentences": 1, "query": "This is the highlight query:"})
+    text={"include_html_tags": True, "max_characters": 1000})
 
 # basic answer
 response = exa.answer("This is a query to answer a question")
--- exa_py-1.12.1/exa_py/api.py
+++ exa_py-1.13.1/exa_py/api.py
@@ -56,7 +56,7 @@ def snake_to_camel(snake_str: str) -> str:
         return "$schema"
     if snake_str == "not_":
         return "not"
-
+
     components = snake_str.split("_")
     return components[0] + "".join(x.title() for x in components[1:])
 
@@ -261,6 +261,7 @@ class JSONSchema(TypedDict, total=False):
     """Represents a JSON Schema definition used for structured summary output.
     To learn more visit https://json-schema.org/overview/what-is-jsonschema.
     """
+
     schema_: str  # This will be converted to "$schema" in JSON
     title: str
     description: str
@@ -288,7 +289,7 @@ class SummaryContentsOptions(TypedDict, total=False):
 
     query: str
    schema: JSONSchema
-
+
 
 class ExtrasOptions(TypedDict, total=False):
     """A class representing additional extraction fields (e.g. links, images)"""
@@ -669,7 +670,7 @@ class AnswerResponse:
         citations (List[AnswerResult]): A list of citations used to generate the answer.
     """
 
-    answer: str
+    answer: Union[str, dict[str, Any]]
     citations: List[AnswerResult]
 
     def __str__(self):
@@ -765,9 +766,9 @@ class AsyncStreamAnswerResponse:
                 content = chunk["choices"][0]["delta"].get("content")
 
                 if (
-
-
-
+                    "citations" in chunk
+                    and chunk["citations"]
+                    and chunk["citations"] != "null"
                 ):
                     citations = [
                         AnswerResult(**to_snake_case(s)) for s in chunk["citations"]
@@ -776,6 +777,7 @@ class AsyncStreamAnswerResponse:
                     stream_chunk = StreamChunk(content=content, citations=citations)
                     if stream_chunk.has_data():
                         yield stream_chunk
+
         return generator()
 
     def close(self) -> None:
@@ -835,6 +837,37 @@ def nest_fields(original_dict: Dict, fields_to_nest: List[str], new_key: str):
     return original_dict
 
 
+@dataclass
+class ResearchTaskResponse:
+    """A class representing the response for a research task.
+
+    Attributes:
+        id (str): The unique identifier for the research request.
+        status (str): Status of the research request.
+        output (Optional[Dict[str, Any]]): The answer structured as JSON, if available.
+        citations (Optional[Dict[str, List[_Result]]]): List of citations used to generate the answer, grouped by root field in the output schema.
+    """
+
+    id: str
+    status: str
+    output: Optional[Dict[str, Any]]
+    citations: Dict[str, List[_Result]]
+
+    def __str__(self):
+        output_repr = (
+            json.dumps(self.output, indent=2, ensure_ascii=False)
+            if self.output is not None
+            else "None"
+        )
+        citations_str = "\n\n".join(str(src) for src in self.citations)
+        return (
+            f"ID: {self.id}\n"
+            f"Status: {self.status}\n"
+            f"Output: {output_repr}\n\n"
+            f"Citations:\n{citations_str}"
+        )
+
+
 class Exa:
     """A client for interacting with Exa API."""
 
@@ -859,10 +892,20 @@ class Exa:
                 "API key must be provided as an argument or in EXA_API_KEY environment variable"
             )
         self.base_url = base_url
-        self.headers = {
+        self.headers = {
+            "x-api-key": api_key,
+            "User-Agent": user_agent,
+            "Content-Type": "application/json",
+        }
         self.websets = WebsetsClient(self)
 
-    def request(
+    def request(
+        self,
+        endpoint: str,
+        data: Optional[Union[Dict[str, Any], str]] = None,
+        method: str = "POST",
+        params: Optional[Dict[str, Any]] = None,
+    ) -> Union[Dict[str, Any], requests.Response]:
         """Send a request to the Exa API, optionally streaming if data['stream'] is True.
 
         Args:
@@ -885,13 +928,13 @@ class Exa:
         else:
             # Otherwise, serialize the dictionary to JSON if it exists
             json_data = json.dumps(data, cls=ExaJSONEncoder) if data else None
-
+
         if data and data.get("stream"):
             res = requests.post(
-                self.base_url + endpoint,
+                self.base_url + endpoint,
                 data=json_data,
-                headers=self.headers,
-                stream=True
+                headers=self.headers,
+                stream=True,
             )
             return res
 
@@ -901,20 +944,14 @@ class Exa:
             )
         elif method.upper() == "POST":
             res = requests.post(
-                self.base_url + endpoint,
-                data=json_data,
-                headers=self.headers
+                self.base_url + endpoint, data=json_data, headers=self.headers
             )
         elif method.upper() == "PATCH":
             res = requests.patch(
-                self.base_url + endpoint,
-                data=json_data,
-                headers=self.headers
+                self.base_url + endpoint, data=json_data, headers=self.headers
            )
         elif method.upper() == "DELETE":
-            res = requests.delete(
-                self.base_url + endpoint, headers=self.headers
-            )
+            res = requests.delete(self.base_url + endpoint, headers=self.headers)
         else:
             raise ValueError(f"Unsupported HTTP method: {method}")
 
@@ -1845,6 +1882,7 @@ class Exa:
         text: Optional[bool] = False,
         system_prompt: Optional[str] = None,
         model: Optional[Literal["exa", "exa-pro"]] = None,
+        output_schema: Optional[dict[str, Any]] = None,
     ) -> Union[AnswerResponse, StreamAnswerResponse]: ...
 
     def answer(
@@ -1855,6 +1893,7 @@ class Exa:
         text: Optional[bool] = False,
         system_prompt: Optional[str] = None,
         model: Optional[Literal["exa", "exa-pro"]] = None,
+        output_schema: Optional[dict[str, Any]] = None,
     ) -> Union[AnswerResponse, StreamAnswerResponse]:
         """Generate an answer to a query using Exa's search and LLM capabilities.
 
@@ -1863,6 +1902,7 @@ class Exa:
             text (bool, optional): Whether to include full text in the results. Defaults to False.
             system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
             model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
+            output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
 
         Returns:
             AnswerResponse: An object containing the answer and citations.
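The new `output_schema` parameter pairs with the widened `AnswerResponse.answer` field (`Union[str, dict[str, Any]]`): when a schema is supplied, the answer can come back as structured JSON rather than plain text. Below is a minimal usage sketch against 1.13.1; the schema contents and query are illustrative placeholders, not taken from the package.

```python
from exa_py import Exa

exa = Exa(api_key="your-api-key")

# Illustrative JSON schema for the structured answer (hypothetical fields).
schema = {
    "type": "object",
    "properties": {
        "company": {"type": "string"},
        "founded_year": {"type": "integer"},
    },
    "required": ["company"],
}

response = exa.answer(
    "When was Exa founded and what is the company called?",
    output_schema=schema,
)

# With a schema provided, response.answer may be a dict matching the schema
# instead of a plain string (answer is typed Union[str, dict[str, Any]] in 1.13.1).
print(response.answer)
print(response.citations)
```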
@@ -1892,6 +1932,7 @@ class Exa:
         text: bool = False,
         system_prompt: Optional[str] = None,
         model: Optional[Literal["exa", "exa-pro"]] = None,
+        output_schema: Optional[dict[str, Any]] = None,
     ) -> StreamAnswerResponse:
         """Generate a streaming answer response.
 
@@ -1900,7 +1941,7 @@ class Exa:
             text (bool): Whether to include full text in the results. Defaults to False.
             system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
             model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
-
+            output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
         Returns:
             StreamAnswerResponse: An object that can be iterated over to retrieve (partial text, partial citations).
                 Each iteration yields a tuple of (Optional[str], Optional[List[AnswerResult]]).
@@ -1911,6 +1952,37 @@ class Exa:
         raw_response = self.request("/answer", options)
         return StreamAnswerResponse(raw_response)
 
+    def researchTask(
+        self,
+        *,
+        input_instructions: str,
+        output_schema: Dict[str, Any],
+    ) -> ResearchTaskResponse:
+        """Submit a research request to Exa.
+
+        Args:
+            input_instructions (str): The instructions for the research task.
+            output_schema (Dict[str, Any]): JSON schema describing the desired answer structure.
+        """
+        # Build the request payload expected by the Exa API
+        options = {
+            "input": {"instructions": input_instructions},
+            "output": {"schema": output_schema},
+        }
+
+        response = self.request("/research/tasks", options)
+
+        return ResearchTaskResponse(
+            id=response["id"],
+            status=response["status"],
+            output=response.get("output"),
+            citations={
+                key: [_Result(**to_snake_case(citation)) for citation in citations_list]
+                for key, citations_list in response.get("citations", {}).items()
+            },
+        )
+
+
 class AsyncExa(Exa):
     def __init__(self, api_key: str, api_base: str = "https://api.exa.ai"):
         super().__init__(api_key, api_base)
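The headline addition in this release is the `researchTask` method and its `ResearchTaskResponse` dataclass, which post to the new `/research/tasks` endpoint. A hedged sketch of calling the synchronous method follows; the instructions and schema are made-up placeholders, and whether `output` is populated immediately depends on the task's `status`.

```python
from exa_py import Exa

exa = Exa(api_key="your-api-key")

# Hypothetical output schema; any JSON Schema object the caller needs.
output_schema = {
    "type": "object",
    "properties": {
        "summary": {"type": "string"},
        "key_findings": {"type": "array", "items": {"type": "string"}},
    },
}

task = exa.researchTask(
    input_instructions="Summarize recent developments in battery recycling.",
    output_schema=output_schema,
)

print(task.id)         # unique identifier for the research request
print(task.status)     # status of the research request
print(task.output)     # answer structured as JSON, if available
print(task.citations)  # citations grouped by root field of the output schema
```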
@@ -1921,9 +1993,7 @@ class AsyncExa(Exa):
         # this may only be a
         if self._client is None:
             self._client = httpx.AsyncClient(
-                base_url=self.base_url,
-                headers=self.headers,
-                timeout=60
+                base_url=self.base_url, headers=self.headers, timeout=60
             )
         return self._client
 
@@ -1943,15 +2013,14 @@ class AsyncExa(Exa):
         """
         if data.get("stream"):
             request = httpx.Request(
-
-                self.base_url + endpoint,
-                json=data,
-                headers=self.headers
+                "POST", self.base_url + endpoint, json=data, headers=self.headers
             )
             res = await self.client.send(request, stream=True)
             return res
 
-        res = await self.client.post(
+        res = await self.client.post(
+            self.base_url + endpoint, json=data, headers=self.headers
+        )
         if res.status_code != 200:
             raise ValueError(
                 f"Request failed with status code {res.status_code}: {res.text}"
@@ -2189,6 +2258,7 @@ class AsyncExa(Exa):
         text: Optional[bool] = False,
         system_prompt: Optional[str] = None,
         model: Optional[Literal["exa", "exa-pro"]] = None,
+        output_schema: Optional[dict[str, Any]] = None,
     ) -> Union[AnswerResponse, StreamAnswerResponse]:
         """Generate an answer to a query using Exa's search and LLM capabilities.
 
@@ -2197,6 +2267,7 @@ class AsyncExa(Exa):
             text (bool, optional): Whether to include full text in the results. Defaults to False.
             system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
             model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
+            output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
 
         Returns:
             AnswerResponse: An object containing the answer and citations.
@@ -2226,6 +2297,7 @@ class AsyncExa(Exa):
         text: bool = False,
         system_prompt: Optional[str] = None,
         model: Optional[Literal["exa", "exa-pro"]] = None,
+        output_schema: Optional[dict[str, Any]] = None,
     ) -> AsyncStreamAnswerResponse:
         """Generate a streaming answer response.
 
@@ -2234,7 +2306,7 @@ class AsyncExa(Exa):
             text (bool): Whether to include full text in the results. Defaults to False.
             system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
             model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
-
+            output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
         Returns:
             AsyncStreamAnswerResponse: An object that can be iterated over to retrieve (partial text, partial citations).
                 Each iteration yields a tuple of (Optional[str], Optional[List[AnswerResult]]).
@@ -2244,3 +2316,36 @@ class AsyncExa(Exa):
         options["stream"] = True
         raw_response = await self.async_request("/answer", options)
         return AsyncStreamAnswerResponse(raw_response)
+
+    async def researchTask(
+        self,
+        *,
+        input_instructions: str,
+        output_schema: Dict[str, Any],
+    ) -> ResearchTaskResponse:
+        """Asynchronously submit a research request to Exa.
+
+        Args:
+            input_instructions (str): The instructions for the research task.
+            output_schema (Dict[str, Any]): JSON schema describing the desired answer structure.
+
+        Returns:
+            ResearchTaskResponse: The parsed response from the Exa API.
+        """
+        # Build the request payload expected by the Exa API
+        options = {
+            "input": {"instructions": input_instructions},
+            "output": {"schema": output_schema},
+        }
+
+        response = await self.async_request("/research/tasks", options)
+
+        return ResearchTaskResponse(
+            id=response["id"],
+            status=response["status"],
+            output=response.get("output"),
+            citations={
+                key: [_Result(**to_snake_case(citation)) for citation in citations_list]
+                for key, citations_list in response.get("citations", {}).items()
+            },
+        )
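`AsyncExa` gains the same method in async form, wired through `async_request`. A sketch of the async call under the same assumptions (placeholder instructions and schema; `AsyncExa` is imported from `exa_py.api`, where the diff defines it):

```python
import asyncio

from exa_py.api import AsyncExa

async def main() -> None:
    exa = AsyncExa(api_key="your-api-key")

    # Placeholder schema, mirroring the synchronous example above.
    output_schema = {
        "type": "object",
        "properties": {"summary": {"type": "string"}},
    }

    task = await exa.researchTask(
        input_instructions="Give a short overview of solid-state battery research.",
        output_schema=output_schema,
    )
    print(task.status, task.output)

asyncio.run(main())
```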
--- exa_py-1.12.1/pyproject.toml
+++ exa_py-1.13.1/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "exa-py"
-version = "1.
+version = "1.13.0"
 description = "Python SDK for Exa API."
 authors = ["Exa AI <hello@exa.ai>"]
 readme = "README.md"
@@ -32,7 +32,7 @@ in-project = true
 
 [project]
 name = "exa-py"
-version = "1.
+version = "1.13.1"
 description = "Python SDK for Exa API."
 readme = "README.md"
 requires-python = ">=3.9"