exa-py 1.8.6__py3-none-any.whl → 1.8.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of exa-py might be problematic.
exa_py/api.py
CHANGED
@@ -505,7 +505,7 @@ class ResultWithTextAndHighlightsAndSummary(_Result):
 
 @dataclass
 class AnswerResult:
-    """A class representing a …
+    """A class representing a result for an answer.
 
     Attributes:
         title (str): The title of the search result.
@@ -513,21 +513,21 @@ class AnswerResult:
         id (str): The temporary ID for the document.
         published_date (str, optional): An estimate of the creation date, from parsing HTML content.
         author (str, optional): If available, the author of the content.
+        text (str, optional): The full page text from each search result.
     """
-
+    id: str
     url: str
-    id: str
     title: Optional[str] = None
-    author: Optional[str] = None
     published_date: Optional[str] = None
+    author: Optional[str] = None
     text: Optional[str] = None
 
     def __init__(self, **kwargs):
-        self.url = kwargs['url']
         self.id = kwargs['id']
-        self.title = kwargs.get('title')
-        self.author = kwargs.get('author')
+        self.url = kwargs['url']
+        self.title = kwargs.get('title')
         self.published_date = kwargs.get('published_date')
+        self.author = kwargs.get('author')
         self.text = kwargs.get('text')
 
     def __str__(self):
@@ -546,23 +546,23 @@ class StreamChunk:
 
     Attributes:
         content (Optional[str]): The partial text content of the answer
-        sources (Optional[List[AnswerResult]]): List of sources if provided in this chunk
+        citations (Optional[List[AnswerResult]]): List of citations if provided in this chunk
     """
     content: Optional[str] = None
-    sources: Optional[List[AnswerResult]] = None
+    citations: Optional[List[AnswerResult]] = None
 
     def has_data(self) -> bool:
        """Check if this chunk contains any data."""
-        return self.content is not None or self.sources is not None
+        return self.content is not None or self.citations is not None
 
     def __str__(self) -> str:
        """Format the chunk data as a string."""
        output = ""
        if self.content:
            output += self.content
-        if self.sources:
-            output += "\nSources:"
-            for source in self.sources:
+        if self.citations:
+            output += "\nCitations:"
+            for source in self.citations:
                output += f"\n{source}"
        return output
 
@@ -573,21 +573,21 @@ class AnswerResponse:
 
     Attributes:
         answer (str): The generated answer.
-        sources (List[AnswerResult]): A list of sources used to generate the answer.
+        citations (List[AnswerResult]): A list of citations used to generate the answer.
     """
 
     answer: str
-    sources: List[AnswerResult]
+    citations: List[AnswerResult]
 
     def __str__(self):
-        output = f"Answer: {self.answer}\n\nSources:"
-        for source in self.sources:
+        output = f"Answer: {self.answer}\n\nCitations:"
+        for source in self.citations:
            output += f"\nTitle: {source.title}"
+            output += f"\nID: {source.id}"
            output += f"\nURL: {source.url}"
-            output += f"\nPublished: {source.published_date}"
+            output += f"\nPublished Date: {source.published_date}"
            output += f"\nAuthor: {source.author}"
-            …
-            output += f"\nText: {source.text}"
+            output += f"\nText: {source.text}"
            output += "\n"
        return output
 
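The hunks above rename the answer response's supporting-results field to citations. A minimal sketch of how a caller might consume the renamed field, using only attributes shown in this diff; the query string and the EXA_API_KEY environment variable are placeholders:

import os

from exa_py import Exa

exa = Exa(api_key=os.environ["EXA_API_KEY"])

# Non-streaming answer; per the docstring above, text=True also returns page text per citation.
response = exa.answer("What changed in the exa-py answer API?", text=True)

print(response.answer)
for citation in response.citations:
    print(citation.title, citation.url, citation.published_date)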
@@ -615,16 +615,16 @@ class StreamAnswerResponse:
                continue
 
            content = None
-            sources = None
+            citations = None
 
            if "choices" in chunk and chunk["choices"]:
                if "delta" in chunk["choices"][0]:
                    content = chunk["choices"][0]["delta"].get("content")
 
-            if "sources" in chunk and chunk["sources"] and chunk["sources"] != "null":
-                sources = [AnswerResult(**to_snake_case(s)) for s in chunk["sources"]]
+            if "citations" in chunk and chunk["citations"] and chunk["citations"] != "null":
+                citations = [AnswerResult(**to_snake_case(s)) for s in chunk["citations"]]
 
-            stream_chunk = StreamChunk(content=content, sources=sources)
+            stream_chunk = StreamChunk(content=content, citations=citations)
            if stream_chunk.has_data():
                yield stream_chunk
 
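A short sketch of consuming the streaming path after this change, assuming iteration over stream_answer() yields the StreamChunk objects produced by the generator above; the query and API key are placeholders:

import os

from exa_py import Exa

exa = Exa(api_key=os.environ["EXA_API_KEY"])

# Each yielded StreamChunk carries partial answer text and/or a batch of citations.
for chunk in exa.stream_answer("Summarize the latest exa-py release", text=True):
    if chunk.content:
        print(chunk.content, end="", flush=True)
    if chunk.citations:
        for citation in chunk.citations:
            print(f"\n[{citation.title}] {citation.url}")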
@@ -686,7 +686,7 @@ class Exa:
        self,
        api_key: Optional[str],
        base_url: str = "https://api.exa.ai",
-        user_agent: str = "exa-py 1.8.6",
+        user_agent: str = "exa-py 1.8.8",
    ):
        """Initialize the Exa client with the provided API key and optional base URL and user agent.
 
@@ -826,6 +826,8 @@ class Exa:
        use_autoprompt: Optional[bool] = None,
        type: Optional[str] = None,
        category: Optional[str] = None,
+        flags: Optional[List[str]] = None,
+        moderation: Optional[bool] = None,
        subpages: Optional[int] = None,
        livecrawl_timeout: Optional[int] = None,
        livecrawl: Optional[LIVECRAWL_OPTIONS] = None,
@@ -974,6 +976,8 @@ class Exa:
        category: Optional[str] = None,
        subpages: Optional[int] = None,
        subpage_target: Optional[Union[str, List[str]]] = None,
+        flags: Optional[List[str]] = None,
+        moderation: Optional[bool] = None,
        livecrawl_timeout: Optional[int] = None,
        livecrawl: Optional[LIVECRAWL_OPTIONS] = None,
        filter_empty_results: Optional[bool] = None,
@@ -1002,11 +1006,12 @@ class Exa:
        type: Optional[str] = None,
        category: Optional[str] = None,
        flags: Optional[List[str]] = None,
+        moderation: Optional[bool] = None,
        livecrawl_timeout: Optional[int] = None,
        livecrawl: Optional[LIVECRAWL_OPTIONS] = None,
-        filter_empty_results: Optional[bool] = None,
        subpages: Optional[int] = None,
        subpage_target: Optional[Union[str, List[str]]] = None,
+        filter_empty_results: Optional[bool] = None,
        extras: Optional[ExtrasOptions] = None,
    ) -> SearchResponse[ResultWithTextAndHighlightsAndSummary]:
        ...
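The new flags and moderation keyword arguments are added to the search overloads above but are not documented in this diff. A hypothetical call through search_and_contents, assuming moderation is a boolean filter applied by the API and flags is a list of opaque feature strings passed through unchanged:

import os

from exa_py import Exa

exa = Exa(api_key=os.environ["EXA_API_KEY"])

# Hypothetical usage of the newly added keyword arguments.
results = exa.search_and_contents(
    "open source vector databases",
    text=True,
    moderation=True,          # assumed: ask the API to filter unsafe results
    flags=["experimental"],   # assumed: opaque feature flags forwarded as-is
)
for result in results.results:
    print(result.title, result.url)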
@@ -1656,6 +1661,7 @@ class Exa:
        *,
        stream: Optional[bool] = False,
        text: Optional[bool] = False,
+        model: Optional[Literal["exa", "exa-pro"]] = None,
    ) -> Union[AnswerResponse, StreamAnswerResponse]:
        ...
 
@@ -1665,15 +1671,17 @@ class Exa:
        *,
        stream: Optional[bool] = False,
        text: Optional[bool] = False,
+        model: Optional[Literal["exa", "exa-pro"]] = None,
    ) -> Union[AnswerResponse, StreamAnswerResponse]:
        """Generate an answer to a query using Exa's search and LLM capabilities.
 
        Args:
            query (str): The query to answer.
            text (bool, optional): Whether to include full text in the results. Defaults to False.
+            model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
 
        Returns:
-            AnswerResponse: An object containing the answer and sources.
+            AnswerResponse: An object containing the answer and citations.
 
        Raises:
            ValueError: If stream=True is provided. Use stream_answer() instead for streaming responses.
@@ -1694,7 +1702,7 @@ class Exa:
 
        return AnswerResponse(
            response["answer"],
-            [AnswerResult(**to_snake_case(result)) for result in response["sources"]]
+            [AnswerResult(**to_snake_case(result)) for result in response["citations"]]
        )
 
    def stream_answer(
@@ -1702,15 +1710,17 @@ class Exa:
        query: str,
        *,
        text: bool = False,
+        model: Optional[Literal["exa", "exa-pro"]] = None,
    ) -> StreamAnswerResponse:
        """Generate a streaming answer response.
 
        Args:
            query (str): The query to answer.
            text (bool): Whether to include full text in the results. Defaults to False.
+            model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
 
        Returns:
-            StreamAnswerResponse: An object that can be iterated over to retrieve (partial text, partial sources).
+            StreamAnswerResponse: An object that can be iterated over to retrieve (partial text, partial citations).
            Each iteration yields a tuple of (Optional[str], Optional[List[AnswerResult]]).
        """
        options = {
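The new model parameter is threaded through both answer() and stream_answer(). A brief sketch combining it with streaming, assuming the accepted values are exactly the two literals declared above; the query and API key are placeholders:

import os

from exa_py import Exa

exa = Exa(api_key=os.environ["EXA_API_KEY"])

# model="exa-pro" selects the heavier answer model; omit it to use the default.
stream = exa.stream_answer(
    "What changed in the exa-py answer API?",
    text=True,
    model="exa-pro",
)
for chunk in stream:
    print(chunk, end="")  # StreamChunk.__str__ formats content plus citations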
exa_py-1.8.8.dist-info/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: exa-py
-Version: 1.8.6
+Version: 1.8.8
 Summary: Python SDK for Exa API.
 Author: Exa AI
 Author-email: hello@exa.ai
@@ -91,8 +91,8 @@ exa = Exa(api_key="your-api-key")
 # basic answer
 response = exa.answer("This is a query to answer a question")
 
-# answer with full text
-response = exa.answer("This is a query to answer a question", text=True)
+# answer with full text, using the exa-pro model (sends 2 expanded quries to exa search)
+response = exa.answer("This is a query to answer a question", text=True, model="exa-pro")
 
 # answer with streaming
 response = exa.stream_answer("This is a query to answer:")
exa_py-1.8.8.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+exa_py/__init__.py,sha256=1selemczpRm1y8V9cWNm90LARnU1jbtyp-Qpx3c7cTw,28
+exa_py/api.py,sha256=yzQkUJ94RbnMglVjpevmwuixvKnkwhN_r6KuVV9j3Sw,63279
+exa_py/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+exa_py/utils.py,sha256=Rc1FJjoR9LQ7L_OJM91Sd1GNkbHjcLyEvJENhRix6gc,2405
+exa_py-1.8.8.dist-info/METADATA,sha256=UV9_4Df_l1JP-SPRFfryyg7SP7VwYNZ3Iz34FsIXP3M,3419
+exa_py-1.8.8.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+exa_py-1.8.8.dist-info/RECORD,,
exa_py-1.8.6.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-exa_py/__init__.py,sha256=1selemczpRm1y8V9cWNm90LARnU1jbtyp-Qpx3c7cTw,28
-exa_py/api.py,sha256=MJbdvN1AAVS7GA9IUT56KF8U_R8AT-sIcvCVG0EY6YM,62530
-exa_py/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-exa_py/utils.py,sha256=Rc1FJjoR9LQ7L_OJM91Sd1GNkbHjcLyEvJENhRix6gc,2405
-exa_py-1.8.6.dist-info/METADATA,sha256=jOlJfJYWMCEhoOGW1FY-bDjHLogR5soamiu94sPOuiw,3337
-exa_py-1.8.6.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-exa_py-1.8.6.dist-info/RECORD,,
File without changes