exa_py-1.8.5-py3-none-any.whl → exa_py-1.8.7-py3-none-any.whl
This diff shows the content of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
exa_py/api.py
CHANGED

@@ -17,8 +17,10 @@ from typing import (
     Literal,
     get_origin,
     get_args,
+    Iterator,
 )
 from typing_extensions import TypedDict
+import json
 
 from openai import OpenAI
 from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam

@@ -30,7 +32,6 @@ from exa_py.utils import (
     maybe_get_query,
 )
 import os
-from typing import Iterator
 
 is_beta = os.getenv("IS_BETA") == "True"
 
@@ -504,7 +505,7 @@ class ResultWithTextAndHighlightsAndSummary(_Result):
 
 @dataclass
 class AnswerResult:
-    """A class representing a
+    """A class representing a result for an answer.
 
     Attributes:
         title (str): The title of the search result.

@@ -512,21 +513,21 @@ class AnswerResult:
         id (str): The temporary ID for the document.
         published_date (str, optional): An estimate of the creation date, from parsing HTML content.
         author (str, optional): If available, the author of the content.
+        text (str, optional): The full page text from each search result.
     """
-
+    id: str
     url: str
-    id: str
     title: Optional[str] = None
-    author: Optional[str] = None
     published_date: Optional[str] = None
+    author: Optional[str] = None
     text: Optional[str] = None
 
     def __init__(self, **kwargs):
-        self.url = kwargs['url']
         self.id = kwargs['id']
-        self.
-        self.
+        self.url = kwargs['url']
+        self.title = kwargs.get('title')
         self.published_date = kwargs.get('published_date')
+        self.author = kwargs.get('author')
         self.text = kwargs.get('text')
 
     def __str__(self):
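The constructor above reads id and url as required keyword arguments and falls back to None for the optional fields. A minimal sketch of building one directly, with illustrative values (not a real API response):

```python
# Hedged sketch: constructing an AnswerResult the way answer()/stream_answer()
# do after the SDK has snake_cased the citation keys. Values are illustrative.
from exa_py.api import AnswerResult

citation = AnswerResult(
    id="doc_123",                       # required by __init__ (kwargs['id'])
    url="https://example.com/article",  # required by __init__ (kwargs['url'])
    title="Example article",            # optional fields default to None
    published_date="2024-01-01",
    author="Jane Doe",
)
print(citation)  # __str__ (see above) includes the ID, published date, author, and text fields
```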
@@ -536,8 +537,34 @@ class AnswerResult:
             f"ID: {self.id}\n"
             f"Published Date: {self.published_date}\n"
             f"Author: {self.author}\n"
-            f"Text: {self.text}\n"
+            f"Text: {self.text}\n\n"
         )
+
+
+@dataclass
+class StreamChunk:
+    """A class representing a single chunk of streaming data.
+
+    Attributes:
+        content (Optional[str]): The partial text content of the answer
+        citations (Optional[List[AnswerResult]]): List of citations if provided in this chunk
+    """
+    content: Optional[str] = None
+    citations: Optional[List[AnswerResult]] = None
+
+    def has_data(self) -> bool:
+        """Check if this chunk contains any data."""
+        return self.content is not None or self.citations is not None
+
+    def __str__(self) -> str:
+        """Format the chunk data as a string."""
+        output = ""
+        if self.content:
+            output += self.content
+        if self.citations:
+            output += "\nCitations:"
+            for source in self.citations:
+                output += f"\n{source}"
+        return output
 
 
 @dataclass

@@ -546,18 +573,66 @@ class AnswerResponse:
 
     Attributes:
         answer (str): The generated answer.
-
+        citations (List[AnswerResult]): A list of citations used to generate the answer.
     """
 
     answer: str
-
+    citations: List[AnswerResult]
 
     def __str__(self):
-        output = f"Answer: {self.answer}\n\
-
+        output = f"Answer: {self.answer}\n\nCitations:"
+        for source in self.citations:
+            output += f"\nTitle: {source.title}"
+            output += f"\nID: {source.id}"
+            output += f"\nURL: {source.url}"
+            output += f"\nPublished Date: {source.published_date}"
+            output += f"\nAuthor: {source.author}"
+            output += f"\nText: {source.text}"
+            output += "\n"
         return output
 
 
+class StreamAnswerResponse:
+    """A class representing a streaming answer response."""
+    def __init__(self, raw_response: requests.Response):
+        self._raw_response = raw_response
+        self._ensure_ok_status()
+
+    def _ensure_ok_status(self):
+        if self._raw_response.status_code != 200:
+            raise ValueError(
+                f"Request failed with status code {self._raw_response.status_code}: {self._raw_response.text}"
+            )
+
+    def __iter__(self) -> Iterator[StreamChunk]:
+        for line in self._raw_response.iter_lines():
+            if not line:
+                continue
+            decoded_line = line.decode("utf-8").removeprefix("data: ")
+            try:
+                chunk = json.loads(decoded_line)
+            except json.JSONDecodeError:
+                continue
+
+            content = None
+            citations = None
+
+            if "choices" in chunk and chunk["choices"]:
+                if "delta" in chunk["choices"][0]:
+                    content = chunk["choices"][0]["delta"].get("content")
+
+            if "citations" in chunk and chunk["citations"] and chunk["citations"] != "null":
+                citations = [AnswerResult(**to_snake_case(s)) for s in chunk["citations"]]
+
+            stream_chunk = StreamChunk(content=content, citations=citations)
+            if stream_chunk.has_data():
+                yield stream_chunk
+
+    def close(self) -> None:
+        """Close the underlying raw response to release the network socket."""
+        self._raw_response.close()
+
+
 T = TypeVar("T")
 
 
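A StreamChunk carries at most a partial content string and an optional list of citations; has_data() filters out empty chunks and __str__ gives a printable rendering. A minimal local sketch of that behaviour (no network call, values illustrative):

```python
# Hedged sketch: StreamChunk behaviour in isolation; no API request is made.
from exa_py.api import AnswerResult, StreamChunk

empty = StreamChunk()
assert not empty.has_data()      # neither content nor citations were set

text_only = StreamChunk(content="Partial answer text ")
with_citation = StreamChunk(
    citations=[AnswerResult(id="doc_1", url="https://example.com", title="Example")]
)

print(text_only)       # prints only the partial text
print(with_citation)   # prints "Citations:" followed by the formatted AnswerResult
```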
@@ -611,7 +686,7 @@ class Exa:
         self,
         api_key: Optional[str],
         base_url: str = "https://api.exa.ai",
-        user_agent: str = "exa-py 1.8.5",
+        user_agent: str = "exa-py 1.8.7",
     ):
         """Initialize the Exa client with the provided API key and optional base URL and user agent.
 

@@ -638,7 +713,7 @@ class Exa:
             data (dict): The JSON payload to send.
 
         Returns:
-            Union[dict,
+            Union[dict, requests.Response]: If streaming, returns the Response object.
                 Otherwise, returns the JSON-decoded response as a dict.
 
         Raises:

@@ -646,9 +721,7 @@ class Exa:
         """
         if data.get("stream"):
             res = requests.post(self.base_url + endpoint, json=data, headers=self.headers, stream=True)
-
-            raise ValueError(f"Request failed with status code {res.status_code}: {res.text}")
-            return (line.decode("utf-8") for line in res.iter_lines() if line)
+            return res
 
         res = requests.post(self.base_url + endpoint, json=data, headers=self.headers)
         if res.status_code != 200:
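With this change the streaming branch of request() no longer raises or pre-decodes lines; it returns the live requests.Response, and StreamAnswerResponse (above) performs the status check and line parsing. A hedged sketch of that low-level flow against the /answer endpoint; the x-api-key header name and payload values are assumptions here, not taken from this hunk:

```python
# Hedged sketch of the raw streaming flow the new request() branch enables.
# Header name and payload values are illustrative assumptions.
import json
import os
import requests

resp = requests.post(
    "https://api.exa.ai/answer",
    json={"query": "example question", "stream": True},
    headers={"x-api-key": os.environ["EXA_API_KEY"]},
    stream=True,  # keep the connection open and read lines lazily
)
if resp.status_code != 200:
    raise ValueError(f"Request failed with status code {resp.status_code}: {resp.text}")

for line in resp.iter_lines():
    if not line:
        continue                                  # skip blank keep-alive lines
    payload = line.decode("utf-8").removeprefix("data: ")
    try:
        event = json.loads(payload)
    except json.JSONDecodeError:
        continue                                  # ignore non-JSON lines, as the SDK does
    choices = event.get("choices") or [{}]
    delta = choices[0].get("delta") or {}
    print(delta.get("content") or "", end="", flush=True)

resp.close()
```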
@@ -730,6 +803,7 @@ class Exa:
         livecrawl: Optional[LIVECRAWL_OPTIONS] = None,
         filter_empty_results: Optional[bool] = None,
         subpages: Optional[int] = None,
+        subpage_target: Optional[Union[str, List[str]]] = None,
         extras: Optional[ExtrasOptions] = None,
     ) -> SearchResponse[ResultWithText]:
         ...

@@ -752,12 +826,11 @@ class Exa:
         use_autoprompt: Optional[bool] = None,
         type: Optional[str] = None,
         category: Optional[str] = None,
-        flags: Optional[List[str]] = None,
-        moderation: Optional[bool] = None,
         subpages: Optional[int] = None,
         livecrawl_timeout: Optional[int] = None,
         livecrawl: Optional[LIVECRAWL_OPTIONS] = None,
         filter_empty_results: Optional[bool] = None,
+        subpage_target: Optional[Union[str, List[str]]] = None,
         extras: Optional[ExtrasOptions] = None,
     ) -> SearchResponse[ResultWithText]:
         ...

@@ -901,8 +974,6 @@ class Exa:
         category: Optional[str] = None,
         subpages: Optional[int] = None,
         subpage_target: Optional[Union[str, List[str]]] = None,
-        flags: Optional[List[str]] = None,
-        moderation: Optional[bool] = None,
         livecrawl_timeout: Optional[int] = None,
         livecrawl: Optional[LIVECRAWL_OPTIONS] = None,
         filter_empty_results: Optional[bool] = None,

@@ -931,12 +1002,11 @@ class Exa:
         type: Optional[str] = None,
         category: Optional[str] = None,
         flags: Optional[List[str]] = None,
-        moderation: Optional[bool] = None,
         livecrawl_timeout: Optional[int] = None,
         livecrawl: Optional[LIVECRAWL_OPTIONS] = None,
+        filter_empty_results: Optional[bool] = None,
         subpages: Optional[int] = None,
         subpage_target: Optional[Union[str, List[str]]] = None,
-        filter_empty_results: Optional[bool] = None,
         extras: Optional[ExtrasOptions] = None,
     ) -> SearchResponse[ResultWithTextAndHighlightsAndSummary]:
         ...
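These hunks thread a subpage_target parameter through several content-retrieval overloads (and drop flags/moderation from a couple of them). A hedged usage sketch; the method name search_and_contents and the concrete values are assumptions based on the surrounding signatures, not confirmed by these hunks alone:

```python
# Hedged sketch: passing the new subpage_target parameter alongside subpages.
# Assumes these overloads belong to Exa.search_and_contents; values are illustrative.
import os
from exa_py import Exa

exa = Exa(api_key=os.environ["EXA_API_KEY"])

response = exa.search_and_contents(
    "open-source vector databases",
    text=True,
    subpages=2,                          # crawl up to 2 subpages per result
    subpage_target=["docs", "pricing"],  # str or List[str]: which subpages to prefer
)
for result in response.results:
    print(result.url)
```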
@@ -1584,33 +1654,36 @@ class Exa:
         self,
         query: str,
         *,
-        expanded_queries_limit: Optional[int] = 1,
         stream: Optional[bool] = False,
-
-    ) -> Union[AnswerResponse,
+        text: Optional[bool] = False,
+    ) -> Union[AnswerResponse, StreamAnswerResponse]:
         ...
 
     def answer(
         self,
         query: str,
         *,
-        expanded_queries_limit: Optional[int] = 1,
         stream: Optional[bool] = False,
-
-    ) -> Union[AnswerResponse,
+        text: Optional[bool] = False,
+    ) -> Union[AnswerResponse, StreamAnswerResponse]:
         """Generate an answer to a query using Exa's search and LLM capabilities.
 
         Args:
             query (str): The query to answer.
-
-            stream (bool, optional): Whether to stream the response. Defaults to False.
-            include_text (bool, optional): Whether to include full text in the results. Defaults to False.
+            text (bool, optional): Whether to include full text in the results. Defaults to False.
 
         Returns:
-
-
-
+            AnswerResponse: An object containing the answer and citations.
+
+        Raises:
+            ValueError: If stream=True is provided. Use stream_answer() instead for streaming responses.
         """
+        if stream:
+            raise ValueError(
+                "stream=True is not supported in `answer()`. "
+                "Please use `stream_answer(...)` for streaming."
+            )
+
         options = {
             k: v
             for k, v in locals().items()

@@ -1619,10 +1692,34 @@ class Exa:
         options = to_camel_case(options)
         response = self.request("/answer", options)
 
-        if stream:
-            return response
-
         return AnswerResponse(
             response["answer"],
-            [AnswerResult(**to_snake_case(result)) for result in response["
+            [AnswerResult(**to_snake_case(result)) for result in response["citations"]]
         )
+
+    def stream_answer(
+        self,
+        query: str,
+        *,
+        text: bool = False,
+    ) -> StreamAnswerResponse:
+        """Generate a streaming answer response.
+
+        Args:
+            query (str): The query to answer.
+            text (bool): Whether to include full text in the results. Defaults to False.
+
+        Returns:
+            StreamAnswerResponse: An object that can be iterated over to retrieve (partial text, partial citations).
+                Each iteration yields a tuple of (Optional[str], Optional[List[AnswerResult]]).
+        """
+        options = {
+            k: v
+            for k, v in locals().items()
+            if k != "self" and v is not None
+        }
+        options = to_camel_case(options)
+        options["stream"] = True
+        raw_response = self.request("/answer", options)
+        return StreamAnswerResponse(raw_response)
+
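Taken together, answer() now always returns an AnswerResponse (and raises if stream=True is passed), while streaming moves to the dedicated stream_answer() method. A hedged end-to-end sketch, assuming an EXA_API_KEY environment variable; the queries are illustrative:

```python
# Hedged sketch of the split between answer() and stream_answer().
import os
from exa_py import Exa

exa = Exa(api_key=os.environ["EXA_API_KEY"])

# Non-streaming: AnswerResponse with .answer and .citations (List[AnswerResult]).
resp = exa.answer("Why is the sky blue?", text=True)
print(resp.answer)
for citation in resp.citations:
    print(citation.title, citation.url)

# Streaming: iterate StreamChunk objects, then close the underlying connection.
stream = exa.stream_answer("Why is the sky blue?")
try:
    for chunk in stream:
        if chunk.content:
            print(chunk.content, end="", flush=True)
        if chunk.citations:
            print("\n--- citations ---")
            for citation in chunk.citations:
                print(citation.url)
finally:
    stream.close()

# Note: exa.answer(..., stream=True) now raises ValueError pointing to stream_answer().
```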
exa_py/example.py
ADDED

exa_py-1.8.5.dist-info/METADATA → exa_py-1.8.7.dist-info/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: exa-py
-Version: 1.8.5
+Version: 1.8.7
 Summary: Python SDK for Exa API.
 Author: Exa AI
 Author-email: hello@exa.ai

@@ -91,15 +91,16 @@ exa = Exa(api_key="your-api-key")
 # basic answer
 response = exa.answer("This is a query to answer a question")
 
-# answer with
-response = exa.answer("This is a query to answer a question",
+# answer with full text
+response = exa.answer("This is a query to answer a question", text=True)
 
 # answer with streaming
-response = exa.
+response = exa.stream_answer("This is a query to answer:")
 
-# Print each chunk as it arrives when
+# Print each chunk as it arrives when using the stream_answer method
 for chunk in response:
-    print(chunk)
+    print(chunk, end='', flush=True)
+
 ```
 
 
exa_py-1.8.7.dist-info/RECORD
ADDED

@@ -0,0 +1,8 @@
+exa_py/__init__.py,sha256=1selemczpRm1y8V9cWNm90LARnU1jbtyp-Qpx3c7cTw,28
+exa_py/api.py,sha256=5uuKVZpLtXQvxEi_X7W_x8JYqTQlsO8R2Ln0IRAQCrk,62663
+exa_py/example.py,sha256=V2uZvFTQFLVr61lVQ_HbZz8G8TFT6Ic44-TTE5ixzBk,235
+exa_py/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+exa_py/utils.py,sha256=Rc1FJjoR9LQ7L_OJM91Sd1GNkbHjcLyEvJENhRix6gc,2405
+exa_py-1.8.7.dist-info/METADATA,sha256=xbby0b9LVLEzzBMu1mGivh0ZO0gDm_xjvPoF219riLk,3337
+exa_py-1.8.7.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+exa_py-1.8.7.dist-info/RECORD,,
exa_py-1.8.5.dist-info/RECORD
DELETED

@@ -1,7 +0,0 @@
-exa_py/__init__.py,sha256=1selemczpRm1y8V9cWNm90LARnU1jbtyp-Qpx3c7cTw,28
-exa_py/api.py,sha256=E84d1umFPyi4Xz7L1-BFHYfvCvt9LlT4RBN1lAoPwhk,59596
-exa_py/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-exa_py/utils.py,sha256=Rc1FJjoR9LQ7L_OJM91Sd1GNkbHjcLyEvJENhRix6gc,2405
-exa_py-1.8.5.dist-info/METADATA,sha256=wUk1PRI9cYvfX_OwLxOOhVf_VBp1E5DiqAdqmUJdUdM,3389
-exa_py-1.8.5.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-exa_py-1.8.5.dist-info/RECORD,,