exa-py 1.14.7__tar.gz → 1.14.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of exa-py might be problematic. Click here for more details.
- {exa_py-1.14.7 → exa_py-1.14.8}/PKG-INFO +3 -3
- {exa_py-1.14.7 → exa_py-1.14.8}/README.md +2 -2
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/api.py +8 -8
- {exa_py-1.14.7 → exa_py-1.14.8}/pyproject.toml +1 -1
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/py.typed +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/research/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/research/client.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/research/models.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/utils.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/_generator/pydantic/BaseModel.jinja2 +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/client.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/core/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/core/base.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/enrichments/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/enrichments/client.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/imports/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/imports/client.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/items/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/items/client.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/monitors/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/monitors/client.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/monitors/runs/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/monitors/runs/client.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/searches/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/searches/client.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/types.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/webhooks/__init__.py +0 -0
- {exa_py-1.14.7 → exa_py-1.14.8}/exa_py/websets/webhooks/client.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.3
|
|
2
2
|
Name: exa-py
|
|
3
|
-
Version: 1.14.7
|
|
3
|
+
Version: 1.14.8
|
|
4
4
|
Summary: Python SDK for Exa API.
|
|
5
5
|
License: MIT
|
|
6
6
|
Author: Exa AI
|
|
@@ -85,8 +85,8 @@ exa = Exa(api_key="your-api-key")
|
|
|
85
85
|
# basic answer
|
|
86
86
|
response = exa.answer("This is a query to answer a question")
|
|
87
87
|
|
|
88
|
-
# answer with full text
|
|
89
|
-
response = exa.answer("This is a query to answer a question", text=True
|
|
88
|
+
# answer with full text
|
|
89
|
+
response = exa.answer("This is a query to answer a question", text=True)
|
|
90
90
|
|
|
91
91
|
# answer with streaming
|
|
92
92
|
response = exa.stream_answer("This is a query to answer:")
|
|
@@ -63,8 +63,8 @@ exa = Exa(api_key="your-api-key")
|
|
|
63
63
|
# basic answer
|
|
64
64
|
response = exa.answer("This is a query to answer a question")
|
|
65
65
|
|
|
66
|
-
# answer with full text
|
|
67
|
-
response = exa.answer("This is a query to answer a question", text=True
|
|
66
|
+
# answer with full text
|
|
67
|
+
response = exa.answer("This is a query to answer a question", text=True)
|
|
68
68
|
|
|
69
69
|
# answer with streaming
|
|
70
70
|
response = exa.stream_answer("This is a query to answer:")
|
|
@@ -134,7 +134,7 @@ SEARCH_OPTIONS_TYPES = {
|
|
|
134
134
|
list
|
|
135
135
|
], # Must not be present in webpage text. (One string, up to 5 words)
|
|
136
136
|
"use_autoprompt": [bool], # Convert query to Exa. (Default: false)
|
|
137
|
-
"type": [str], # 'keyword', 'neural', or 'auto' (Default: auto)
|
|
137
|
+
"type": [str], # 'keyword', 'neural', 'hybrid', or 'auto' (Default: auto)
|
|
138
138
|
"category": [
|
|
139
139
|
str
|
|
140
140
|
], # A data category to focus on: 'company', 'research paper', 'news', 'pdf', 'github', 'tweet', 'personal site', 'linkedin profile', 'financial report'
|
|
@@ -874,7 +874,7 @@ class Exa:
|
|
|
874
874
|
self,
|
|
875
875
|
api_key: Optional[str],
|
|
876
876
|
base_url: str = "https://api.exa.ai",
|
|
877
|
-
user_agent: str = "exa-py 1.14.8",
|
|
877
|
+
user_agent: str = "exa-py 1.14.9",
|
|
878
878
|
):
|
|
879
879
|
"""Initialize the Exa client with the provided API key and optional base URL and user agent.
|
|
880
880
|
|
|
@@ -995,7 +995,7 @@ class Exa:
|
|
|
995
995
|
include_text (List[str], optional): Strings that must appear in the page text.
|
|
996
996
|
exclude_text (List[str], optional): Strings that must not appear in the page text.
|
|
997
997
|
use_autoprompt (bool, optional): Convert query to Exa (default False).
|
|
998
|
-
type (str, optional): 'keyword' or 'neural' (default 'neural').
|
|
998
|
+
type (str, optional): 'keyword', 'neural', or 'hybrid' (default 'neural').
|
|
999
999
|
category (str, optional): e.g. 'company'
|
|
1000
1000
|
flags (List[str], optional): Experimental flags for Exa usage.
|
|
1001
1001
|
moderation (bool, optional): If True, the search results will be moderated for safety.
|
|
@@ -1909,7 +1909,7 @@ class Exa:
|
|
|
1909
1909
|
query (str): The query to answer.
|
|
1910
1910
|
text (bool, optional): Whether to include full text in the results. Defaults to False.
|
|
1911
1911
|
system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
|
|
1912
|
-
model (str, optional): The model to use for answering.
|
|
1912
|
+
model (str, optional): The model to use for answering. Defaults to None.
|
|
1913
1913
|
output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
|
|
1914
1914
|
|
|
1915
1915
|
Returns:
|
|
@@ -1948,7 +1948,7 @@ class Exa:
|
|
|
1948
1948
|
query (str): The query to answer.
|
|
1949
1949
|
text (bool): Whether to include full text in the results. Defaults to False.
|
|
1950
1950
|
system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
|
|
1951
|
-
model (str, optional): The model to use for answering.
|
|
1951
|
+
model (str, optional): The model to use for answering. Defaults to None.
|
|
1952
1952
|
output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
|
|
1953
1953
|
Returns:
|
|
1954
1954
|
StreamAnswerResponse: An object that can be iterated over to retrieve (partial text, partial citations).
|
|
@@ -2040,7 +2040,7 @@ class AsyncExa(Exa):
|
|
|
2040
2040
|
include_text (List[str], optional): Strings that must appear in the page text.
|
|
2041
2041
|
exclude_text (List[str], optional): Strings that must not appear in the page text.
|
|
2042
2042
|
use_autoprompt (bool, optional): Convert query to Exa (default False).
|
|
2043
|
-
type (str, optional): 'keyword' or 'neural' (default 'neural').
|
|
2043
|
+
type (str, optional): 'keyword', 'neural', or 'hybrid' (default 'neural').
|
|
2044
2044
|
category (str, optional): e.g. 'company'
|
|
2045
2045
|
flags (List[str], optional): Experimental flags for Exa usage.
|
|
2046
2046
|
moderation (bool, optional): If True, the search results will be moderated for safety.
|
|
@@ -2253,7 +2253,7 @@ class AsyncExa(Exa):
|
|
|
2253
2253
|
query (str): The query to answer.
|
|
2254
2254
|
text (bool, optional): Whether to include full text in the results. Defaults to False.
|
|
2255
2255
|
system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
|
|
2256
|
-
model (str, optional): The model to use for answering.
|
|
2256
|
+
model (str, optional): The model to use for answering. Defaults to None.
|
|
2257
2257
|
output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
|
|
2258
2258
|
|
|
2259
2259
|
Returns:
|
|
@@ -2292,7 +2292,7 @@ class AsyncExa(Exa):
|
|
|
2292
2292
|
query (str): The query to answer.
|
|
2293
2293
|
text (bool): Whether to include full text in the results. Defaults to False.
|
|
2294
2294
|
system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
|
|
2295
|
-
model (str, optional): The model to use for answering.
|
|
2295
|
+
model (str, optional): The model to use for answering. Defaults to None.
|
|
2296
2296
|
output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
|
|
2297
2297
|
Returns:
|
|
2298
2298
|
AsyncStreamAnswerResponse: An object that can be iterated over to retrieve (partial text, partial citations).
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|