exa-py 1.13.0.tar.gz → 1.13.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of exa-py might be problematic.

Files changed (29)
  1. {exa_py-1.13.0 → exa_py-1.13.1}/PKG-INFO +18 -13
  2. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/api.py +42 -31
  3. exa_py-1.13.1/exa_py/websets/_generator/pydantic/BaseModel.jinja2 +42 -0
  4. {exa_py-1.13.0 → exa_py-1.13.1}/pyproject.toml +1 -1
  5. exa_py-1.13.0/exa_py.egg-info/PKG-INFO +0 -95
  6. exa_py-1.13.0/exa_py.egg-info/SOURCES.txt +0 -26
  7. exa_py-1.13.0/exa_py.egg-info/dependency_links.txt +0 -1
  8. exa_py-1.13.0/exa_py.egg-info/requires.txt +0 -6
  9. exa_py-1.13.0/exa_py.egg-info/top_level.txt +0 -1
  10. exa_py-1.13.0/setup.cfg +0 -4
  11. exa_py-1.13.0/setup.py +0 -26
  12. exa_py-1.13.0/tests/test_websets.py +0 -415
  13. {exa_py-1.13.0 → exa_py-1.13.1}/README.md +0 -0
  14. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/__init__.py +0 -0
  15. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/py.typed +0 -0
  16. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/utils.py +0 -0
  17. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/__init__.py +0 -0
  18. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/client.py +0 -0
  19. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/core/__init__.py +0 -0
  20. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/core/base.py +0 -0
  21. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/enrichments/__init__.py +0 -0
  22. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/enrichments/client.py +0 -0
  23. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/items/__init__.py +0 -0
  24. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/items/client.py +0 -0
  25. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/searches/__init__.py +0 -0
  26. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/searches/client.py +0 -0
  27. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/types.py +0 -0
  28. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/webhooks/__init__.py +0 -0
  29. {exa_py-1.13.0 → exa_py-1.13.1}/exa_py/websets/webhooks/client.py +0 -0
{exa_py-1.13.0 → exa_py-1.13.1}/PKG-INFO
@@ -1,21 +1,25 @@
- Metadata-Version: 2.4
+ Metadata-Version: 2.3
  Name: exa-py
- Version: 1.13.0
+ Version: 1.13.1
  Summary: Python SDK for Exa API.
- Home-page: https://github.com/exa-labs/exa-py
- Author: Exa
- Author-email: Exa AI <hello@exa.ai>
  License: MIT
+ Author: Exa AI
+ Author-email: hello@exa.ai
  Requires-Python: >=3.9
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Dist: httpx (>=0.28.1)
+ Requires-Dist: openai (>=1.48)
+ Requires-Dist: pydantic (>=2.10.6)
+ Requires-Dist: pytest-mock (>=3.14.0)
+ Requires-Dist: requests (>=2.32.3)
+ Requires-Dist: typing-extensions (>=4.12.2)
  Description-Content-Type: text/markdown
- Requires-Dist: requests>=2.32.3
- Requires-Dist: typing-extensions>=4.12.2
- Requires-Dist: openai>=1.48
- Requires-Dist: pydantic>=2.10.6
- Requires-Dist: pytest-mock>=3.14.0
- Requires-Dist: httpx>=0.28.1
- Dynamic: author
- Dynamic: home-page

  # Exa

@@ -93,3 +97,4 @@ exa = Exa(api_key="your-api-key")

  ```

+
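The dependency pins are unchanged between the two releases; what changes is how the metadata is emitted (parenthesized `Requires-Dist` specifiers and explicit classifiers instead of `Dynamic:` fields, which is typical of a pyproject-only build). As a hedged sanity check, not part of the diff, the declared requirements of an installed copy can be read back with the standard library:

```python
# Hedged sketch (not part of the diff): confirm an installed exa-py matches
# the metadata shown above. importlib.metadata ships with Python 3.8+.
from importlib.metadata import requires, version

assert version("exa-py") == "1.13.1"
for req in requires("exa-py") or []:
    print(req)  # e.g. "httpx (>=0.28.1)"
```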
{exa_py-1.13.0 → exa_py-1.13.1}/exa_py/api.py
@@ -56,7 +56,7 @@ def snake_to_camel(snake_str: str) -> str:
  return "$schema"
  if snake_str == "not_":
  return "not"
-
+
  components = snake_str.split("_")
  return components[0] + "".join(x.title() for x in components[1:])

@@ -261,6 +261,7 @@ class JSONSchema(TypedDict, total=False):
  """Represents a JSON Schema definition used for structured summary output.
  To learn more visit https://json-schema.org/overview/what-is-jsonschema.
  """
+
  schema_: str # This will be converted to "$schema" in JSON
  title: str
  description: str
@@ -288,7 +289,7 @@ class SummaryContentsOptions(TypedDict, total=False):

  query: str
  schema: JSONSchema
-
+

  class ExtrasOptions(TypedDict, total=False):
  """A class representing additional extraction fields (e.g. links, images)"""
@@ -669,7 +670,7 @@ class AnswerResponse:
  citations (List[AnswerResult]): A list of citations used to generate the answer.
  """

- answer: str
+ answer: Union[str, dict[str, Any]]
  citations: List[AnswerResult]

  def __str__(self):
@@ -765,9 +766,9 @@ class AsyncStreamAnswerResponse:
  content = chunk["choices"][0]["delta"].get("content")

  if (
- "citations" in chunk
- and chunk["citations"]
- and chunk["citations"] != "null"
+ "citations" in chunk
+ and chunk["citations"]
+ and chunk["citations"] != "null"
  ):
  citations = [
  AnswerResult(**to_snake_case(s)) for s in chunk["citations"]
@@ -776,6 +777,7 @@ class AsyncStreamAnswerResponse:
  stream_chunk = StreamChunk(content=content, citations=citations)
  if stream_chunk.has_data():
  yield stream_chunk
+
  return generator()

  def close(self) -> None:
@@ -834,6 +836,7 @@ def nest_fields(original_dict: Dict, fields_to_nest: List[str], new_key: str):

  return original_dict

+
  @dataclass
  class ResearchTaskResponse:
  """A class representing the response for a research task.
@@ -889,10 +892,20 @@ class Exa:
  "API key must be provided as an argument or in EXA_API_KEY environment variable"
  )
  self.base_url = base_url
- self.headers = {"x-api-key": api_key, "User-Agent": user_agent, "Content-Type": "application/json"}
+ self.headers = {
+ "x-api-key": api_key,
+ "User-Agent": user_agent,
+ "Content-Type": "application/json",
+ }
  self.websets = WebsetsClient(self)

- def request(self, endpoint: str, data: Optional[Union[Dict[str, Any], str]] = None, method: str = "POST", params: Optional[Dict[str, Any]] = None) -> Union[Dict[str, Any], requests.Response]:
+ def request(
+ self,
+ endpoint: str,
+ data: Optional[Union[Dict[str, Any], str]] = None,
+ method: str = "POST",
+ params: Optional[Dict[str, Any]] = None,
+ ) -> Union[Dict[str, Any], requests.Response]:
  """Send a request to the Exa API, optionally streaming if data['stream'] is True.

  Args:
@@ -915,13 +928,13 @@ class Exa:
  else:
  # Otherwise, serialize the dictionary to JSON if it exists
  json_data = json.dumps(data, cls=ExaJSONEncoder) if data else None
-
+
  if data and data.get("stream"):
  res = requests.post(
- self.base_url + endpoint,
+ self.base_url + endpoint,
  data=json_data,
- headers=self.headers,
- stream=True
+ headers=self.headers,
+ stream=True,
  )
  return res

@@ -931,20 +944,14 @@ class Exa:
  )
  elif method.upper() == "POST":
  res = requests.post(
- self.base_url + endpoint,
- data=json_data,
- headers=self.headers
+ self.base_url + endpoint, data=json_data, headers=self.headers
  )
  elif method.upper() == "PATCH":
  res = requests.patch(
- self.base_url + endpoint,
- data=json_data,
- headers=self.headers
+ self.base_url + endpoint, data=json_data, headers=self.headers
  )
  elif method.upper() == "DELETE":
- res = requests.delete(
- self.base_url + endpoint, headers=self.headers
- )
+ res = requests.delete(self.base_url + endpoint, headers=self.headers)
  else:
  raise ValueError(f"Unsupported HTTP method: {method}")

@@ -1875,6 +1882,7 @@ class Exa:
  text: Optional[bool] = False,
  system_prompt: Optional[str] = None,
  model: Optional[Literal["exa", "exa-pro"]] = None,
+ output_schema: Optional[dict[str, Any]] = None,
  ) -> Union[AnswerResponse, StreamAnswerResponse]: ...

  def answer(
@@ -1885,6 +1893,7 @@ class Exa:
  text: Optional[bool] = False,
  system_prompt: Optional[str] = None,
  model: Optional[Literal["exa", "exa-pro"]] = None,
+ output_schema: Optional[dict[str, Any]] = None,
  ) -> Union[AnswerResponse, StreamAnswerResponse]:
  """Generate an answer to a query using Exa's search and LLM capabilities.

@@ -1893,6 +1902,7 @@ class Exa:
  text (bool, optional): Whether to include full text in the results. Defaults to False.
  system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
  model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
+ output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.

  Returns:
  AnswerResponse: An object containing the answer and citations.
@@ -1922,6 +1932,7 @@ class Exa:
  text: bool = False,
  system_prompt: Optional[str] = None,
  model: Optional[Literal["exa", "exa-pro"]] = None,
+ output_schema: Optional[dict[str, Any]] = None,
  ) -> StreamAnswerResponse:
  """Generate a streaming answer response.

@@ -1930,7 +1941,7 @@ class Exa:
  text (bool): Whether to include full text in the results. Defaults to False.
  system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
  model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
-
+ output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
  Returns:
  StreamAnswerResponse: An object that can be iterated over to retrieve (partial text, partial citations).
  Each iteration yields a tuple of (Optional[str], Optional[List[AnswerResult]]).
@@ -1982,9 +1993,7 @@ class AsyncExa(Exa):
  # this may only be a
  if self._client is None:
  self._client = httpx.AsyncClient(
- base_url=self.base_url,
- headers=self.headers,
- timeout=60
+ base_url=self.base_url, headers=self.headers, timeout=60
  )
  return self._client

@@ -2004,15 +2013,14 @@ class AsyncExa(Exa):
  """
  if data.get("stream"):
  request = httpx.Request(
- 'POST',
- self.base_url + endpoint,
- json=data,
- headers=self.headers
+ "POST", self.base_url + endpoint, json=data, headers=self.headers
  )
  res = await self.client.send(request, stream=True)
  return res

- res = await self.client.post(self.base_url + endpoint, json=data, headers=self.headers)
+ res = await self.client.post(
+ self.base_url + endpoint, json=data, headers=self.headers
+ )
  if res.status_code != 200:
  raise ValueError(
  f"Request failed with status code {res.status_code}: {res.text}"
@@ -2250,6 +2258,7 @@ class AsyncExa(Exa):
  text: Optional[bool] = False,
  system_prompt: Optional[str] = None,
  model: Optional[Literal["exa", "exa-pro"]] = None,
+ output_schema: Optional[dict[str, Any]] = None,
  ) -> Union[AnswerResponse, StreamAnswerResponse]:
  """Generate an answer to a query using Exa's search and LLM capabilities.

@@ -2258,6 +2267,7 @@ class AsyncExa(Exa):
  text (bool, optional): Whether to include full text in the results. Defaults to False.
  system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
  model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
+ output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.

  Returns:
  AnswerResponse: An object containing the answer and citations.
@@ -2287,6 +2297,7 @@ class AsyncExa(Exa):
  text: bool = False,
  system_prompt: Optional[str] = None,
  model: Optional[Literal["exa", "exa-pro"]] = None,
+ output_schema: Optional[dict[str, Any]] = None,
  ) -> AsyncStreamAnswerResponse:
  """Generate a streaming answer response.

@@ -2295,7 +2306,7 @@ class AsyncExa(Exa):
  text (bool): Whether to include full text in the results. Defaults to False.
  system_prompt (str, optional): A system prompt to guide the LLM's behavior when generating the answer.
  model (str, optional): The model to use for answering. Either "exa" or "exa-pro". Defaults to None.
-
+ output_schema (dict[str, Any], optional): JSON schema describing the desired answer structure.
  Returns:
  AsyncStreamAnswerResponse: An object that can be iterated over to retrieve (partial text, partial citations).
  Each iteration yields a tuple of (Optional[str], Optional[List[AnswerResult]]).
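Functionally, the api.py changes come down to an optional `output_schema` parameter on `answer`/`stream_answer` (sync and async) and a widened `AnswerResponse.answer` type of `Union[str, dict[str, Any]]`; the remaining hunks are formatting. A minimal usage sketch, assuming the parameter is forwarded to the Answer endpoint as the docstrings describe (the schema contents below are illustrative, not from the diff):

```python
# Hedged sketch of the new output_schema parameter added in 1.13.1.
# The schema shown here is made up for illustration.
from exa_py import Exa

exa = Exa(api_key="your-api-key")

answer_schema = {
    "type": "object",
    "properties": {
        "summary": {"type": "string"},
        "sources_used": {"type": "integer"},
    },
    "required": ["summary"],
}

response = exa.answer(
    "What changed in exa-py 1.13.1?",
    output_schema=answer_schema,
)

# With a schema, `answer` may now be a dict rather than a plain string,
# matching the widened Union[str, dict[str, Any]] annotation in this diff.
print(response.answer)
print(response.citations)
```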
exa_py-1.13.1/exa_py/websets/_generator/pydantic/BaseModel.jinja2 ADDED
@@ -0,0 +1,42 @@
+ {% for decorator in decorators -%}
+ {{ decorator }}
+ {% endfor -%}
+ class {{ class_name }}({{ base_class }}):{% if comment is defined %} # {{ comment }}{% endif %}
+ {%- if description %}
+ """
+ {{ description | indent(4) }}
+ """
+ {%- endif %}
+ {%- if not fields and not description %}
+ pass
+ {%- endif %}
+ {%- if config %}
+ {%- filter indent(4) %}
+ {%- endfilter %}
+ {%- endif %}
+ {%- for field in fields -%}
+ {%- if field.name == "type" and field.field %}
+ type: Literal['{{ field.default }}']
+ {%- elif field.name == "object" and field.field %}
+ object: Literal['{{ field.default }}']
+ {%- elif not field.annotated and field.field %}
+ {{ field.name }}: {{ field.type_hint }} = {{ field.field }}
+ {%- else %}
+ {%- if field.annotated %}
+ {{ field.name }}: {{ field.annotated }}
+ {%- else %}
+ {{ field.name }}: {{ field.type_hint }}
+ {%- endif %}
+ {%- if not (field.required or (field.represented_default == 'None' and field.strip_default_none)) or field.data_type.is_optional
+ %} = {{ field.represented_default }}
+ {%- endif -%}
+ {%- endif %}
+ {%- if field.docstring %}
+ """
+ {{ field.docstring | indent(4) }}
+ """
+ {%- endif %}
+ {%- for method in methods -%}
+ {{ method }}
+ {%- endfor -%}
+ {%- endfor -%}
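This new file is a code-generation template (it resembles the pydantic BaseModel template used by datamodel-code-generator) whose notable behavior is pinning `type` and `object` fields to a `Literal` of their default values. Purely as illustration, with hypothetical class and field names, the kind of model it would render looks like:

```python
# Hypothetical example of what the template above might render for a
# generated websets model; the class and field names are illustrative only.
from typing import Literal, Optional

from pydantic import BaseModel


class WebsetCreatedEvent(BaseModel):
    """Hypothetical generated model."""

    object: Literal['event']          # "object" fields are pinned to their default via Literal
    type: Literal['webset.created']   # same treatment for "type" fields
    id: str
    created_at: Optional[str] = None  # optional fields keep their represented default
```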
{exa_py-1.13.0 → exa_py-1.13.1}/pyproject.toml
@@ -32,7 +32,7 @@ in-project = true

  [project]
  name = "exa-py"
- version = "1.13.0"
+ version = "1.13.1"
  description = "Python SDK for Exa API."
  readme = "README.md"
  requires-python = ">=3.9"
exa_py-1.13.0/exa_py.egg-info/PKG-INFO DELETED
@@ -1,95 +0,0 @@
- Metadata-Version: 2.4
- Name: exa-py
- Version: 1.13.0
- Summary: Python SDK for Exa API.
- Home-page: https://github.com/exa-labs/exa-py
- Author: Exa
- Author-email: Exa AI <hello@exa.ai>
- License: MIT
- Requires-Python: >=3.9
- Description-Content-Type: text/markdown
- Requires-Dist: requests>=2.32.3
- Requires-Dist: typing-extensions>=4.12.2
- Requires-Dist: openai>=1.48
- Requires-Dist: pydantic>=2.10.6
- Requires-Dist: pytest-mock>=3.14.0
- Requires-Dist: httpx>=0.28.1
- Dynamic: author
- Dynamic: home-page
-
- # Exa
-
- Exa (formerly Metaphor) API in Python
-
- Note: This API is basically the same as `metaphor-python` but reflects new
- features associated with Metaphor's rename to Exa. New site is https://exa.ai
-
- ## Installation
-
- ```bash
- pip install exa_py
- ```
-
- ## Usage
-
- Import the package and initialize the Exa client with your API key:
-
- ```python
- from exa_py import Exa
-
- exa = Exa(api_key="your-api-key")
- ```
-
- ## Common requests
- ```python
-
- # basic search
- results = exa.search("This is a Exa query:")
-
- # keyword search (non-neural)
- results = exa.search("Google-style query", type="keyword")
-
- # search with date filters
- results = exa.search("This is a Exa query:", start_published_date="2019-01-01", end_published_date="2019-01-31")
-
- # search with domain filters
- results = exa.search("This is a Exa query:", include_domains=["www.cnn.com", "www.nytimes.com"])
-
- # search and get text contents
- results = exa.search_and_contents("This is a Exa query:")
-
- # search and get contents with contents options
- results = exa.search_and_contents("This is a Exa query:",
- text={"include_html_tags": True, "max_characters": 1000})
-
- # find similar documents
- results = exa.find_similar("https://example.com")
-
- # find similar excluding source domain
- results = exa.find_similar("https://example.com", exclude_source_domain=True)
-
- # find similar with contents
- results = exa.find_similar_and_contents("https://example.com", text=True)
-
- # get text contents
- results = exa.get_contents(["tesla.com"])
-
- # get contents with contents options
- results = exa.get_contents(["urls"],
- text={"include_html_tags": True, "max_characters": 1000})
-
- # basic answer
- response = exa.answer("This is a query to answer a question")
-
- # answer with full text, using the exa-pro model (sends 2 expanded quries to exa search)
- response = exa.answer("This is a query to answer a question", text=True, model="exa-pro")
-
- # answer with streaming
- response = exa.stream_answer("This is a query to answer:")
-
- # Print each chunk as it arrives when using the stream_answer method
- for chunk in response:
- print(chunk, end='', flush=True)
-
- ```
-
exa_py-1.13.0/exa_py.egg-info/SOURCES.txt DELETED
@@ -1,26 +0,0 @@
- README.md
- pyproject.toml
- setup.py
- exa_py/__init__.py
- exa_py/api.py
- exa_py/py.typed
- exa_py/utils.py
- exa_py.egg-info/PKG-INFO
- exa_py.egg-info/SOURCES.txt
- exa_py.egg-info/dependency_links.txt
- exa_py.egg-info/requires.txt
- exa_py.egg-info/top_level.txt
- exa_py/websets/__init__.py
- exa_py/websets/client.py
- exa_py/websets/types.py
- exa_py/websets/core/__init__.py
- exa_py/websets/core/base.py
- exa_py/websets/enrichments/__init__.py
- exa_py/websets/enrichments/client.py
- exa_py/websets/items/__init__.py
- exa_py/websets/items/client.py
- exa_py/websets/searches/__init__.py
- exa_py/websets/searches/client.py
- exa_py/websets/webhooks/__init__.py
- exa_py/websets/webhooks/client.py
- tests/test_websets.py
exa_py-1.13.0/exa_py.egg-info/requires.txt DELETED
@@ -1,6 +0,0 @@
- requests>=2.32.3
- typing-extensions>=4.12.2
- openai>=1.48
- pydantic>=2.10.6
- pytest-mock>=3.14.0
- httpx>=0.28.1
exa_py-1.13.0/exa_py.egg-info/top_level.txt DELETED
@@ -1 +0,0 @@
- exa_py
exa_py-1.13.0/setup.cfg DELETED
@@ -1,4 +0,0 @@
- [egg_info]
- tag_build =
- tag_date = 0
-
exa_py-1.13.0/setup.py DELETED
@@ -1,26 +0,0 @@
- from setuptools import find_packages, setup
-
- setup(
- name="exa_py",
- version="1.13.0",
- description="Python SDK for Exa API.",
- long_description_content_type="text/markdown",
- long_description=open("README.md").read(),
- author="Exa",
- author_email="hello@exa.ai",
- package_data={"exa_py": ["py.typed"]},
- url="https://github.com/exa-labs/exa-py",
- packages=find_packages(),
- install_requires=["requests", "typing-extensions", "openai>=1.10.0"],
- classifiers=[
- "Development Status :: 5 - Production/Stable",
- "Intended Audience :: Developers",
- "License :: OSI Approved :: MIT License",
- "Typing :: Typed",
- "Programming Language :: Python :: 3.8",
- "Programming Language :: Python :: 3.9",
- "Programming Language :: Python :: 3.10",
- "Programming Language :: Python :: 3.11",
- "Programming Language :: Python :: 3.12",
- ],
- )
exa_py-1.13.0/tests/test_websets.py DELETED
@@ -1,415 +0,0 @@
- from datetime import datetime
- import json
- from typing import Dict, Any
-
- from pydantic import AnyUrl
- import pytest
- from unittest.mock import MagicMock
-
- from exa_py.websets.client import WebsetsClient
- from exa_py.websets.core.base import WebsetsBaseClient
- from exa_py.api import snake_to_camel, camel_to_snake, to_camel_case, to_snake_case
- from exa_py.websets.types import (
- UpdateWebsetRequest,
- CreateWebsetParameters,
- Search,
- CreateEnrichmentParameters,
- Format
- )
-
- # ============================================================================
- # Fixtures
- # ============================================================================
-
- @pytest.fixture
- def mock_response():
- """Factory fixture to create mock responses with custom data."""
- def _create_response(json_data: Dict[str, Any], status_code: int = 200) -> MagicMock:
- mock = MagicMock()
- mock.json_data = json_data
- mock.status_code = status_code
- mock.text = json.dumps(json_data)
- mock.json.return_value = json_data
- return mock
- return _create_response
-
- @pytest.fixture
- def parent_mock():
- """Create a mock parent client."""
- return MagicMock()
-
- @pytest.fixture
- def base_client(parent_mock):
- """Create a base client instance with mock parent."""
- return WebsetsBaseClient(parent_mock)
-
- @pytest.fixture
- def websets_client(parent_mock):
- """Create a WebsetsClient instance with mock parent."""
- return WebsetsClient(parent_mock)
-
- @pytest.fixture
- def items_client(websets_client):
- """Create an items client instance."""
- return websets_client.items
-
- # ============================================================================
- # Case Conversion Tests
- # ============================================================================
-
- @pytest.mark.parametrize("input,expected", [
- ("test_case", "testCase"),
- ("multiple_word_test", "multipleWordTest"),
- ("single", "single"),
- ("schema_", "$schema"),
- ("not_", "not"),
- ])
- def test_snake_to_camel(input, expected):
- """Test snake_case to camelCase conversion."""
- assert snake_to_camel(input) == expected
-
- @pytest.mark.parametrize("input,expected", [
- ("testCase", "test_case"),
- ("multipleWordTest", "multiple_word_test"),
- ("single", "single"),
- ])
- def test_camel_to_snake(input, expected):
- """Test camelCase to snake_case conversion."""
- assert camel_to_snake(input) == expected
-
- def test_dict_to_camel_case():
- """Test converting dictionary keys from snake_case to camelCase."""
- snake_dict = {
- "test_key": "value",
- "nested_dict": {
- "inner_key": 123,
- "another_key": True
- },
- "normal_key": None
- }
-
- expected = {
- "testKey": "value",
- "nestedDict": {
- "innerKey": 123,
- "anotherKey": True
- }
- }
-
- assert to_camel_case(snake_dict) == expected
-
- def test_dict_to_snake_case():
- """Test converting dictionary keys from camelCase to snake_case."""
- camel_dict = {
- "testKey": "value",
- "nestedDict": {
- "innerKey": 123,
- "anotherKey": True
- }
- }
-
- expected = {
- "test_key": "value",
- "nested_dict": {
- "inner_key": 123,
- "another_key": True
- }
- }
-
- assert to_snake_case(camel_dict) == expected
-
- def test_request_body_case_conversion(websets_client, parent_mock):
- """Test that request body fields are converted from snake_case to camelCase."""
- mock_response = {
- "id": "ws_123",
- "object": "webset",
- "status": "idle",
- "externalId": "test-id",
- "createdAt": "2023-01-01T00:00:00Z",
- "updatedAt": "2023-01-01T00:00:00Z",
- "searches": [],
- "enrichments": []
- }
-
- parent_mock.request.return_value = mock_response
-
- request = CreateWebsetParameters(
- external_id="test-id",
- search=Search(
- query="test query",
- count=10
- ),
- metadata={"snake_case_key": "value"}
- )
-
- websets_client.create(params=request)
-
- actual_data = parent_mock.request.call_args[1]['data']
- assert actual_data == {
- "search": {
- "query": "test query",
- "count": 10
- },
- "externalId": "test-id", # This should be camelCase in the request
- "metadata": {"snake_case_key": "value"} # metadata preserved original case
- }
-
- def test_response_case_conversion(websets_client, parent_mock):
- """Test that API response fields are converted from camelCase to snake_case."""
- mock_response = {
- "id": "ws_123",
- "object": "webset",
- "status": "idle",
- "externalId": "test-id",
- "createdAt": "2023-01-01T00:00:00Z",
- "updatedAt": "2023-01-01T00:00:00Z",
- "searches": [],
- "enrichments": []
- }
-
- parent_mock.request.return_value = mock_response
- result = websets_client.get(id="ws_123")
-
- assert result.external_id == "test-id"
- assert result.created_at == datetime.fromisoformat(mock_response["createdAt"])
-
-
- def test_metadata_case_preservation(websets_client, parent_mock):
- """Test that metadata keys preserve their original case format when sent to API."""
- test_cases = [
- {"snake_case_key": "value"},
- {"camelCaseKey": "value"},
- {"UPPER_CASE": "value"},
- {"mixed_Case_Key": "value"},
- ]
-
- for metadata in test_cases:
- mock_response = {
- "id": "ws_123",
- "object": "webset",
- "status": "idle",
- "metadata": metadata,
- "externalId": "test-id",
- "searches": [],
- "enrichments": [],
- "createdAt": "2023-01-01T00:00:00Z",
- "updatedAt": "2023-01-01T00:00:00Z"
- }
-
- parent_mock.request.return_value = mock_response
-
- request = UpdateWebsetRequest(metadata=metadata)
- result = websets_client.update(id="ws_123", params=request)
-
- actual_data = parent_mock.request.call_args[1]['data']
- assert actual_data["metadata"] == metadata
-
- assert result.metadata == metadata
-
- def test_nested_property_case_conversion(items_client, parent_mock):
- """Test that nested property fields follow proper case conversion rules."""
- mock_response = {
- "data": [{
- "id": "item_123",
- "object": "webset_item",
- "source": "search",
- "sourceId": "search_123",
- "websetId": "ws_123",
- "properties": {
- "type": "company",
- "url": "https://example.com",
- "description": "This is a test description",
- "company": {
- "name": "Example Company",
- "logoUrl": "https://example.com/logo.png",
- }
- },
- "evaluations": [],
- "enrichments": [],
- "createdAt": "2023-01-01T00:00:00Z",
- "updatedAt": "2023-01-01T00:00:00Z"
- }],
- "hasMore": False,
- "nextCursor": None
- }
-
- parent_mock.request.return_value = mock_response
- result = items_client.list(webset_id="ws_123", limit=10)
-
- assert result.data[0].properties.company.logo_url == AnyUrl("https://example.com/logo.png")
-
- def test_request_forwards_to_parent(base_client, parent_mock):
- """Test that BaseClient.request forwards to the parent client's request method."""
- parent_mock.request.return_value = {"key": "value"}
-
- result = base_client.request(
- "/test",
- data={"param": "value"},
- method="POST",
- params={"query": "test"}
- )
-
- # WebsetsBaseClient prepends '/websets/' to all endpoints
- parent_mock.request.assert_called_once_with(
- "/websets//test", # Double slash is preserved
- data={"param": "value"},
- method="POST",
- params={"query": "test"}
- )
-
- assert result == {"key": "value"}
-
- def test_format_validation_string_and_enum():
- """Test that the format field accepts both string and enum values."""
- # Test with enum value
- params1 = CreateEnrichmentParameters(
- description="Test description",
- format=Format.text
- )
- # Since use_enum_values=True in ExaBaseModel, the enum is converted to its string value
- assert params1.format == Format.text.value
-
- # Test with string value
- params2 = CreateEnrichmentParameters(
- description="Test description",
- format="text"
- )
- assert params2.format == "text"
-
- # Both should serialize to the same value
- assert params1.model_dump()["format"] == params2.model_dump()["format"]
-
- # Test with invalid string value
- with pytest.raises(ValueError):
- CreateEnrichmentParameters(
- description="Test description",
- format="invalid_format"
- )
-
- def test_dict_and_model_parameter_support(websets_client, parent_mock):
- """Test that client methods accept both dictionaries and model instances."""
- from exa_py.websets.types import CreateWebsetParameters, Format
-
- # Set up mock response
- mock_response = {
- "id": "ws_123",
- "object": "webset",
- "status": "idle",
- "externalId": None,
- "createdAt": "2023-01-01T00:00:00Z",
- "updatedAt": "2023-01-01T00:00:00Z",
- "searches": [],
- "enrichments": []
- }
- parent_mock.request.return_value = mock_response
-
- # Test with a model instance
- model_params = CreateWebsetParameters(
- search={
- "query": "Test query",
- "count": 10
- },
- enrichments=[{
- "description": "Test enrichment",
- "format": Format.text
- }]
- )
- model_result = websets_client.create(params=model_params)
-
- # Test with an equivalent dictionary
- dict_params = {
- "search": {
- "query": "Test query",
- "count": 10
- },
- "enrichments": [{
- "description": "Test enrichment",
- "format": "text"
- }]
- }
- dict_result = websets_client.create(params=dict_params)
-
- # Verify both calls produce the same result
- assert model_result.id == dict_result.id
- assert model_result.status == dict_result.status
-
- # Verify both calls were made (we don't need to verify exact equality of serialized data)
- assert len(parent_mock.request.call_args_list) == 2
-
- # Both serialization approaches should have the same functionality
- # The differences (Enum vs string, float vs int) are still valid when sent to the API
- model_call_data = parent_mock.request.call_args_list[0][1]['data']
- dict_call_data = parent_mock.request.call_args_list[1][1]['data']
-
- # Check that fields are functionally equivalent
- assert model_call_data['search']['query'] == dict_call_data['search']['query']
- assert float(model_call_data['search']['count']) == float(dict_call_data['search']['count'])
- assert model_call_data['enrichments'][0]['description'] == dict_call_data['enrichments'][0]['description']
-
- # For format, we should get either the enum's value or the string directly
- format1 = model_call_data['enrichments'][0]['format']
- format2 = dict_call_data['enrichments'][0]['format']
-
- # If format1 is an enum, get its value
- format1_value = format1.value if hasattr(format1, 'value') else format1
- # If format2 is an enum, get its value
- format2_value = format2.value if hasattr(format2, 'value') else format2
-
- assert format1_value == format2_value
-
- def test_webhook_attempts_list(websets_client, parent_mock):
- """Test that the WebhookAttemptsClient.list method works correctly."""
- # Mock response for webhook attempts
- mock_response = {
- "data": [{
- "id": "attempt_123",
- "object": "webhook_attempt",
- "eventId": "event_123",
- "eventType": "webset.created",
- "webhookId": "webhook_123",
- "url": "https://example.com/webhook",
- "successful": True,
- "responseHeaders": {"content-type": "application/json"},
- "responseBody": '{"status": "ok"}',
- "responseStatusCode": 200,
- "attempt": 1,
- "attemptedAt": "2023-01-01T00:00:00Z"
- }],
- "hasMore": False,
- "nextCursor": None
- }
-
- parent_mock.request.return_value = mock_response
-
- # Test without optional parameters
- result = websets_client.webhooks.attempts.list(webhook_id="webhook_123")
-
- parent_mock.request.assert_called_with(
- "/websets//v0/webhooks/webhook_123/attempts",
- params={},
- method="GET",
- data=None
- )
-
- assert len(result.data) == 1
- assert result.data[0].id == "attempt_123"
- assert result.data[0].event_type == "webset.created"
- assert result.data[0].successful is True
-
- # Reset mock and test with all optional parameters
- parent_mock.request.reset_mock()
- parent_mock.request.return_value = mock_response
-
- result = websets_client.webhooks.attempts.list(
- webhook_id="webhook_123",
- cursor="cursor_value",
- limit=10,
- event_type="webset.created"
- )
-
- parent_mock.request.assert_called_with(
- "/websets//v0/webhooks/webhook_123/attempts",
- params={"cursor": "cursor_value", "limit": 10, "eventType": "webset.created"},
- method="GET",
- data=None
- )
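The removed module covered the case-conversion helpers and the Websets client surface; the helpers themselves remain in `exa_py.api` (see the api.py hunk above), only the test file leaves the sdist. A condensed, hedged recap of the behavior those tests pinned down:

```python
# Hedged recap of behavior exercised by the removed tests; the helpers still
# live in exa_py.api, only the test module left the sdist.
from exa_py.api import camel_to_snake, snake_to_camel, to_camel_case

assert snake_to_camel("multiple_word_test") == "multipleWordTest"
assert snake_to_camel("schema_") == "$schema"   # special case kept in the api.py hunk above
assert camel_to_snake("multipleWordTest") == "multiple_word_test"

# Dictionary keys are converted recursively; keys mapped to None are dropped.
assert to_camel_case({"external_id": "x", "missing": None}) == {"externalId": "x"}
```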