exa-py 1.14.2.tar.gz → 1.14.3.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of exa-py might be problematic.
- {exa_py-1.14.2 → exa_py-1.14.3}/PKG-INFO +13 -18
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/api.py +1 -1
- exa_py-1.14.3/exa_py.egg-info/PKG-INFO +129 -0
- exa_py-1.14.3/exa_py.egg-info/SOURCES.txt +34 -0
- exa_py-1.14.3/exa_py.egg-info/dependency_links.txt +1 -0
- exa_py-1.14.3/exa_py.egg-info/requires.txt +6 -0
- exa_py-1.14.3/exa_py.egg-info/top_level.txt +1 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/pyproject.toml +1 -1
- exa_py-1.14.3/setup.cfg +4 -0
- exa_py-1.14.3/setup.py +26 -0
- exa_py-1.14.3/tests/test_monitors.py +502 -0
- exa_py-1.14.3/tests/test_websets.py +418 -0
- exa_py-1.14.2/exa_py/websets/_generator/pydantic/BaseModel.jinja2 +0 -42
- {exa_py-1.14.2 → exa_py-1.14.3}/README.md +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/py.typed +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/research/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/research/client.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/research/models.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/utils.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/client.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/core/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/core/base.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/enrichments/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/enrichments/client.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/items/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/items/client.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/monitors/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/monitors/client.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/monitors/runs/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/monitors/runs/client.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/searches/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/searches/client.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/types.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/webhooks/__init__.py +0 -0
- {exa_py-1.14.2 → exa_py-1.14.3}/exa_py/websets/webhooks/client.py +0 -0
--- exa_py-1.14.2/PKG-INFO
+++ exa_py-1.14.3/PKG-INFO
@@ -1,25 +1,21 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
 Name: exa-py
-Version: 1.14.2
+Version: 1.14.3
 Summary: Python SDK for Exa API.
+Home-page: https://github.com/exa-labs/exa-py
+Author: Exa
+Author-email: Exa AI <hello@exa.ai>
 License: MIT
-Author: Exa AI
-Author-email: hello@exa.ai
 Requires-Python: >=3.9
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
-Requires-Dist: httpx (>=0.28.1)
-Requires-Dist: openai (>=1.48)
-Requires-Dist: pydantic (>=2.10.6)
-Requires-Dist: pytest-mock (>=3.14.0)
-Requires-Dist: requests (>=2.32.3)
-Requires-Dist: typing-extensions (>=4.12.2)
 Description-Content-Type: text/markdown
+Requires-Dist: requests>=2.32.3
+Requires-Dist: typing-extensions>=4.12.2
+Requires-Dist: openai>=1.48
+Requires-Dist: pydantic>=2.10.6
+Requires-Dist: pytest-mock>=3.14.0
+Requires-Dist: httpx>=0.28.1
+Dynamic: author
+Dynamic: home-page
 
 # Exa
 
@@ -131,4 +127,3 @@ exa = Exa(api_key="your-api-key")
     output_schema=OUTPUT_SCHEMA,
 )
 ```
-
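For anyone verifying the metadata change locally, everything this hunk touches is visible through the standard library. A quick check (assumes exa-py 1.14.3 is installed in the current environment):

```python
from importlib.metadata import metadata, requires

md = metadata("exa-py")
print(md["Metadata-Version"])  # 2.4 in this release
print(md["Version"])           # 1.14.3
# Dependency specs now come through without parentheses, e.g. "requests>=2.32.3"
print(requires("exa-py"))
```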
--- exa_py-1.14.2/exa_py/api.py
+++ exa_py-1.14.3/exa_py/api.py
@@ -159,7 +159,7 @@ FIND_SIMILAR_OPTIONS_TYPES = {
 }
 
 # the livecrawl options
-LIVECRAWL_OPTIONS = Literal["always", "fallback", "never", "auto"]
+LIVECRAWL_OPTIONS = Literal["always", "fallback", "never", "auto", "preferred"]
 
 CONTENTS_OPTIONS_TYPES = {
     "urls": [list],
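The only source change in this release is the new "preferred" livecrawl mode. A sketch of opting into it, assuming the `livecrawl` keyword is accepted by the contents-fetching calls as the surrounding options tables suggest (this diff does not show the call sites):

```python
from exa_py import Exa

exa = Exa(api_key="your-api-key")

# "preferred" now type-checks against LIVECRAWL_OPTIONS alongside
# "always", "fallback", "never", and "auto".
results = exa.get_contents(
    ["https://example.com"],
    text=True,
    livecrawl="preferred",  # assumed keyword, typed as LIVECRAWL_OPTIONS
)
```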
--- /dev/null
+++ exa_py-1.14.3/exa_py.egg-info/PKG-INFO
@@ -0,0 +1,129 @@
+Metadata-Version: 2.4
+Name: exa-py
+Version: 1.14.3
+Summary: Python SDK for Exa API.
+Home-page: https://github.com/exa-labs/exa-py
+Author: Exa
+Author-email: Exa AI <hello@exa.ai>
+License: MIT
+Requires-Python: >=3.9
+Description-Content-Type: text/markdown
+Requires-Dist: requests>=2.32.3
+Requires-Dist: typing-extensions>=4.12.2
+Requires-Dist: openai>=1.48
+Requires-Dist: pydantic>=2.10.6
+Requires-Dist: pytest-mock>=3.14.0
+Requires-Dist: httpx>=0.28.1
+Dynamic: author
+Dynamic: home-page
+
+# Exa
+
+Exa (formerly Metaphor) API in Python
+
+Note: This API is basically the same as `metaphor-python` but reflects new
+features associated with Metaphor's rename to Exa. New site is https://exa.ai
+
+## Installation
+
+```bash
+pip install exa_py
+```
+
+## Usage
+
+Import the package and initialize the Exa client with your API key:
+
+```python
+from exa_py import Exa
+
+exa = Exa(api_key="your-api-key")
+```
+
+## Common requests
+
+```python
+
+# basic search
+results = exa.search("This is a Exa query:")
+
+# keyword search (non-neural)
+results = exa.search("Google-style query", type="keyword")
+
+# search with date filters
+results = exa.search("This is a Exa query:", start_published_date="2019-01-01", end_published_date="2019-01-31")
+
+# search with domain filters
+results = exa.search("This is a Exa query:", include_domains=["www.cnn.com", "www.nytimes.com"])
+
+# search and get text contents
+results = exa.search_and_contents("This is a Exa query:")
+
+# search and get contents with contents options
+results = exa.search_and_contents("This is a Exa query:",
+    text={"include_html_tags": True, "max_characters": 1000})
+
+# find similar documents
+results = exa.find_similar("https://example.com")
+
+# find similar excluding source domain
+results = exa.find_similar("https://example.com", exclude_source_domain=True)
+
+# find similar with contents
+results = exa.find_similar_and_contents("https://example.com", text=True)
+
+# get text contents
+results = exa.get_contents(["tesla.com"])
+
+# get contents with contents options
+results = exa.get_contents(["urls"],
+    text={"include_html_tags": True, "max_characters": 1000})
+
+# basic answer
+response = exa.answer("This is a query to answer a question")
+
+# answer with full text, using the exa-pro model (sends 2 expanded quries to exa search)
+response = exa.answer("This is a query to answer a question", text=True, model="exa-pro")
+
+# answer with streaming
+response = exa.stream_answer("This is a query to answer:")
+
+# Print each chunk as it arrives when using the stream_answer method
+for chunk in response:
+    print(chunk, end='', flush=True)
+
+# research task example – answer a question with citations
+# Example prompt & schema inspired by the TypeScript example.
+QUESTION = (
+    "Summarize the history of San Francisco highlighting one or two major events "
+    "for each decade from 1850 to 1950"
+)
+OUTPUT_SCHEMA: Dict[str, Any] = {
+    "type": "object",
+    "required": ["timeline"],
+    "properties": {
+        "timeline": {
+            "type": "array",
+            "items": {
+                "type": "object",
+                "required": ["decade", "notableEvents"],
+                "properties": {
+                    "decade": {
+                        "type": "string",
+                        "description": 'Decade label e.g. "1850s"',
+                    },
+                    "notableEvents": {
+                        "type": "string",
+                        "description": "A summary of notable events.",
+                    },
+                },
+            },
+        },
+    },
+}
+resp = exa.research.create_task(
+    instructions=QUESTION,
+    model="exa-research",
+    output_schema=OUTPUT_SCHEMA,
+)
+```
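The research example embedded in this README is a fragment: it uses `Dict` and `Any` without importing them. A self-contained version with an abbreviated schema (the full timeline schema is shown above) would start like this:

```python
from typing import Any, Dict

from exa_py import Exa

exa = Exa(api_key="your-api-key")

# Abbreviated stand-in for the README's timeline schema.
OUTPUT_SCHEMA: Dict[str, Any] = {
    "type": "object",
    "required": ["timeline"],
    "properties": {"timeline": {"type": "array", "items": {"type": "object"}}},
}

resp = exa.research.create_task(
    instructions="Summarize the history of San Francisco from 1850 to 1950",
    model="exa-research",
    output_schema=OUTPUT_SCHEMA,
)
```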
--- /dev/null
+++ exa_py-1.14.3/exa_py.egg-info/SOURCES.txt
@@ -0,0 +1,34 @@
+README.md
+pyproject.toml
+setup.py
+exa_py/__init__.py
+exa_py/api.py
+exa_py/py.typed
+exa_py/utils.py
+exa_py.egg-info/PKG-INFO
+exa_py.egg-info/SOURCES.txt
+exa_py.egg-info/dependency_links.txt
+exa_py.egg-info/requires.txt
+exa_py.egg-info/top_level.txt
+exa_py/research/__init__.py
+exa_py/research/client.py
+exa_py/research/models.py
+exa_py/websets/__init__.py
+exa_py/websets/client.py
+exa_py/websets/types.py
+exa_py/websets/core/__init__.py
+exa_py/websets/core/base.py
+exa_py/websets/enrichments/__init__.py
+exa_py/websets/enrichments/client.py
+exa_py/websets/items/__init__.py
+exa_py/websets/items/client.py
+exa_py/websets/monitors/__init__.py
+exa_py/websets/monitors/client.py
+exa_py/websets/monitors/runs/__init__.py
+exa_py/websets/monitors/runs/client.py
+exa_py/websets/searches/__init__.py
+exa_py/websets/searches/client.py
+exa_py/websets/webhooks/__init__.py
+exa_py/websets/webhooks/client.py
+tests/test_monitors.py
+tests/test_websets.py
--- /dev/null
+++ exa_py-1.14.3/exa_py.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
--- /dev/null
+++ exa_py-1.14.3/exa_py.egg-info/top_level.txt
@@ -0,0 +1 @@
+exa_py
exa_py-1.14.3/setup.cfg (added, +4 lines; contents not shown in this view)

--- /dev/null
+++ exa_py-1.14.3/setup.py
@@ -0,0 +1,26 @@
+from setuptools import find_packages, setup
+
+setup(
+    name="exa_py",
+    version="1.12.4",
+    description="Python SDK for Exa API.",
+    long_description_content_type="text/markdown",
+    long_description=open("README.md").read(),
+    author="Exa",
+    author_email="hello@exa.ai",
+    package_data={"exa_py": ["py.typed"]},
+    url="https://github.com/exa-labs/exa-py",
+    packages=find_packages(),
+    install_requires=["requests", "typing-extensions", "openai>=1.10.0"],
+    classifiers=[
+        "Development Status :: 5 - Production/Stable",
+        "Intended Audience :: Developers",
+        "License :: OSI Approved :: MIT License",
+        "Typing :: Typed",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
+        "Programming Language :: Python :: 3.12",
+    ],
+)
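Note the stale `version="1.12.4"` in this setup.py; the sdist's PKG-INFO (above) records 1.14.3, so the setup.py value is not what the build used. A stdlib-only way to confirm which version a downloaded artifact actually carries (the tarball path below is an assumed local file):

```python
import tarfile

# Read the version recorded in the sdist rather than trusting setup.py.
with tarfile.open("exa_py-1.14.3.tar.gz") as tf:
    pkg_info = tf.extractfile("exa_py-1.14.3/PKG-INFO").read().decode()

for line in pkg_info.splitlines():
    if line.startswith(("Metadata-Version:", "Name:", "Version:")):
        print(line)  # expect "Version: 1.14.3", not setup.py's 1.12.4
```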
--- /dev/null
+++ exa_py-1.14.3/tests/test_monitors.py
@@ -0,0 +1,502 @@
+from datetime import datetime
+import json
+from typing import Dict, Any
+
+import pytest
+from unittest.mock import MagicMock
+
+from exa_py.websets.monitors.client import MonitorsClient
+from exa_py.websets.monitors.runs.client import MonitorRunsClient
+from exa_py.websets.types import (
+    CreateMonitorParameters,
+    UpdateMonitor,
+    MonitorBehaviorSearch,
+    MonitorBehaviorRefresh,
+    MonitorBehaviorSearchConfig,
+    MonitorRefreshBehaviorEnrichmentsConfig,
+    MonitorRefreshBehaviorContentsConfig,
+    MonitorCadence,
+    MonitorStatus,
+    WebsetCompanyEntity,
+)
+
+# ============================================================================
+# Fixtures
+# ============================================================================
+
+@pytest.fixture
+def mock_response():
+    """Factory fixture to create mock responses with custom data."""
+    def _create_response(json_data: Dict[str, Any], status_code: int = 200) -> MagicMock:
+        mock = MagicMock()
+        mock.json_data = json_data
+        mock.status_code = status_code
+        mock.text = json.dumps(json_data)
+        mock.json.return_value = json_data
+        return mock
+    return _create_response
+
+@pytest.fixture
+def parent_mock():
+    """Create a mock parent client."""
+    return MagicMock()
+
+@pytest.fixture
+def monitors_client(parent_mock):
+    """Create a MonitorsClient instance with mock parent."""
+    return MonitorsClient(parent_mock)
+
+@pytest.fixture
+def runs_client(parent_mock):
+    """Create a MonitorRunsClient instance with mock parent."""
+    return MonitorRunsClient(parent_mock)
+
+@pytest.fixture
+def sample_monitor_response():
+    """Sample monitor response data."""
+    return {
+        "id": "monitor_123",
+        "object": "monitor",
+        "status": "enabled",
+        "websetId": "ws_123",
+        "cadence": {
+            "cron": "0 9 * * *",  # Daily at 9:00 AM
+            "timezone": "Etc/UTC"
+        },
+        "behavior": {
+            "type": "search",
+            "config": {
+                "query": "AI startups",
+                "criteria": [{"description": "Must be AI focused"}],
+                "entity": {"type": "company"},
+                "count": 10,
+                "behavior": "append"
+            }
+        },
+        "lastRun": {
+            "id": "run_123",
+            "object": "monitor_run",
+            "status": "completed",
+            "monitorId": "monitor_123",
+            "type": "search",
+            "completedAt": "2023-01-01T10:00:00Z",
+            "failedAt": None,
+            "canceledAt": None,
+            "createdAt": "2023-01-01T09:00:00Z",
+            "updatedAt": "2023-01-01T10:00:00Z"
+        },
+        "nextRunAt": "2023-01-02T09:00:00Z",
+        "metadata": {"key": "value"},
+        "createdAt": "2023-01-01T00:00:00Z",
+        "updatedAt": "2023-01-01T00:00:00Z"
+    }
+
+@pytest.fixture
+def sample_monitor_run_response():
+    """Sample monitor run response data."""
+    return {
+        "id": "run_123",
+        "object": "monitor_run",
+        "status": "completed",
+        "monitorId": "monitor_123",
+        "type": "search",
+        "completedAt": "2023-01-01T10:00:00Z",
+        "failedAt": None,
+        "canceledAt": None,
+        "createdAt": "2023-01-01T09:00:00Z",
+        "updatedAt": "2023-01-01T10:00:00Z"
+    }
+
+# ============================================================================
+# MonitorsClient Tests
+# ============================================================================
+
+def test_monitors_client_has_runs_client(monitors_client):
+    """Test that MonitorsClient properly initializes with a runs client."""
+    assert hasattr(monitors_client, 'runs')
+    assert isinstance(monitors_client.runs, MonitorRunsClient)
+
+def test_create_monitor_with_search_behavior(monitors_client, parent_mock, sample_monitor_response):
+    """Test creating a monitor with search behavior."""
+    parent_mock.request.return_value = sample_monitor_response
+
+    # Create parameters with search behavior
+    params = CreateMonitorParameters(
+        webset_id="ws_123",
+        cadence=MonitorCadence(
+            cron="0 9 * * *",  # Daily at 9:00 AM
+            timezone="America/New_York"
+        ),
+        behavior=MonitorBehaviorSearch(
+            type="search",
+            config=MonitorBehaviorSearchConfig(
+                query="AI startups",
+                criteria=[{"description": "Must be AI focused"}],
+                entity=WebsetCompanyEntity(type="company"),
+                count=10
+            )
+        ),
+        metadata={"environment": "test"}
+    )
+
+    result = monitors_client.create(params)
+
+    # Verify the request was made correctly
+    parent_mock.request.assert_called_once_with(
+        "/websets/v0/monitors",
+        data={
+            'websetId': 'ws_123',
+            'cadence': {'cron': '0 9 * * *', 'timezone': 'America/New_York'},
+            'behavior': {
+                'type': 'search',
+                'config': {
+                    'query': 'AI startups',
+                    'criteria': [{'description': 'Must be AI focused'}],
+                    'entity': {'type': 'company'},
+                    'count': 10,
+                    'behavior': 'append'
+                }
+            },
+            'metadata': {'environment': 'test'}
+        },
+        method="POST",
+        params=None
+    )
+
+    # Verify the response
+    assert result.id == "monitor_123"
+    assert result.webset_id == "ws_123"
+    assert result.status == "enabled"
+
+def test_create_monitor_with_refresh_behavior(monitors_client, parent_mock, sample_monitor_response):
+    """Test creating a monitor with refresh behavior."""
+    # Modify response for refresh behavior
+    refresh_response = sample_monitor_response.copy()
+    refresh_response["behavior"] = {
+        "type": "refresh",
+        "config": {
+            "target": "enrichments",
+            "enrichments": {"ids": ["enrich_123"]}
+        }
+    }
+    parent_mock.request.return_value = refresh_response
+
+    params = CreateMonitorParameters(
+        webset_id="ws_123",
+        cadence=MonitorCadence(cron="0 9 * * 1"),  # Weekly on Monday at 9:00 AM
+        behavior=MonitorBehaviorRefresh(
+            type="refresh",
+            config=MonitorRefreshBehaviorEnrichmentsConfig(
+                target="enrichments",
+                enrichments={"ids": ["enrich_123"]}
+            )
+        )
+    )
+
+    result = monitors_client.create(params)
+
+    assert result.id == "monitor_123"
+    assert result.behavior.type == "refresh"
+
+def test_create_monitor_with_contents_refresh(monitors_client, parent_mock, sample_monitor_response):
+    """Test creating a monitor with contents refresh behavior."""
+    # Modify response for contents refresh
+    refresh_response = sample_monitor_response.copy()
+    refresh_response["behavior"] = {
+        "type": "refresh",
+        "config": {
+            "target": "contents"
+        }
+    }
+    parent_mock.request.return_value = refresh_response
+
+    params = CreateMonitorParameters(
+        webset_id="ws_123",
+        cadence=MonitorCadence(cron="0 9 * * 1"),
+        behavior=MonitorBehaviorRefresh(
+            type="refresh",
+            config=MonitorRefreshBehaviorContentsConfig(target="contents")
+        )
+    )
+
+    result = monitors_client.create(params)
+
+    assert result.id == "monitor_123"
+    assert result.behavior.type == "refresh"
+    assert result.behavior.config.target == "contents"
+
+def test_create_monitor_with_dict_params(monitors_client, parent_mock, sample_monitor_response):
+    """Test creating a monitor with dictionary parameters."""
+    parent_mock.request.return_value = sample_monitor_response
+
+    params = {
+        "websetId": "ws_123",
+        "cadence": {
+            "cron": "0 9 * * *",
+            "timezone": "America/New_York"
+        },
+        "behavior": {
+            "type": "search",
+            "config": {
+                "query": "AI startups",
+                "criteria": [{"description": "Must be AI focused"}],
+                "entity": {"type": "company"},
+                "count": 10
+            }
+        }
+    }
+
+    result = monitors_client.create(params)
+
+    assert result.id == "monitor_123"
+    assert result.webset_id == "ws_123"
+
+def test_get_monitor(monitors_client, parent_mock, sample_monitor_response):
+    """Test getting a specific monitor."""
+    parent_mock.request.return_value = sample_monitor_response
+
+    result = monitors_client.get("monitor_123")
+
+    parent_mock.request.assert_called_once_with(
+        "/websets/v0/monitors/monitor_123",
+        data=None,
+        method="GET",
+        params=None
+    )
+
+    assert result.id == "monitor_123"
+    assert result.status == "enabled"
+
+def test_list_monitors(monitors_client, parent_mock):
+    """Test listing monitors with parameters."""
+    response_data = {
+        "data": [
+            {
+                "id": "monitor_123",
+                "object": "monitor",
+                "status": "enabled",
+                "websetId": "ws_123",
+                "cadence": {"cron": "0 9 * * *", "timezone": "Etc/UTC"},
+                "behavior": {
+                    "type": "search",
+                    "config": {
+                        "query": "AI startups",
+                        "criteria": [{"description": "Must be AI focused"}],
+                        "entity": {"type": "company"},
+                        "count": 10,
+                        "behavior": "append"
+                    }
+                },
+                "metadata": {},
+                "createdAt": "2023-01-01T00:00:00Z",
+                "updatedAt": "2023-01-01T00:00:00Z"
+            }
+        ],
+        "hasMore": False,
+        "nextCursor": None
+    }
+    parent_mock.request.return_value = response_data
+
+    result = monitors_client.list(cursor="cursor_123", limit=10, webset_id="ws_123")
+
+    parent_mock.request.assert_called_once_with(
+        "/websets/v0/monitors",
+        data=None,
+        method="GET",
+        params={"cursor": "cursor_123", "limit": 10, "websetId": "ws_123"}
+    )
+
+    assert len(result.data) == 1
+    assert result.data[0].id == "monitor_123"
+    assert not result.has_more
+
+def test_list_monitors_no_params(monitors_client, parent_mock):
+    """Test listing monitors without parameters."""
+    response_data = {
+        "data": [],
+        "hasMore": False,
+        "nextCursor": None
+    }
+    parent_mock.request.return_value = response_data
+
+    result = monitors_client.list()
+
+    parent_mock.request.assert_called_once_with(
+        "/websets/v0/monitors",
+        data=None,
+        method="GET",
+        params={}
+    )
+
+    assert len(result.data) == 0
+
+def test_update_monitor(monitors_client, parent_mock, sample_monitor_response):
+    """Test updating a monitor."""
+    updated_response = sample_monitor_response.copy()
+    updated_response["status"] = "disabled"
+    parent_mock.request.return_value = updated_response
+
+    params = UpdateMonitor(status=MonitorStatus.disabled, metadata={"updated": "true"})
+
+    result = monitors_client.update("monitor_123", params)
+
+    parent_mock.request.assert_called_once_with(
+        "/websets/v0/monitors/monitor_123",
+        data={"status": "disabled", "metadata": {"updated": "true"}},
+        method="PATCH",
+        params=None
+    )
+
+    assert result.id == "monitor_123"
+    assert result.status == "disabled"
+
+def test_update_monitor_with_dict(monitors_client, parent_mock, sample_monitor_response):
+    """Test updating a monitor with dictionary parameters."""
+    updated_response = sample_monitor_response.copy()
+    updated_response["status"] = "disabled"
+    parent_mock.request.return_value = updated_response
+
+    params = {"status": "disabled", "metadata": {"updated": "true"}}
+
+    result = monitors_client.update("monitor_123", params)
+
+    assert result.id == "monitor_123"
+    assert result.status == "disabled"
+
+def test_delete_monitor(monitors_client, parent_mock, sample_monitor_response):
+    """Test deleting a monitor."""
+    deleted_response = sample_monitor_response.copy()
+    deleted_response["status"] = "disabled"
+    parent_mock.request.return_value = deleted_response
+
+    result = monitors_client.delete("monitor_123")
+
+    parent_mock.request.assert_called_once_with(
+        "/websets/v0/monitors/monitor_123",
+        data=None,
+        method="DELETE",
+        params=None
+    )
+
+    assert result.id == "monitor_123"
+
+# ============================================================================
+# MonitorRunsClient Tests
+# ============================================================================
+
+def test_list_monitor_runs(runs_client, parent_mock, sample_monitor_run_response):
+    """Test listing monitor runs."""
+    response_data = {
+        "data": [sample_monitor_run_response],
+        "hasMore": False,
+        "nextCursor": None
+    }
+    parent_mock.request.return_value = response_data
+
+    result = runs_client.list("monitor_123")
+
+    parent_mock.request.assert_called_once_with(
+        "/websets/v0/monitors/monitor_123/runs",
+        data=None,
+        method="GET",
+        params=None
+    )
+
+    assert len(result.data) == 1
+    assert result.data[0].id == "run_123"
+    assert result.data[0].monitor_id == "monitor_123"
+
+def test_get_monitor_run(runs_client, parent_mock, sample_monitor_run_response):
+    """Test getting a specific monitor run."""
+    parent_mock.request.return_value = sample_monitor_run_response
+
+    result = runs_client.get("monitor_123", "run_123")
+
+    parent_mock.request.assert_called_once_with(
+        "/websets/v0/monitors/monitor_123/runs/run_123",
+        data=None,
+        method="GET",
+        params=None
+    )
+
+    assert result.id == "run_123"
+    assert result.monitor_id == "monitor_123"
+    assert result.status == "completed"
+
+def test_monitors_client_runs_integration(monitors_client, parent_mock, sample_monitor_run_response):
+    """Test that monitors client properly integrates with runs client."""
+    response_data = {
+        "data": [sample_monitor_run_response],
+        "hasMore": False,
+        "nextCursor": None
+    }
+    parent_mock.request.return_value = response_data
+
+    result = monitors_client.runs.list("monitor_123")
+
+    assert len(result.data) == 1
+    assert result.data[0].id == "run_123"
+
+def test_case_conversion_in_monitor_params(monitors_client, parent_mock, sample_monitor_response):
+    """Test that camelCase fields are properly handled."""
+    parent_mock.request.return_value = sample_monitor_response
+
+    params = CreateMonitorParameters(
+        webset_id="ws_123",  # This should become websetId
+        cadence=MonitorCadence(cron="0 9 * * *"),
+        behavior=MonitorBehaviorSearch(
+            type="search",
+            config=MonitorBehaviorSearchConfig(
+                query="test",
+                criteria=[],
+                entity=WebsetCompanyEntity(type="company"),
+                count=10
+            )
+        )
+    )
+
+    result = monitors_client.create(params)
+
+    # Verify the response properly maps camelCase back to snake_case
+    assert result.webset_id == "ws_123"  # websetId -> webset_id
+    assert result.next_run_at is not None  # nextRunAt -> next_run_at
+
+def test_cron_expression_serialization(monitors_client, parent_mock, sample_monitor_response):
+    """Test that cron expressions are properly serialized."""
+    parent_mock.request.return_value = sample_monitor_response
+
+    params = CreateMonitorParameters(
+        webset_id="ws_123",
+        cadence=MonitorCadence(
+            cron="0 9 * * 1-5",  # Weekdays at 9 AM
+            timezone="America/New_York"
+        ),
+        behavior=MonitorBehaviorSearch(
+            type="search",
+            config=MonitorBehaviorSearchConfig(
+                query="test",
+                criteria=[],
+                entity=WebsetCompanyEntity(type="company"),
+                count=5
+            )
+        )
+    )
+
+    result = monitors_client.create(params)
+
+    assert result.cadence.cron == "0 9 * * *"  # From response
+    assert result.cadence.timezone == "Etc/UTC"  # From response
+
+def test_monitors_client_request_error_handling(monitors_client, parent_mock):
+    """Test that client properly handles request errors."""
+    parent_mock.request.side_effect = Exception("Network error")
+
+    with pytest.raises(Exception, match="Network error"):
+        monitors_client.get("monitor_123")
+
+def test_invalid_monitor_id_format(runs_client, parent_mock):
+    """Test handling of invalid monitor ID format."""
+    parent_mock.request.side_effect = Exception("Invalid monitor ID")
+
+    with pytest.raises(Exception, match="Invalid monitor ID"):
+        runs_client.list("invalid_id")
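The new tests double as usage documentation for the monitors surface. A hedged end-to-end sketch against a live client, assuming the top-level `Exa` client exposes `websets.monitors` the way these fixtures wire it up (ids and queries are placeholders):

```python
from exa_py import Exa
from exa_py.websets.types import (
    CreateMonitorParameters,
    MonitorCadence,
    MonitorBehaviorSearch,
    MonitorBehaviorSearchConfig,
    WebsetCompanyEntity,
)

exa = Exa(api_key="your-api-key")

# Create a daily search monitor on an existing webset ("ws_123" is a placeholder).
monitor = exa.websets.monitors.create(
    CreateMonitorParameters(
        webset_id="ws_123",  # serialized as websetId, as the tests assert
        cadence=MonitorCadence(cron="0 9 * * *", timezone="Etc/UTC"),
        behavior=MonitorBehaviorSearch(
            type="search",
            config=MonitorBehaviorSearchConfig(
                query="AI startups",
                criteria=[{"description": "Must be AI focused"}],
                entity=WebsetCompanyEntity(type="company"),
                count=10,
            ),
        ),
    )
)

# Inspect its runs through the nested runs client the first test checks for.
for run in exa.websets.monitors.runs.list(monitor.id).data:
    print(run.id, run.status)
```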
--- /dev/null
+++ exa_py-1.14.3/tests/test_websets.py
@@ -0,0 +1,418 @@
+from datetime import datetime
+import json
+from typing import Dict, Any
+
+from pydantic import AnyUrl
+import pytest
+from unittest.mock import MagicMock
+
+from exa_py.websets.client import WebsetsClient
+from exa_py.websets.core.base import WebsetsBaseClient
+from exa_py.api import snake_to_camel, camel_to_snake, to_camel_case, to_snake_case
+from exa_py.websets.types import (
+    UpdateWebsetRequest,
+    CreateWebsetParameters,
+    CreateWebsetParametersSearch,
+    CreateEnrichmentParameters,
+    Format
+)
+
+# ============================================================================
+# Fixtures
+# ============================================================================
+
+@pytest.fixture
+def mock_response():
+    """Factory fixture to create mock responses with custom data."""
+    def _create_response(json_data: Dict[str, Any], status_code: int = 200) -> MagicMock:
+        mock = MagicMock()
+        mock.json_data = json_data
+        mock.status_code = status_code
+        mock.text = json.dumps(json_data)
+        mock.json.return_value = json_data
+        return mock
+    return _create_response
+
+@pytest.fixture
+def parent_mock():
+    """Create a mock parent client."""
+    return MagicMock()
+
+@pytest.fixture
+def base_client(parent_mock):
+    """Create a base client instance with mock parent."""
+    return WebsetsBaseClient(parent_mock)
+
+@pytest.fixture
+def websets_client(parent_mock):
+    """Create a WebsetsClient instance with mock parent."""
+    return WebsetsClient(parent_mock)
+
+@pytest.fixture
+def items_client(websets_client):
+    """Create an items client instance."""
+    return websets_client.items
+
+# ============================================================================
+# Case Conversion Tests
+# ============================================================================
+
+@pytest.mark.parametrize("input,expected", [
+    ("test_case", "testCase"),
+    ("multiple_word_test", "multipleWordTest"),
+    ("single", "single"),
+    ("schema_", "$schema"),
+    ("not_", "not"),
+])
+def test_snake_to_camel(input, expected):
+    """Test snake_case to camelCase conversion."""
+    assert snake_to_camel(input) == expected
+
+@pytest.mark.parametrize("input,expected", [
+    ("testCase", "test_case"),
+    ("multipleWordTest", "multiple_word_test"),
+    ("single", "single"),
+])
+def test_camel_to_snake(input, expected):
+    """Test camelCase to snake_case conversion."""
+    assert camel_to_snake(input) == expected
+
+def test_dict_to_camel_case():
+    """Test converting dictionary keys from snake_case to camelCase."""
+    snake_dict = {
+        "test_key": "value",
+        "nested_dict": {
+            "inner_key": 123,
+            "another_key": True
+        },
+        "normal_key": None
+    }
+
+    expected = {
+        "testKey": "value",
+        "nestedDict": {
+            "innerKey": 123,
+            "anotherKey": True
+        }
+    }
+
+    assert to_camel_case(snake_dict) == expected
+
+def test_dict_to_snake_case():
+    """Test converting dictionary keys from camelCase to snake_case."""
+    camel_dict = {
+        "testKey": "value",
+        "nestedDict": {
+            "innerKey": 123,
+            "anotherKey": True
+        }
+    }
+
+    expected = {
+        "test_key": "value",
+        "nested_dict": {
+            "inner_key": 123,
+            "another_key": True
+        }
+    }
+
+    assert to_snake_case(camel_dict) == expected
+
+def test_request_body_case_conversion(websets_client, parent_mock):
+    """Test that request body fields are converted from snake_case to camelCase."""
+    mock_response = {
+        "id": "ws_123",
+        "object": "webset",
+        "status": "idle",
+        "externalId": "test-id",
+        "createdAt": "2023-01-01T00:00:00Z",
+        "updatedAt": "2023-01-01T00:00:00Z",
+        "searches": [],
+        "enrichments": [],
+        "monitors": []
+    }
+
+    parent_mock.request.return_value = mock_response
+
+    request = CreateWebsetParameters(
+        external_id="test-id",
+        search=CreateWebsetParametersSearch(
+            query="test query",
+            count=10
+        ),
+        metadata={"snake_case_key": "value"}
+    )
+
+    websets_client.create(params=request)
+
+    actual_data = parent_mock.request.call_args[1]['data']
+    assert actual_data == {
+        "search": {
+            "query": "test query",
+            "count": 10
+        },
+        "externalId": "test-id",  # This should be camelCase in the request
+        "metadata": {"snake_case_key": "value"}  # metadata preserved original case
+    }
+
+def test_response_case_conversion(websets_client, parent_mock):
+    """Test that API response fields are converted from camelCase to snake_case."""
+    mock_response = {
+        "id": "ws_123",
+        "object": "webset",
+        "status": "idle",
+        "externalId": "test-id",
+        "createdAt": "2023-01-01T00:00:00Z",
+        "updatedAt": "2023-01-01T00:00:00Z",
+        "searches": [],
+        "enrichments": [],
+        "monitors": []
+    }
+
+    parent_mock.request.return_value = mock_response
+    result = websets_client.get(id="ws_123")
+
+    assert result.external_id == "test-id"
+    assert result.created_at == datetime.fromisoformat(mock_response["createdAt"])
+
+
+def test_metadata_case_preservation(websets_client, parent_mock):
+    """Test that metadata keys preserve their original case format when sent to API."""
+    test_cases = [
+        {"snake_case_key": "value"},
+        {"camelCaseKey": "value"},
+        {"UPPER_CASE": "value"},
+        {"mixed_Case_Key": "value"},
+    ]
+
+    for metadata in test_cases:
+        mock_response = {
+            "id": "ws_123",
+            "object": "webset",
+            "status": "idle",
+            "metadata": metadata,
+            "externalId": "test-id",
+            "searches": [],
+            "enrichments": [],
+            "monitors": [],
+            "createdAt": "2023-01-01T00:00:00Z",
+            "updatedAt": "2023-01-01T00:00:00Z"
+        }
+
+        parent_mock.request.return_value = mock_response
+
+        request = UpdateWebsetRequest(metadata=metadata)
+        result = websets_client.update(id="ws_123", params=request)
+
+        assert result.metadata == metadata
+
+        actual_data = parent_mock.request.call_args[1]['data']
+        assert actual_data["metadata"] == metadata
+
+def test_nested_property_case_conversion(items_client, parent_mock):
+    """Test that nested property fields follow proper case conversion rules."""
+    mock_response = {
+        "data": [{
+            "id": "item_123",
+            "object": "webset_item",
+            "source": "search",
+            "sourceId": "search_123",
+            "websetId": "ws_123",
+            "properties": {
+                "type": "company",
+                "url": "https://example.com",
+                "description": "This is a test description",
+                "company": {
+                    "name": "Example Company",
+                    "logoUrl": "https://example.com/logo.png",
+                }
+            },
+            "evaluations": [],
+            "enrichments": [],
+            "createdAt": "2023-01-01T00:00:00Z",
+            "updatedAt": "2023-01-01T00:00:00Z"
+        }],
+        "hasMore": False,
+        "nextCursor": None
+    }
+
+    parent_mock.request.return_value = mock_response
+    result = items_client.list(webset_id="ws_123", limit=10)
+
+    assert result.data[0].properties.company.logo_url == AnyUrl("https://example.com/logo.png")
+
+def test_request_forwards_to_parent(base_client, parent_mock):
+    """Test that BaseClient.request forwards to the parent client's request method."""
+    parent_mock.request.return_value = {"key": "value"}
+
+    result = base_client.request(
+        "/test",
+        data={"param": "value"},
+        method="POST",
+        params={"query": "test"}
+    )
+
+    parent_mock.request.assert_called_once_with(
+        "/websets/test",
+        data={"param": "value"},
+        method="POST",
+        params={"query": "test"}
+    )
+
+    assert result == {"key": "value"}
+
+def test_format_validation_string_and_enum():
+    """Test that the format field accepts both string and enum values."""
+    # Test with enum value
+    params1 = CreateEnrichmentParameters(
+        description="Test description",
+        format=Format.text
+    )
+    # Since use_enum_values=True in ExaBaseModel, the enum is converted to its string value
+    assert params1.format == Format.text.value
+
+    # Test with string value
+    params2 = CreateEnrichmentParameters(
+        description="Test description",
+        format="text"
+    )
+    assert params2.format == "text"
+
+    # Both should serialize to the same value
+    assert params1.model_dump()["format"] == params2.model_dump()["format"]
+
+    # Test with invalid string value
+    with pytest.raises(ValueError):
+        CreateEnrichmentParameters(
+            description="Test description",
+            format="invalid_format"
+        )
+
+def test_dict_and_model_parameter_support(websets_client, parent_mock):
+    """Test that client methods accept both dictionaries and model instances."""
+    from exa_py.websets.types import CreateWebsetParameters, Format
+
+    # Set up mock response
+    mock_response = {
+        "id": "ws_123",
+        "object": "webset",
+        "status": "idle",
+        "externalId": None,
+        "createdAt": "2023-01-01T00:00:00Z",
+        "updatedAt": "2023-01-01T00:00:00Z",
+        "searches": [],
+        "enrichments": [],
+        "monitors": []
+    }
+    parent_mock.request.return_value = mock_response
+
+    # Test with a model instance
+    model_params = CreateWebsetParameters(
+        search={
+            "query": "Test query",
+            "count": 10
+        },
+        enrichments=[{
+            "description": "Test enrichment",
+            "format": Format.text
+        }]
+    )
+    model_result = websets_client.create(params=model_params)
+
+    # Test with an equivalent dictionary
+    dict_params = {
+        "search": {
+            "query": "Test query",
+            "count": 10
+        },
+        "enrichments": [{
+            "description": "Test enrichment",
+            "format": "text"
+        }]
+    }
+    dict_result = websets_client.create(params=dict_params)
+
+    # Verify both calls produce the same result
+    assert model_result.id == dict_result.id
+    assert model_result.status == dict_result.status
+
+    # Verify both calls were made (we don't need to verify exact equality of serialized data)
+    assert len(parent_mock.request.call_args_list) == 2
+
+    # Both serialization approaches should have the same functionality
+    # The differences (Enum vs string, float vs int) are still valid when sent to the API
+    model_call_data = parent_mock.request.call_args_list[0][1]['data']
+    dict_call_data = parent_mock.request.call_args_list[1][1]['data']
+
+    # Check that fields are functionally equivalent
+    assert model_call_data['search']['query'] == dict_call_data['search']['query']
+    assert float(model_call_data['search']['count']) == float(dict_call_data['search']['count'])
+    assert model_call_data['enrichments'][0]['description'] == dict_call_data['enrichments'][0]['description']
+
+    # For format, we should get either the enum's value or the string directly
+    format1 = model_call_data['enrichments'][0]['format']
+    format2 = dict_call_data['enrichments'][0]['format']
+
+    # If format1 is an enum, get its value
+    format1_value = format1.value if hasattr(format1, 'value') else format1
+    # If format2 is an enum, get its value
+    format2_value = format2.value if hasattr(format2, 'value') else format2
+
+    assert format1_value == format2_value
+
+def test_webhook_attempts_list(websets_client, parent_mock):
+    """Test that the WebhookAttemptsClient.list method works correctly."""
+    # Mock response for webhook attempts
+    mock_response = {
+        "data": [{
+            "id": "attempt_123",
+            "object": "webhook_attempt",
+            "eventId": "event_123",
+            "eventType": "webset.created",
+            "webhookId": "webhook_123",
+            "url": "https://example.com/webhook",
+            "successful": True,
+            "responseHeaders": {"content-type": "application/json"},
+            "responseBody": '{"status": "ok"}',
+            "responseStatusCode": 200,
+            "attempt": 1,
+            "attemptedAt": "2023-01-01T00:00:00Z"
+        }],
+        "hasMore": False,
+        "nextCursor": None
+    }
+
+    parent_mock.request.return_value = mock_response
+
+    # Test without optional parameters
+    result = websets_client.webhooks.attempts.list(webhook_id="webhook_123")
+
+    parent_mock.request.assert_called_with(
+        "/websets/v0/webhooks/webhook_123/attempts",
+        params={},
+        method="GET",
+        data=None
+    )
+
+    assert len(result.data) == 1
+    assert result.data[0].id == "attempt_123"
+    assert result.data[0].event_type == "webset.created"
+    assert result.data[0].successful is True
+
+    # Reset mock and test with all optional parameters
+    parent_mock.request.reset_mock()
+    parent_mock.request.return_value = mock_response
+
+    result = websets_client.webhooks.attempts.list(
+        webhook_id="webhook_123",
+        cursor="cursor_value",
+        limit=10,
+        event_type="webset.created"
+    )
+
+    parent_mock.request.assert_called_with(
+        "/websets/v0/webhooks/webhook_123/attempts",
+        params={"cursor": "cursor_value", "limit": 10, "eventType": "webset.created"},
+        method="GET",
+        data=None
+    )
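As with the monitors tests, these cases pin down the public call shapes. A minimal live-client version, assuming the same `Exa().websets` layout the fixtures mock (query, ids, and enrichment text are placeholders):

```python
from exa_py import Exa
from exa_py.websets.types import (
    CreateWebsetParameters,
    CreateWebsetParametersSearch,
    CreateEnrichmentParameters,
    Format,
)

exa = Exa(api_key="your-api-key")

webset = exa.websets.create(
    params=CreateWebsetParameters(
        external_id="my-webset",  # sent as externalId on the wire
        search=CreateWebsetParametersSearch(query="test query", count=10),
        enrichments=[
            CreateEnrichmentParameters(description="Company summary", format=Format.text),
        ],
    )
)
print(webset.id, webset.status)

# Response keys come back snake_cased, e.g. item.properties.company.logo_url.
items = exa.websets.items.list(webset_id=webset.id, limit=10)
for item in items.data:
    print(item.properties.url)
```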
--- exa_py-1.14.2/exa_py/websets/_generator/pydantic/BaseModel.jinja2
+++ /dev/null
@@ -1,42 +0,0 @@
-{% for decorator in decorators -%}
-{{ decorator }}
-{% endfor -%}
-class {{ class_name }}({{ base_class }}):{% if comment is defined %} # {{ comment }}{% endif %}
-{%- if description %}
-    """
-    {{ description | indent(4) }}
-    """
-{%- endif %}
-{%- if not fields and not description %}
-    pass
-{%- endif %}
-{%- if config %}
-{%- filter indent(4) %}
-{%- endfilter %}
-{%- endif %}
-{%- for field in fields -%}
-{%- if field.name == "type" and field.field %}
-    type: Literal['{{ field.default }}']
-{%- elif field.name == "object" and field.field %}
-    object: Literal['{{ field.default }}']
-{%- elif not field.annotated and field.field %}
-    {{ field.name }}: {{ field.type_hint }} = {{ field.field }}
-{%- else %}
-{%- if field.annotated %}
-    {{ field.name }}: {{ field.annotated }}
-{%- else %}
-    {{ field.name }}: {{ field.type_hint }}
-{%- endif %}
-{%- if not (field.required or (field.represented_default == 'None' and field.strip_default_none)) or field.data_type.is_optional
-    %} = {{ field.represented_default }}
-{%- endif -%}
-{%- endif %}
-{%- if field.docstring %}
-    """
-    {{ field.docstring | indent(4) }}
-    """
-{%- endif %}
-{%- for method in methods -%}
-{{ method }}
-{%- endfor -%}
-{%- endfor -%}
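For context on the deleted generator template: it rendered pydantic models in which `type` and `object` discriminator fields were pinned to `Literal` defaults and non-required fields received represented defaults. An illustrative, hypothetical output (not a file from either package version):

```python
from typing import Literal, Optional

from pydantic import BaseModel


class MonitorRun(BaseModel):
    """Example of the shape the removed template emitted."""

    object: Literal["monitor_run"]  # "object" fields pinned via Literal
    type: Literal["search"]  # "type" fields get the same special-casing
    id: str
    monitor_id: Optional[str] = None  # non-required fields default to None
```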
24 files without changes (the entries marked +0 -0 in the list above).