veris-ai 1.2.0__tar.gz → 1.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of veris-ai might be problematic.

Files changed (29):
  1. {veris_ai-1.2.0 → veris_ai-1.3.0}/PKG-INFO +1 -1
  2. {veris_ai-1.2.0 → veris_ai-1.3.0}/pyproject.toml +1 -1
  3. {veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/__init__.py +2 -7
  4. {veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/jaeger_interface/client.py +44 -41
  5. {veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/jaeger_interface/models.py +1 -2
  6. {veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/models.py +2 -2
  7. {veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/tool_mock.py +24 -23
  8. {veris_ai-1.2.0 → veris_ai-1.3.0}/tests/conftest.py +12 -2
  9. {veris_ai-1.2.0 → veris_ai-1.3.0}/tests/test_tool_mock.py +25 -25
  10. {veris_ai-1.2.0 → veris_ai-1.3.0}/tests/test_utils.py +3 -3
  11. {veris_ai-1.2.0 → veris_ai-1.3.0}/uv.lock +2 -2
  12. {veris_ai-1.2.0 → veris_ai-1.3.0}/.github/workflows/release.yml +0 -0
  13. {veris_ai-1.2.0 → veris_ai-1.3.0}/.github/workflows/test.yml +0 -0
  14. {veris_ai-1.2.0 → veris_ai-1.3.0}/.gitignore +0 -0
  15. {veris_ai-1.2.0 → veris_ai-1.3.0}/CHANGELOG.md +0 -0
  16. {veris_ai-1.2.0 → veris_ai-1.3.0}/CLAUDE.md +0 -0
  17. {veris_ai-1.2.0 → veris_ai-1.3.0}/LICENSE +0 -0
  18. {veris_ai-1.2.0 → veris_ai-1.3.0}/README.md +0 -0
  19. {veris_ai-1.2.0 → veris_ai-1.3.0}/examples/__init__.py +0 -0
  20. {veris_ai-1.2.0 → veris_ai-1.3.0}/examples/import_options.py +0 -0
  21. {veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/braintrust_tracing.py +0 -0
  22. {veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/jaeger_interface/README.md +0 -0
  23. {veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/jaeger_interface/__init__.py +0 -0
  24. {veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/utils.py +0 -0
  25. {veris_ai-1.2.0 → veris_ai-1.3.0}/tests/__init__.py +0 -0
  26. {veris_ai-1.2.0 → veris_ai-1.3.0}/tests/fixtures/__init__.py +0 -0
  27. {veris_ai-1.2.0 → veris_ai-1.3.0}/tests/fixtures/simple_app.py +0 -0
  28. {veris_ai-1.2.0 → veris_ai-1.3.0}/tests/fixtures/sse_server.py +0 -0
  29. {veris_ai-1.2.0 → veris_ai-1.3.0}/tests/test_mcp_protocol_server_mocked.py +0 -0

{veris_ai-1.2.0 → veris_ai-1.3.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: veris-ai
-Version: 1.2.0
+Version: 1.3.0
 Summary: A Python package for Veris AI tools
 Project-URL: Homepage, https://github.com/veris-ai/veris-python-sdk
 Project-URL: Bug Tracker, https://github.com/veris-ai/veris-python-sdk/issues

{veris_ai-1.2.0 → veris_ai-1.3.0}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "veris-ai"
-version = "1.2.0"
+version = "1.3.0"
 description = "A Python package for Veris AI tools"
 readme = "README.md"
 requires-python = ">=3.11"

{veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/__init__.py
@@ -5,9 +5,9 @@ from typing import Any
 __version__ = "0.1.0"
 
 # Import lightweight modules that only use base dependencies
-from .tool_mock import veris
 from .jaeger_interface import JaegerClient
 from .models import ResponseExpectation
+from .tool_mock import veris
 
 # Lazy import for modules with heavy dependencies
 _instrument = None
@@ -34,9 +34,4 @@ def instrument(*args: Any, **kwargs: Any) -> Any:  # noqa: ANN401
     return _instrument(*args, **kwargs)
 
 
-__all__ = [
-    "veris",
-    "JaegerClient",
-    "instrument",
-    "ResponseExpectation"
-]
+__all__ = ["veris", "JaegerClient", "instrument", "ResponseExpectation"]
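
The `instrument` shim kept above resolves its implementation on first call. A standalone sketch of this lazy-import pattern, where `json`/`dumps` are illustrative stand-ins for the SDK's real heavy tracing module and entry point:

# Sketch of the lazy-import pattern; `json` and `dumps` stand in for
# the real heavy dependency and instrument function.
from typing import Any

_instrument = None

def instrument(*args: Any, **kwargs: Any) -> Any:
    """Resolve the real implementation on first call, then delegate."""
    global _instrument
    if _instrument is None:
        import importlib

        heavy = importlib.import_module("json")  # stand-in heavy dependency
        _instrument = heavy.dumps  # stand-in for the real entry point
    return _instrument(*args, **kwargs)

print(instrument({"traced": True}))  # only the first call pays the import cost

This keeps `import veris_ai` cheap for users who never instrument anything.
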

{veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/jaeger_interface/client.py
@@ -5,12 +5,12 @@ This implementation keeps dependencies minimal while providing fully-typed
 """
 
 import json
-from typing import Any, Dict, List, Optional, Self
+import types
+from typing import Any, Self
 
 import requests
 
-from .models import GetTraceResponse, SearchResponse, Span, Tag, Trace
-
+from .models import GetTraceResponse, SearchResponse, Span, Trace
 
 __all__ = ["JaegerClient"]
 
@@ -60,30 +60,27 @@ class JaegerClient:  # noqa: D101
         session.headers.update(self._headers)
         return session, True
 
-    def _span_matches_tags(self, span: Span, span_tags: Dict[str, Any]) -> bool:
+    def _span_matches_tags(self, span: Span, span_tags: dict[str, Any]) -> bool:
         """Check if a span matches any of the provided tags (OR logic)."""
         if not span.tags or not span_tags:
             return False
-
+
         # Convert span tags to a dict for easier lookup
         span_tag_dict = {tag.key: tag.value for tag in span.tags}
-
+
         # OR logic: return True if ANY tag matches
-        for key, value in span_tags.items():
-            if span_tag_dict.get(key) == value:
-                return True
-
-        return False
+        return any(span_tag_dict.get(key) == value for key, value in span_tags.items())
 
     def _filter_spans(
         self,
-        traces: List[Trace],
-        span_tags: Optional[Dict[str, Any]],
-        span_operations: Optional[List[str]] = None
-    ) -> List[Trace]:
-        """
-        Filter spans within traces based on span_tags (OR logic) and/or span_operations (OR logic).
-        If both are provided, a span must match at least one tag AND at least one operation.
+        traces: list[Trace],
+        span_tags: dict[str, Any] | None,
+        span_operations: list[str] | None = None,
+    ) -> list[Trace]:
+        """Filter spans within traces based on span_tags and/or span_operations.
+
+        Uses OR logic within each filter type. If both are provided, a span must
+        match at least one tag AND at least one operation.
         """
        if not span_tags and not span_operations:
            return traces
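
The `any(...)` rewrite above is behavior-preserving. A self-contained sketch of the OR semantics, with simplified dataclass stand-ins for the real `Span`/`Tag` pydantic models:

# Simplified stand-ins for the real Span/Tag models, for illustration only.
from dataclasses import dataclass
from typing import Any

@dataclass
class Tag:
    key: str
    value: Any

@dataclass
class Span:
    tags: list[Tag]

def span_matches_tags(span: Span, span_tags: dict[str, Any]) -> bool:
    if not span.tags or not span_tags:
        return False
    span_tag_dict = {tag.key: tag.value for tag in span.tags}
    # OR logic: one matching key/value pair is enough.
    return any(span_tag_dict.get(k) == v for k, v in span_tags.items())

span = Span(tags=[Tag("http.status_code", 500), Tag("component", "db")])
assert span_matches_tags(span, {"http.status_code": 500, "error": True})  # one match suffices
assert not span_matches_tags(span, {"error": True})

A span passes if any one key/value pair matches; `_filter_spans` then ANDs this result with the operation filter when both are given.
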
@@ -109,7 +106,7 @@ class JaegerClient:  # noqa: D101
                 traceID=trace.traceID,
                 spans=filtered_spans,
                 process=trace.process,
-                warnings=trace.warnings
+                warnings=trace.warnings,
             )
             filtered_traces.append(filtered_trace)
 
@@ -119,16 +116,16 @@ class JaegerClient:  # noqa: D101
     # Public API
     # ---------------------------------------------------------------------
 
-    def search(
+    def search(  # noqa: PLR0913
         self,
-        service: Optional[str] = None,
+        service: str | None = None,
         *,
-        limit: Optional[int] = None,
-        tags: Optional[Dict[str, Any]] = None,
-        operation: Optional[str] = None,
-        span_tags: Optional[Dict[str, Any]] = None,
-        span_operations: Optional[List[str]] = None,
-        **kwargs: Any
+        limit: int | None = None,
+        tags: dict[str, Any] | None = None,
+        operation: str | None = None,
+        span_tags: dict[str, Any] | None = None,
+        span_operations: list[str] | None = None,
+        **kwargs: Any,  # noqa: ANN401
     ) -> SearchResponse:  # noqa: D401
         """Search traces using the *v1* ``/api/traces`` endpoint with optional span filtering.
 
@@ -137,9 +134,11 @@ class JaegerClient:  # noqa: D101
             limit: Maximum number of traces to return.
             tags: Dictionary of tag filters for trace-level filtering (AND-combined).
             operation: Operation name to search for.
-            span_tags: Dictionary of tag filters for span-level filtering (OR-combined AND-combined with span_operations).
+            span_tags: Dictionary of tag filters for span-level filtering.
+                Uses OR logic. Combined with span_operations using AND.
                 Applied client-side after retrieving traces.
-            span_operations: List of operation names to search for (OR-combined AND-combined with span_tags).
+            span_operations: List of operation names to search for.
+                Uses OR logic. Combined with span_tags using AND.
             **kwargs: Additional parameters to pass to the Jaeger API.
 
         Returns:
@@ -147,24 +146,24 @@ class JaegerClient:  # noqa: D101
             with spans filtered according to span_tags if provided.
         """
         # Build params for the Jaeger API (excluding span_tags)
-        params: Dict[str, Any] = {}
-
+        params: dict[str, Any] = {}
+
         if service is not None:
             params["service"] = service
-
+
         if limit is not None:
             params["limit"] = limit
-
+
         if operation is not None:
             params["operation"] = operation
-
+
         if tags:
             # Convert tags to JSON string as expected by Jaeger API
             params["tags"] = json.dumps(tags)
-
+
         # Add any additional parameters
         params.update(kwargs)
-
+
         session, should_close = self._make_session()
         try:
             url = f"{self._base_url}/api/traces"
@@ -174,18 +173,22 @@ class JaegerClient:  # noqa: D101
         finally:
             if should_close:
                 session.close()
-
+
         # Parse the response
         search_response = SearchResponse.model_validate(data)  # type: ignore[arg-type]
-
+
         # Apply span-level filtering if span_tags is provided
-        if span_tags or span_operations and search_response.data and isinstance(search_response.data, list):
+        if (
+            (span_tags or span_operations)
+            and search_response.data
+            and isinstance(search_response.data, list)
+        ):
             filtered_traces = self._filter_spans(search_response.data, span_tags, span_operations)
             search_response.data = filtered_traces
             # Update the total to reflect filtered results
             if search_response.total is not None:
                 search_response.total = len(filtered_traces)
-
+
         return search_response
 
     def get_trace(self, trace_id: str) -> GetTraceResponse:  # noqa: D401
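
Taken together, the `search` hunks make everything after `service` keyword-only and fix an operator-precedence bug in the filter guard: the old `span_tags or span_operations and ...` could enter the filtering branch when the response carried no data, because `and` binds tighter than `or`. A usage sketch; the base URL, service, and filter values are illustrative, and the constructor signature is assumed:

# Usage sketch; base URL, service, and filter values are illustrative,
# and the JaegerClient constructor arguments are assumed.
from veris_ai import JaegerClient

client = JaegerClient("http://localhost:16686")
resp = client.search(
    "checkout-service",         # only `service` may be passed positionally
    limit=20,
    tags={"env": "prod"},       # trace-level filter, AND-combined server-side
    span_tags={"error": True},  # span-level filter, OR-combined, applied client-side
    span_operations=["charge_card", "refund"],  # ANDed with span_tags
)
for trace in resp.data or []:
    print(trace.traceID, len(trace.spans))
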
@@ -225,7 +228,7 @@ class JaegerClient:  # noqa: D101
         self,
         exc_type: type[BaseException] | None,
         exc: BaseException | None,
-        tb: Any | None,
+        tb: types.TracebackType | None,
     ) -> None:
         """Exit the context manager."""
         # Only close if we created the session
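
`types.TracebackType | None` is the conventional annotation for the third `__exit__` argument. Usage implied by the context-manager methods above; the base URL and trace ID are made up:

# Context-manager usage sketch; constructor argument and trace ID assumed.
from veris_ai import JaegerClient

with JaegerClient("http://localhost:16686") as client:
    trace = client.get_trace("0123456789abcdef")
# __exit__ now receives tb: types.TracebackType | None, and the underlying
# session is closed only if the client created it.
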

{veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/jaeger_interface/models.py
@@ -1,9 +1,8 @@
 from __future__ import annotations
 
-import json
 from typing import Any
 
-from pydantic import BaseModel, Field, ConfigDict
+from pydantic import BaseModel, ConfigDict, Field
 
 __all__ = [
     "Tag",

{veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/models.py
@@ -5,7 +5,7 @@ from enum import Enum
 
 class ResponseExpectation(str, Enum):
     """Expected response behavior for tool mocking."""
-
+
     AUTO = "auto"
     REQUIRED = "required"
-    NONE = "none"
+    NONE = "none"
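
Because `ResponseExpectation` subclasses `str`, its members serialize directly; the 1.3.0 request payloads send `response_expectation` as one of these strings in place of the old boolean `expects_response` field (see the test updates below). A small sketch:

# Sketch of str-enum serialization; the payload shape follows the
# test assertions later in this diff.
import json

from veris_ai import ResponseExpectation

payload = {
    "session_id": "test-session-123",
    "response_expectation": ResponseExpectation.REQUIRED.value,
}
print(json.dumps(payload))  # ... "response_expectation": "required" ...
assert ResponseExpectation("none") is ResponseExpectation.NONE  # round-trips from the wire
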

{veris_ai-1.2.0 → veris_ai-1.3.0}/src/veris_ai/tool_mock.py
@@ -92,7 +92,7 @@ class VerisSDK:
             **params_dict,
         )
 
-    def mock(
+    def mock(  # noqa: C901, PLR0915
         self,
         mode: Literal["tool", "function"] = "tool",
         expects_response: bool | None = None,
@@ -100,15 +100,8 @@ class VerisSDK:
     ) -> Callable:
         """Decorator for mocking tool calls."""
 
-        def decorator(func: Callable) -> Callable:
+        def decorator(func: Callable) -> Callable:  # noqa: C901, PLR0915
             """Decorator for mocking tool calls."""
-            endpoint = os.getenv("VERIS_MOCK_ENDPOINT_URL")
-            if not endpoint:
-                error_msg = "VERIS_MOCK_ENDPOINT_URL environment variable is not set"
-                raise ValueError(error_msg)
-            # Default timeout of 30 seconds
-            timeout = float(os.getenv("VERIS_MOCK_TIMEOUT", "90.0"))
-
             # Check if the original function is async
             is_async = inspect.iscoroutinefunction(func)
 
@@ -148,7 +141,7 @@ class VerisSDK:
                    response_expectation = ResponseExpectation.REQUIRED
                else:
                    response_expectation = ResponseExpectation.AUTO
-
+
                payload = {
                    "session_id": self.session_id,
                    "response_expectation": response_expectation.value,
@@ -169,11 +162,16 @@ class VerisSDK:
                 **kwargs: dict[str, object],
             ) -> object:
                 # Check if we're in simulation mode
-                env_mode = os.getenv("ENV", "").lower()
-                if env_mode != "simulation":
+                if not self.session_id:
                     # If not in simulation mode, execute the original function
                     return await func(*args, **kwargs)
-
+                endpoint = os.getenv("VERIS_MOCK_ENDPOINT_URL")
+                if not endpoint:
+                    error_msg = "VERIS_MOCK_ENDPOINT_URL environment variable is not set"
+                    raise ValueError(error_msg)
+                # Default timeout of 30 seconds
+                timeout = float(os.getenv("VERIS_MOCK_TIMEOUT", "90.0"))
+
                 logger.info(f"Simulating function: {func.__name__}")
                 payload, return_type_obj = create_mock_payload(*args, **kwargs)
 
@@ -196,11 +194,16 @@ class VerisSDK:
                 **kwargs: dict[str, object],
             ) -> object:
                 # Check if we're in simulation mode
-                env_mode = os.getenv("ENV", "").lower()
-                if env_mode != "simulation":
+                if not self.session_id:
                     # If not in simulation mode, execute the original function
                     return func(*args, **kwargs)
-
+                endpoint = os.getenv("VERIS_MOCK_ENDPOINT_URL")
+                if not endpoint:
+                    error_msg = "VERIS_MOCK_ENDPOINT_URL environment variable is not set"
+                    raise ValueError(error_msg)
+                # Default timeout of 30 seconds
+                timeout = float(os.getenv("VERIS_MOCK_TIMEOUT", "90.0"))
+
                 logger.info(f"Simulating function: {func.__name__}")
                 payload, return_type_obj = create_mock_payload(*args, **kwargs)
 
@@ -228,23 +231,21 @@ class VerisSDK:
         def decorator(func: Callable) -> Callable:
             # Check if the original function is async
             is_async = inspect.iscoroutinefunction(func)
-
+
             @wraps(func)
             async def async_wrapper(
                 *args: tuple[object, ...],
                 **kwargs: dict[str, object],
             ) -> object:
-                env_mode = os.getenv("ENV", "").lower()
-                if env_mode != "simulation":
+                if not self.session_id:
                     # If not in simulation mode, execute the original function
-                    return func(*args, **kwargs)
+                    return await func(*args, **kwargs)
                 logger.info(f"Simulating function: {func.__name__}")
                 return return_value
-
+
             @wraps(func)
             def sync_wrapper(*args: tuple[object, ...], **kwargs: dict[str, object]) -> object:
-                env_mode = os.getenv("ENV", "").lower()
-                if env_mode != "simulation":
+                if not self.session_id:
                     # If not in simulation mode, execute the original function
                     return func(*args, **kwargs)
                 logger.info(f"Simulating function: {func.__name__}")
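
The common thread in these tool_mock.py hunks: simulation mode is now keyed on a session id set via `veris.set_session_id(...)` rather than the `ENV=simulation` environment variable, the async stub wrapper correctly awaits the wrapped coroutine, and `VERIS_MOCK_ENDPOINT_URL`/`VERIS_MOCK_TIMEOUT` are read and validated at call time instead of decoration time. A runnable sketch of the new gating, assuming `VERIS_MOCK_ENDPOINT_URL` is unset in the environment:

# Sketch of 1.3.0's session-id gating; assumes VERIS_MOCK_ENDPOINT_URL
# is not set in the environment.
import asyncio

from veris_ai import veris

@veris.mock(mode="function")
async def fetch_user(user_id: str) -> dict:
    return {"id": user_id, "source": "real"}

async def main() -> None:
    # No session id: the decorator is a pass-through and no Veris
    # environment variables are required.
    assert await fetch_user("u1") == {"id": "u1", "source": "real"}

    # With a session id, the call routes to the mock endpoint, so the
    # missing VERIS_MOCK_ENDPOINT_URL now fails at call time (in 1.2.0
    # this check ran when the decorator was applied).
    veris.set_session_id("sim-42")
    try:
        await fetch_user("u1")
    except ValueError as exc:
        print(exc)  # VERIS_MOCK_ENDPOINT_URL environment variable is not set
    finally:
        veris.clear_session_id()

asyncio.run(main())
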

{veris_ai-1.2.0 → veris_ai-1.3.0}/tests/conftest.py
@@ -27,23 +27,33 @@ def mock_context():
 
 @pytest.fixture
 def simulation_env():
+    from veris_ai import veris
+
+    # Set session_id to enable simulation mode
+    veris.set_session_id("test-session-123")
+
     with patch.dict(
         os.environ,
         {
             "VERIS_MOCK_ENDPOINT_URL": "http://test-endpoint",
-            "ENV": "simulation",
         },
     ):
         yield
+    # Clean up session_id after test
+    veris.clear_session_id()
 
 
 @pytest.fixture
 def production_env():
+    from veris_ai import veris
+
+    # Clear session_id to ensure production mode (no simulation)
+    veris.clear_session_id()
+
     with patch.dict(
         os.environ,
         {
             "VERIS_MOCK_ENDPOINT_URL": "http://test-endpoint",
-            "ENV": "production",
         },
     ):
         yield

{veris_ai-1.2.0 → veris_ai-1.3.0}/tests/test_tool_mock.py
@@ -71,26 +71,19 @@ async def test_mock_with_context(simulation_env):
 
 
 @pytest.mark.asyncio
-async def test_mock_without_context(simulation_env):
+async def test_mock_without_context():
+    """Test that without session_id, the function runs in production mode."""
+
     @veris.mock(mode="function")
     async def test_func() -> dict:
         return {"result": "real"}
 
-    mock_response = {"result": {"mocked": True}}
-
-    with patch("veris_ai.tool_mock.httpx.AsyncClient") as mock_client:
-        mock_response_obj = Mock()
-        mock_response_obj.json.return_value = mock_response
-        mock_response_obj.raise_for_status.return_value = None
+    # Clear session_id to ensure production mode
+    veris.clear_session_id()
 
-        mock_client.return_value.__aenter__.return_value.post = AsyncMock(
-            return_value=mock_response_obj,
-        )
-        veris.clear_session_id()
-        result = await test_func()
-        first_call = mock_client.return_value.__aenter__.return_value.post.call_args
-        assert first_call.kwargs["json"]["session_id"] == None
-        assert result == {"result": {"mocked": True}}
+    # In production mode, the original function should be called
+    result = await test_func()
+    assert result == {"result": "real"}
 
 
 # Test error handling
@@ -111,15 +104,22 @@ async def test_mock_http_error(simulation_env):
 
 @pytest.mark.asyncio
 async def test_mock_missing_endpoint():
-    with (
-        patch.dict(os.environ, {"VERIS_MOCK_ENDPOINT_URL": ""}),
-        pytest.raises(ValueError, match="VERIS_MOCK_ENDPOINT_URL environment variable is not set"),
-    ):
+    """Test that missing endpoint raises ValueError when function is called in simulation mode."""
+    with patch.dict(os.environ, {"VERIS_MOCK_ENDPOINT_URL": ""}):
 
         @veris.mock()
         async def test_func():
             return {"result": "real"}
 
+        # Set session_id to enable simulation mode
+        veris.set_session_id("test-session")
+
+        # Error should be raised when function is called
+        with pytest.raises(
+            ValueError, match="VERIS_MOCK_ENDPOINT_URL environment variable is not set"
+        ):
+            await test_func()
+
 
 @pytest.mark.asyncio
 async def test_mock_invalid_endpoint(simulation_env):
@@ -352,7 +352,7 @@ async def test_mock_decorator_tool_mode(simulation_env):
     async def test_func() -> dict:
         return {"result": "real"}
 
-    mock_response = "This is a mocked tool response"
+    mock_response = {"result": "mocked"}
 
     with patch("veris_ai.tool_mock.httpx.AsyncClient") as mock_client:
         mock_response_obj = Mock()
@@ -364,7 +364,7 @@ async def test_mock_decorator_tool_mode(simulation_env):
         )
 
         result = await test_func()
-        assert result == {"content": [{"type": "text", "text": mock_response}]}
+        assert result == {"result": "mocked"}
 
 
 @pytest.mark.asyncio
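
These two hunks show a behavioral change in tool mode: 1.2.0 wrapped the mock endpoint's response in an MCP-style text envelope, while 1.3.0 returns the endpoint's JSON as-is. An illustration; the backend payload is hypothetical:

# Hypothetical backend payload illustrating the shape change.
backend_json = {"result": "mocked"}

# 1.2.0 tool mode wrapped responses in an MCP-style envelope:
old_shape = {"content": [{"type": "text", "text": str(backend_json)}]}

# 1.3.0 returns the backend JSON unchanged:
new_shape = backend_json
assert new_shape == {"result": "mocked"}
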
@@ -412,7 +412,7 @@ async def test_mock_decorator_with_expects_response(simulation_env):
         result = await test_func()
         # Verify the request payload included expects_response
         first_call = mock_client.return_value.__aenter__.return_value.post.call_args
-        assert first_call.kwargs["json"]["expects_response"] is True
+        assert first_call.kwargs["json"]["response_expectation"] == "required"
         assert result == {"result": {"mocked": True}}
 
 
@@ -465,7 +465,7 @@ async def test_mock_decorator_all_parameters(simulation_env):
         # Verify the request payload
         first_call = mock_client.return_value.__aenter__.return_value.post.call_args
         payload = first_call.kwargs["json"]
-        assert payload["expects_response"] is False
+        assert payload["response_expectation"] == "none"
         assert payload["cache_response"] is True
         assert result == mock_response
 
@@ -492,7 +492,7 @@ async def test_mock_decorator_function_mode_defaults_expects_response(simulation_env):
         await test_func()
         # Verify expects_response is False for function mode by default
         first_call = mock_client.return_value.__aenter__.return_value.post.call_args
-        assert first_call.kwargs["json"]["expects_response"] is False
+        assert first_call.kwargs["json"]["response_expectation"] == "none"
 
 
 @pytest.mark.asyncio
@@ -504,7 +504,7 @@ async def test_mock_decorator_json_schema_in_payload(simulation_env):
     async def test_func() -> List[dict]:
         return [{"result": "real"}]
 
-    mock_response = {"result": [{"mocked": True}]}
+    mock_response = [{"mocked": True}]
 
     with patch("veris_ai.tool_mock.httpx.AsyncClient") as mock_client:
         mock_response_obj = Mock()

{veris_ai-1.2.0 → veris_ai-1.3.0}/tests/test_utils.py
@@ -114,17 +114,17 @@ class TestConvertToType:
         result = convert_to_type("42", str | int)
         assert result == "42"  # String type is tried first and succeeds
         assert isinstance(result, str)
-
+
         # Test with int value - will be converted to string (first type in union)
         result = convert_to_type(42, str | int)
         assert result == "42"  # Converted to string since str is first in union
         assert isinstance(result, str)
-
+
         # Test with int | str (reversed order) - int comes first
         result = convert_to_type("42", int | str)
         assert result == 42  # Converted to int since int is first
         assert isinstance(result, int)
-
+
         # Test with a value that only works as string
         result = convert_to_type("hello", str | int | float)
         assert result == "hello"

{veris_ai-1.2.0 → veris_ai-1.3.0}/uv.lock
@@ -1,5 +1,5 @@
 version = 1
-revision = 3
+revision = 2
 requires-python = ">=3.11"
 resolution-markers = [
     "python_full_version >= '3.13'",
@@ -1472,7 +1472,7 @@ wheels = [
 
 [[package]]
 name = "veris-ai"
-version = "1.1.0"
+version = "1.2.0"
 source = { editable = "." }
 dependencies = [
     { name = "httpx" },