veris-ai 1.5.0__tar.gz → 1.6.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of veris-ai might be problematic.

Files changed (38)
  1. {veris_ai-1.5.0 → veris_ai-1.6.0}/.cursor/rules/documentation-management.mdc +9 -7
  2. veris_ai-1.6.0/.pre-commit-config.yaml +24 -0
  3. {veris_ai-1.5.0 → veris_ai-1.6.0}/CHANGELOG.md +0 -14
  4. {veris_ai-1.5.0 → veris_ai-1.6.0}/PKG-INFO +33 -18
  5. {veris_ai-1.5.0 → veris_ai-1.6.0}/README.md +23 -12
  6. {veris_ai-1.5.0 → veris_ai-1.6.0}/examples/README.md +1 -9
  7. {veris_ai-1.5.0 → veris_ai-1.6.0}/examples/import_options.py +1 -12
  8. {veris_ai-1.5.0 → veris_ai-1.6.0}/pyproject.toml +19 -6
  9. {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/README.md +0 -7
  10. veris_ai-1.6.0/src/veris_ai/__init__.py +17 -0
  11. {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/jaeger_interface/__init__.py +1 -1
  12. {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/logging.py +43 -6
  13. veris_ai-1.6.0/src/veris_ai/observability.py +133 -0
  14. {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/tool_mock.py +22 -2
  15. veris_ai-1.6.0/uv.lock +2880 -0
  16. veris_ai-1.5.0/src/veris_ai/__init__.py +0 -37
  17. veris_ai-1.5.0/src/veris_ai/braintrust_tracing.py +0 -282
  18. veris_ai-1.5.0/uv.lock +0 -1631
  19. {veris_ai-1.5.0 → veris_ai-1.6.0}/.github/workflows/release.yml +0 -0
  20. {veris_ai-1.5.0 → veris_ai-1.6.0}/.github/workflows/test.yml +0 -0
  21. {veris_ai-1.5.0 → veris_ai-1.6.0}/.gitignore +0 -0
  22. {veris_ai-1.5.0 → veris_ai-1.6.0}/CLAUDE.md +0 -0
  23. {veris_ai-1.5.0 → veris_ai-1.6.0}/LICENSE +0 -0
  24. {veris_ai-1.5.0 → veris_ai-1.6.0}/examples/__init__.py +0 -0
  25. {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/jaeger_interface/README.md +0 -0
  26. {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/jaeger_interface/client.py +0 -0
  27. {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/jaeger_interface/models.py +0 -0
  28. {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/models.py +0 -0
  29. {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/utils.py +0 -0
  30. {veris_ai-1.5.0 → veris_ai-1.6.0}/tests/README.md +0 -0
  31. {veris_ai-1.5.0 → veris_ai-1.6.0}/tests/__init__.py +0 -0
  32. {veris_ai-1.5.0 → veris_ai-1.6.0}/tests/conftest.py +0 -0
  33. {veris_ai-1.5.0 → veris_ai-1.6.0}/tests/fixtures/__init__.py +0 -0
  34. {veris_ai-1.5.0 → veris_ai-1.6.0}/tests/fixtures/http_server.py +0 -0
  35. {veris_ai-1.5.0 → veris_ai-1.6.0}/tests/fixtures/simple_app.py +0 -0
  36. {veris_ai-1.5.0 → veris_ai-1.6.0}/tests/test_mcp_protocol_server_mocked.py +0 -0
  37. {veris_ai-1.5.0 → veris_ai-1.6.0}/tests/test_tool_mock.py +0 -0
  38. {veris_ai-1.5.0 → veris_ai-1.6.0}/tests/test_utils.py +0 -0
File: {veris_ai-1.5.0 → veris_ai-1.6.0}/.cursor/rules/documentation-management.mdc

@@ -4,11 +4,12 @@ alwaysApply: false
  ---

  # Documentation Handling Guidelines
+ Ensure READMEs function as both a human-friendly guide and an LLM semantic router, while designating source code as the sole source of truth. This document also defines LLM workflows that continuously evolve documentation in response to actual usage patterns.

  ### Dual‑Use Overview

  * **Humans**: READMEs must be narrative‑driven, intuitive, and structured for readability, with inline cross‑links to related READMEs or code modules for deeper context.
- * **LLM Agents**: Treat these same READMEs as semantic routers. Use linked references and tags within the document to locate the most relevant code, tests, and workflows — never treat the README as the ultimate logic source.
+ * **LLM Agents**: Treat these same READMEs as semantic routers and exploration waypoints. Use linked references and tags to dynamically navigate through the documentation web, following trails of dependency and discovering emergent patterns — never treat the README as the ultimate logic source.
  * **Shared Goal**: Documentation must actively fight codebase complexity. Instead of growing endlessly, it should simplify, subtract redundancy, and delegate details to lower‑level READMEs in the hierarchy.

  ---
@@ -33,6 +34,7 @@ Updates should simplify where possible: remove outdated or redundant content, an
  2. **Parallel instance memory**: Maintain awareness of session context across LLM instances to keep documentation aligned with ongoing workflows.
  3. **LLM as thought partner**: Propose not only wording edits but also simplifications and delegation opportunities — e.g., linking to an existing module README rather than duplicating logic.
  4. **Complexity management**: Treat every update as a chance to prune. The README should remain a high‑level, navigable entry point, not a catch‑all.
+ 5. **Recursive documentation weaving**: When creating or updating a README, treat it as the entry point to a documentation web. Draft the initial README, then follow every dependency thread—navigating to parent and child READMEs, creating missing pieces, and refining connections. Continue this cyclical exploration until the entire documentation network achieves seamless consistency, with each iteration strengthening the coherence of the whole system.

  ---

@@ -41,13 +43,14 @@ Updates should simplify where possible: remove outdated or redundant content, an
  Each meaningful workspace or module must include a `README.md` designed to operate like a **hub page** in an IDE‑backed website:

  * **Quick Reference**: Purpose, setup, usage, and high‑level architecture.
- * **Linked Context**: Cross‑links to deeper READMEs, design docs, or code directories.
- * **Semantic Anchors**: Inline cues (e.g., tags, headings, links) that help the LLM map concepts to code without requiring redundant prose.
+ * **Linked Context**: Cross‑links to deeper child READMEs and parent READMEs, design docs, or code directories.
+ * **Visual Aids**: Use visual aids (e.g. mermaid diagrams) to as needed to help explain the codebase to humans.
+

  > Example:
  > *“For transaction processing, see [Transactions README](./transactions/README.md). For error handling logic, see [Error Handling README](./errors/README.md).”*

- The human and LLM share the same document: humans follow the narrative, while the LLM uses references and anchors to navigate the codebase semantically.
+ The human and LLM share the same document: humans follow the narrative, while the LLM uses references in the LLM to navigate the codebase semantically.

  ---

@@ -57,12 +60,11 @@ The human and LLM share the same document: humans follow the narrative, while th

  * Detect README drift by comparing live code to described behavior.
  * Perform updates with an emphasis on pruning duplication and linking to existing READMEs.
- * Use end‑of‑session summaries to suggest or implement simplifications.
  * Ensure docs remain aligned with code without ballooning in size.

  #### Human Responsibilities

- * Review LLM‑driven updates for clarity, accuracy, and usability.
+ * Diligently read through *ALL* LLM‑driven documentation updates for clarity, accuracy, and usability.
  * Refactor prose when needed to keep explanations intuitive.
  * Validate that cross‑links resolve correctly and are helpful for navigation.

@@ -77,4 +79,4 @@ The human and LLM share the same document: humans follow the narrative, while th
  | **Simplicity over sprawl** | Fight complexity by pruning, delegating, and cross‑linking. |
  | **One README, two roles** | The same README serves both humans and LLMs through cross‑referencing. |
  | **Real‑world grounding** | Updates reflect actual changes in workflows and architecture. |
- | **Human validation** | LLM edits require human review to ensure usability and accuracy. |
+ | **Human validation** | LLM documentation edits require thorough human driver review to ensure usability and accuracy. |
File: veris_ai-1.6.0/.pre-commit-config.yaml (new file)

@@ -0,0 +1,24 @@
+ repos:
+   - repo: local
+     hooks:
+       - id: ruff-check
+         name: Ruff lint
+         entry: uv run ruff check .
+         language: system
+         pass_filenames: false
+         types: [python]
+
+       - id: ruff-format-check
+         name: Ruff format (check)
+         entry: uv run ruff format --check .
+         language: system
+         pass_filenames: false
+         types: [python]
+
+       - id: mypy
+         name: Mypy type check
+         entry: uv run mypy src/veris_ai tests
+         language: system
+         pass_filenames: false
+         types: [python]
+
File: {veris_ai-1.5.0 → veris_ai-1.6.0}/CHANGELOG.md

@@ -5,20 +5,6 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

- ## [Unreleased]
-
- ### Added
- - Session ID tracking in OTEL traces: The `veris.session_id` attribute is now automatically added to all spans when a session ID is present
- - Session IDs from FastAPI MCP bearer tokens are automatically embedded in traces
- - Manual session management via `veris.set_session_id()` and `veris.clear_session_id()`
- - Enables filtering and correlation of traces by user session in Jaeger
- - New test suite for session ID tracing functionality
-
- ### Changed
- - `AgentsOTELBridgeProcessor` now imports and uses `_session_id_context` from `tool_mock`
- - Trace and span creation now includes session ID attributes when available
-
-
  ## v0.2.1 (2025-04-18)

  ### Bug Fixes
File: {veris_ai-1.5.0 → veris_ai-1.6.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: veris-ai
- Version: 1.5.0
+ Version: 1.6.0
  Summary: A Python package for Veris AI tools
  Project-URL: Homepage, https://github.com/veris-ai/veris-python-sdk
  Project-URL: Bug Tracker, https://github.com/veris-ai/veris-python-sdk/issues
@@ -9,8 +9,17 @@ License-Expression: MIT
  License-File: LICENSE
  Requires-Python: >=3.11
  Requires-Dist: httpx>=0.24.0
+ Requires-Dist: opentelemetry-api>=1.34.1
+ Requires-Dist: opentelemetry-exporter-otlp>=1.34.1
+ Requires-Dist: opentelemetry-instrumentation-fastapi>=0.55b1
+ Requires-Dist: opentelemetry-instrumentation-httpx>=0.55b1
+ Requires-Dist: opentelemetry-instrumentation-mcp>=0.44.1
+ Requires-Dist: opentelemetry-instrumentation-requests>=0.55b1
+ Requires-Dist: opentelemetry-instrumentation>=0.55b1
+ Requires-Dist: opentelemetry-sdk>=1.34.1
  Requires-Dist: pydantic>=2.0.0
  Requires-Dist: requests>=2.31.0
+ Requires-Dist: traceloop-sdk>=0.45.4
  Provides-Extra: dev
  Requires-Dist: black>=23.7.0; extra == 'dev'
  Requires-Dist: mypy>=1.5.1; extra == 'dev'
@@ -24,12 +33,7 @@ Provides-Extra: fastapi
  Requires-Dist: fastapi; extra == 'fastapi'
  Requires-Dist: fastapi-mcp>=0.4.0; extra == 'fastapi'
  Provides-Extra: instrument
- Requires-Dist: braintrust; extra == 'instrument'
  Requires-Dist: opentelemetry-api; extra == 'instrument'
- Requires-Dist: opentelemetry-exporter-otlp; extra == 'instrument'
- Requires-Dist: opentelemetry-exporter-otlp-proto-common; extra == 'instrument'
- Requires-Dist: opentelemetry-exporter-otlp-proto-grpc; extra == 'instrument'
- Requires-Dist: opentelemetry-exporter-otlp-proto-http; extra == 'instrument'
  Requires-Dist: opentelemetry-sdk; extra == 'instrument'
  Requires-Dist: wrapt; extra == 'instrument'
  Description-Content-Type: text/markdown
@@ -41,7 +45,7 @@ A Python package for Veris AI tools with simulation capabilities and FastAPI MCP
  ## Quick Reference

  **Purpose**: Tool mocking, tracing, and FastAPI MCP integration for AI agent development
- **Core Components**: [`tool_mock`](#function-mocking) • [`jaeger_interface`](#jaeger-trace-interface) • [`braintrust_tracing`](#tracing-integration) • [`fastapi_mcp`](#fastapi-mcp-integration)
+ **Core Components**: [`tool_mock`](#function-mocking) • [`observability`](#sdk-observability-helpers) • [`fastapi_mcp`](#fastapi-mcp-integration) • [`jaeger_interface`](#jaeger-trace-interface)
  **Deep Dive**: [`Module Architecture`](src/veris_ai/README.md) • [`Testing Guide`](tests/README.md) • [`Usage Examples`](examples/README.md)
  **Source of Truth**: Implementation details in [`src/veris_ai/`](src/veris_ai/) source code

@@ -58,7 +62,7 @@ uv add "veris-ai[dev,fastapi,instrument]"
  **Installation Profiles**:
  - `dev`: Development tools (ruff, pytest, mypy)
  - `fastapi`: FastAPI MCP integration
- - `instrument`: Braintrust/OpenTelemetry tracing
+ - `observability`: OpenTelemetry tracing

  ## Import Patterns

@@ -69,7 +73,7 @@ uv add "veris-ai[dev,fastapi,instrument]"
  from veris_ai import veris, JaegerClient

  # Optional features (require extras)
- from veris_ai import braintrust_tracing  # Requires [instrument]
+ from veris_ai import init_observability, instrument_fastapi_app  # Provided by SDK observability helpers
  ```

  **Complete Import Strategies**: See [`examples/README.md`](examples/README.md) for five different import approaches, conditional features, and integration patterns.
@@ -88,22 +92,33 @@ from veris_ai import braintrust_tracing # Requires [instrument]

  **Configuration Details**: See [`src/veris_ai/tool_mock.py`](src/veris_ai/tool_mock.py) for environment handling logic.

- ## Tracing Integration

- **Semantic Tag**: `distributed-tracing`
+ ### SDK Observability Helpers

- Parallel tracing to Braintrust and Jaeger/OpenTelemetry for monitoring and evaluation.
+ The SDK provides optional-safe observability helpers that standardize OpenTelemetry setup and W3C context propagation across services.

  ```python
- from veris_ai import braintrust_tracing
+ from fastapi import FastAPI
+ from veris_ai import init_observability, instrument_fastapi_app
+
+ # Initialize tracing/export early (no-op if dependencies are absent)
+ init_observability(service_name="my-customer-service")
+
+ app = FastAPI()

- # Enable dual tracing
- braintrust_tracing.instrument(service_name="my-service", otlp_endpoint="http://localhost:4317")
+ # Ensure inbound HTTP requests continue W3C traces
+ instrument_fastapi_app(app)
  ```

- **Session Management**: Automatic session ID extraction from bearer tokens. Manual session control via `veris.set_session_id()` and `veris.clear_session_id()`.
+ What this enables:
+ - Sets global W3C propagator (TraceContext + Baggage)
+ - Optionally instruments FastAPI, requests, httpx, MCP client if installed
+ - Includes request hooks to attach outbound `traceparent` on HTTP calls for continuity

- **Implementation Details**: See [`src/veris_ai/braintrust_tracing.py`](src/veris_ai/braintrust_tracing.py) for instrumentation logic.
+ End-to-end propagation with the simulator:
+ - The simulator injects W3C headers when connecting to your FastAPI MCP endpoints
+ - The SDK injects W3C headers on `/api/v2/tool_mock` and logging requests back to the simulator
+ - Result: customer agent spans and tool mocks appear under the same distributed trace

  ## Function Mocking

@@ -207,7 +222,7 @@ pytest --cov=veris_ai # Test with coverage

  **Semantic Tag**: `module-architecture`

- **Core Modules**: `tool_mock` (mocking), `jaeger_interface` (trace queries), `braintrust_tracing` (dual tracing), `utils` (schema conversion)
+ **Core Modules**: `tool_mock` (mocking), `jaeger_interface` (trace queries), `utils` (schema conversion)

  **Complete Architecture**: See [`src/veris_ai/README.md`](src/veris_ai/README.md) for module overview, implementation flows, and configuration details.
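
The README hunk above shows `init_observability(service_name=...)` and `instrument_fastapi_app(app)` but not how the exporter endpoint is chosen. A minimal sketch, assuming the default OTLP exporter wiring and the standard OpenTelemetry environment variable (nothing in this diff defines a Veris-specific endpoint setting):

```python
import os

from fastapi import FastAPI

from veris_ai import init_observability, instrument_fastapi_app

# Standard OTel variable read by the OTLP exporters; point it at your collector/Jaeger.
os.environ.setdefault("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4317")

# Initialize early; per the README this degrades to a no-op when optional pieces are missing.
init_observability(service_name="my-customer-service")

app = FastAPI()
instrument_fastapi_app(app)  # inbound requests continue the caller's W3C trace


@app.get("/health")
def health() -> dict[str, str]:
    # Spans from this handler should appear under the simulator's distributed trace.
    return {"status": "ok"}
```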

File: {veris_ai-1.5.0 → veris_ai-1.6.0}/README.md

@@ -5,7 +5,7 @@ A Python package for Veris AI tools with simulation capabilities and FastAPI MCP
  ## Quick Reference

  **Purpose**: Tool mocking, tracing, and FastAPI MCP integration for AI agent development
- **Core Components**: [`tool_mock`](#function-mocking) • [`jaeger_interface`](#jaeger-trace-interface) • [`braintrust_tracing`](#tracing-integration) • [`fastapi_mcp`](#fastapi-mcp-integration)
+ **Core Components**: [`tool_mock`](#function-mocking) • [`observability`](#sdk-observability-helpers) • [`fastapi_mcp`](#fastapi-mcp-integration) • [`jaeger_interface`](#jaeger-trace-interface)
  **Deep Dive**: [`Module Architecture`](src/veris_ai/README.md) • [`Testing Guide`](tests/README.md) • [`Usage Examples`](examples/README.md)
  **Source of Truth**: Implementation details in [`src/veris_ai/`](src/veris_ai/) source code

@@ -22,7 +22,7 @@ uv add "veris-ai[dev,fastapi,instrument]"
  **Installation Profiles**:
  - `dev`: Development tools (ruff, pytest, mypy)
  - `fastapi`: FastAPI MCP integration
- - `instrument`: Braintrust/OpenTelemetry tracing
+ - `observability`: OpenTelemetry tracing

  ## Import Patterns

@@ -33,7 +33,7 @@ uv add "veris-ai[dev,fastapi,instrument]"
  from veris_ai import veris, JaegerClient

  # Optional features (require extras)
- from veris_ai import braintrust_tracing  # Requires [instrument]
+ from veris_ai import init_observability, instrument_fastapi_app  # Provided by SDK observability helpers
  ```

  **Complete Import Strategies**: See [`examples/README.md`](examples/README.md) for five different import approaches, conditional features, and integration patterns.
@@ -52,22 +52,33 @@ from veris_ai import braintrust_tracing # Requires [instrument]

  **Configuration Details**: See [`src/veris_ai/tool_mock.py`](src/veris_ai/tool_mock.py) for environment handling logic.

- ## Tracing Integration

- **Semantic Tag**: `distributed-tracing`
+ ### SDK Observability Helpers

- Parallel tracing to Braintrust and Jaeger/OpenTelemetry for monitoring and evaluation.
+ The SDK provides optional-safe observability helpers that standardize OpenTelemetry setup and W3C context propagation across services.

  ```python
- from veris_ai import braintrust_tracing
+ from fastapi import FastAPI
+ from veris_ai import init_observability, instrument_fastapi_app
+
+ # Initialize tracing/export early (no-op if dependencies are absent)
+ init_observability(service_name="my-customer-service")
+
+ app = FastAPI()

- # Enable dual tracing
- braintrust_tracing.instrument(service_name="my-service", otlp_endpoint="http://localhost:4317")
+ # Ensure inbound HTTP requests continue W3C traces
+ instrument_fastapi_app(app)
  ```

- **Session Management**: Automatic session ID extraction from bearer tokens. Manual session control via `veris.set_session_id()` and `veris.clear_session_id()`.
+ What this enables:
+ - Sets global W3C propagator (TraceContext + Baggage)
+ - Optionally instruments FastAPI, requests, httpx, MCP client if installed
+ - Includes request hooks to attach outbound `traceparent` on HTTP calls for continuity

- **Implementation Details**: See [`src/veris_ai/braintrust_tracing.py`](src/veris_ai/braintrust_tracing.py) for instrumentation logic.
+ End-to-end propagation with the simulator:
+ - The simulator injects W3C headers when connecting to your FastAPI MCP endpoints
+ - The SDK injects W3C headers on `/api/v2/tool_mock` and logging requests back to the simulator
+ - Result: customer agent spans and tool mocks appear under the same distributed trace

  ## Function Mocking

@@ -171,7 +182,7 @@ pytest --cov=veris_ai # Test with coverage

  **Semantic Tag**: `module-architecture`

- **Core Modules**: `tool_mock` (mocking), `jaeger_interface` (trace queries), `braintrust_tracing` (dual tracing), `utils` (schema conversion)
+ **Core Modules**: `tool_mock` (mocking), `jaeger_interface` (trace queries), `utils` (schema conversion)

  **Complete Architecture**: See [`src/veris_ai/README.md`](src/veris_ai/README.md) for module overview, implementation flows, and configuration details.

File: {veris_ai-1.5.0 → veris_ai-1.6.0}/examples/README.md

@@ -47,15 +47,7 @@ from veris_ai.tool_mock import veris as mock_tool
  **Use Case**: Maximum control over imported dependencies
  **Benefits**: Explicit imports, reduced memory footprint

- ### 4. Conditional Environment-Based Imports
- ```python
- if os.getenv("ENABLE_TRACING") == "true":
-     from veris_ai.braintrust_tracing import instrument
- ```
- **Use Case**: Feature flags, environment-specific behavior
- **Pattern**: Runtime import decisions based on configuration
-
- ### 5. FastAPI Integration with HTTP Transport
+ ### 4. FastAPI Integration with HTTP Transport
  ```python
  if os.getenv("USE_FASTAPI") == "true":
      from fastapi import FastAPI

File: {veris_ai-1.5.0 → veris_ai-1.6.0}/examples/import_options.py

@@ -50,18 +50,7 @@ def health_check() -> dict[str, str]:
      return {"status": "healthy", "timestamp": "2024-01-01"}


- # Option 4: Conditional imports based on environment
-
- if os.getenv("ENABLE_TRACING") == "true":
-     try:
-         from veris_ai.braintrust_tracing import instrument
-
-         instrument()
-     except ImportError:
-         print("Tracing requested but dependencies not installed")
-
-
- # Option 5: FastAPI integration (requires [fastapi] extra)
+ # Option 4: FastAPI integration (requires [fastapi] extra)
  if os.getenv("USE_FASTAPI") == "true":
      try:
          from fastapi import FastAPI

File: {veris_ai-1.5.0 → veris_ai-1.6.0}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

  [project]
  name = "veris-ai"
- version = "1.5.0"
+ version = "1.6.0"
  description = "A Python package for Veris AI tools"
  readme = "README.md"
  requires-python = ">=3.11"
@@ -16,6 +16,15 @@ dependencies = [
      "httpx>=0.24.0",
      "pydantic>=2.0.0",
      "requests>=2.31.0",
+     "opentelemetry-sdk>=1.34.1",
+     "opentelemetry-exporter-otlp>=1.34.1",
+     "opentelemetry-instrumentation>=0.55b1",
+     "opentelemetry-instrumentation-fastapi>=0.55b1",
+     "opentelemetry-instrumentation-requests>=0.55b1",
+     "opentelemetry-instrumentation-httpx>=0.55b1",
+     "opentelemetry-instrumentation-mcp>=0.44.1",
+     "traceloop-sdk>=0.45.4",
+     "opentelemetry-api>=1.34.1",
  ]

  [project.optional-dependencies]
@@ -34,14 +43,9 @@ fastapi = [
      "fastapi-mcp>=0.4.0",
  ]
  instrument = [
-     "braintrust",
      "wrapt",
      "opentelemetry-api",
      "opentelemetry-sdk",
-     "opentelemetry-exporter-otlp",
-     "opentelemetry-exporter-otlp-proto-grpc",
-     "opentelemetry-exporter-otlp-proto-http",
-     "opentelemetry-exporter-otlp-proto-common"
  ]

  [project.urls]
@@ -103,6 +107,13 @@ ignore = [
      "ANN204", # Missing type annotation for *args
      "ANN205", # Missing type annotation for **kwargs
      "COM812", # Trailing comma conflicts with formatter
+     "PLC0415", # allow non-top-level imports
+     "I001", # ignore unsorted/unformatted import block
+     "EM101", # ignore exception must not use a string literal
+     "D202", # ignore No blank lines allowed after function docstring
+     "C901", # ignore Complexity is too high
+     "TC002", # ignore move X into type checking
+     "PLR0915", # ignore Too many statements
  ]

  [tool.ruff.lint.isort]
@@ -156,6 +167,8 @@ veris-ai = { workspace = true }

  [dependency-groups]
  dev = [
+     "fastapi>=0.116.1",
+     "fastapi-mcp>=0.4.0",
      "mypy>=1.17.0",
      "openai-agents>=0.0.1",
      "pytest>=8.4.1",

File: {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/README.md

@@ -15,7 +15,6 @@ This module contains the core implementation of the Veris AI Python SDK. Each co
  | Module | Purpose | Key Classes/Functions | Lines |
  |--------|---------|----------------------|-------|
  | [`tool_mock.py`](tool_mock.py) | Function mocking & FastAPI MCP | `VerisSDK`, `@mock`, `@stub` | 327 |
- | [`braintrust_tracing.py`](braintrust_tracing.py) | Dual tracing instrumentation | `instrument()` | 283 |
  | [`utils.py`](utils.py) | Type utilities & JSON schema | `extract_json_schema()` | 272 |
  | [`logging.py`](logging.py) | Logging configuration | `setup_logging()` | 116 |
  | [`models.py`](models.py) | Data models | Type definitions | 12 |
@@ -40,12 +39,6 @@ This module contains the core implementation of the Veris AI Python SDK. Each co

  **Implementation**: [`tool_mock.py:250-300`](tool_mock.py)

- ### Tracing Flow
- 1. **Dual Setup**: Braintrust + OpenTelemetry instrumentation
- 2. **Session Tagging**: Bearer tokens → session IDs
- 3. **Span Attribution**: All operations tagged with `veris.session_id`
-
- **Implementation**: [`braintrust_tracing.py:50-150`](braintrust_tracing.py)

  ## Configuration

File: veris_ai-1.6.0/src/veris_ai/__init__.py (new file)

@@ -0,0 +1,17 @@
+ """Veris AI Python SDK."""
+
+ __version__ = "0.1.0"
+
+ # Import lightweight modules that only use base dependencies
+ from .jaeger_interface import JaegerClient
+ from .models import ResponseExpectation
+ from .tool_mock import veris
+ from .observability import init_observability, instrument_fastapi_app
+
+ __all__ = [
+     "veris",
+     "JaegerClient",
+     "ResponseExpectation",
+     "init_observability",
+     "instrument_fastapi_app",
+ ]
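
One detail worth flagging in the new `__init__.py`: the re-exported surface is easy to smoke-test, but `__version__` is hard-coded to `"0.1.0"` even though the package ships as 1.6.0. A quick check using only names exported in `__all__` above:

```python
import veris_ai
from veris_ai import JaegerClient, ResponseExpectation, init_observability, instrument_fastapi_app, veris

# All five public names import from the package root after this release.
print(veris_ai.__version__)  # prints "0.1.0" per the file above, not the 1.6.0 package version
```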

File: {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/jaeger_interface/__init__.py

@@ -34,6 +34,6 @@ are fully typed using *pydantic* models so that IDEs can provide proper
  autocomplete and type checking.
  """

- from .client import JaegerClient as JaegerClient  # noqa: F401
+ from .client import JaegerClient

  __all__ = ["JaegerClient"]

File: {veris_ai-1.5.0 → veris_ai-1.6.0}/src/veris_ai/logging.py

@@ -21,6 +21,7 @@ async def log_tool_call_async(
      if not base_url:
          logger.warning("VERIS_ENDPOINT_URL not set, skipping tool call logging")
          return
+     base_url = base_url.rstrip("/")

      endpoint = f"{base_url}/api/v2/simulations/{session_id}/log_tool_call"
      payload = {
@@ -32,8 +33,17 @@
      timeout = float(os.getenv("VERIS_MOCK_TIMEOUT", "90.0"))

      try:
+         headers: dict[str, str] | None = None
+         try:
+             from opentelemetry.propagate import get_global_textmap
+
+             headers = {}
+             get_global_textmap().inject(headers)
+         except Exception:  # pragma: no cover - otel optional
+             headers = None
+
          async with httpx.AsyncClient(timeout=timeout) as client:
-             response = await client.post(endpoint, json=payload)
+             response = await client.post(endpoint, json=payload, headers=headers)
              response.raise_for_status()
              logger.debug(f"Tool call logged for {function_name}")
      except Exception as e:
@@ -62,15 +72,24 @@ def log_tool_call_sync(
      timeout = float(os.getenv("VERIS_MOCK_TIMEOUT", "90.0"))

      try:
+         headers: dict[str, str] | None = None
+         try:
+             from opentelemetry.propagate import get_global_textmap  # type: ignore[import-not-found]
+
+             headers = {}
+             get_global_textmap().inject(headers)
+         except Exception:  # pragma: no cover - otel optional
+             headers = None
+
          with httpx.Client(timeout=timeout) as client:
-             response = client.post(endpoint, json=payload)
+             response = client.post(endpoint, json=payload, headers=headers)
              response.raise_for_status()
              logger.debug(f"Tool call logged for {function_name}")
      except Exception as e:
          logger.warning(f"Failed to log tool call for {function_name}: {e}")


- async def log_tool_response_async(session_id: str, response: object) -> None:
+ async def log_tool_response_async(session_id: str, response: Any) -> None:  # noqa: ANN401
      """Log tool response asynchronously to the VERIS logging endpoint."""
      base_url = os.getenv("VERIS_ENDPOINT_URL")
      if not base_url:
@@ -85,15 +104,24 @@ async def log_tool_response_async(session_id: str, response: object) -> None:
      timeout = float(os.getenv("VERIS_MOCK_TIMEOUT", "90.0"))

      try:
+         headers: dict[str, str] | None = None
+         try:
+             from opentelemetry.propagate import get_global_textmap  # type: ignore[import-not-found]
+
+             headers = {}
+             get_global_textmap().inject(headers)
+         except Exception:  # pragma: no cover - otel optional
+             headers = None
+
          async with httpx.AsyncClient(timeout=timeout) as client:
-             http_response = await client.post(endpoint, json=payload)
+             http_response = await client.post(endpoint, json=payload, headers=headers)
              http_response.raise_for_status()
              logger.debug("Tool response logged")
      except Exception as e:
          logger.warning(f"Failed to log tool response: {e}")


- def log_tool_response_sync(session_id: str, response: object) -> None:
+ def log_tool_response_sync(session_id: str, response: Any) -> None:  # noqa: ANN401
      """Log tool response synchronously to the VERIS logging endpoint."""
      base_url = os.getenv("VERIS_ENDPOINT_URL")
      if not base_url:
@@ -108,8 +136,17 @@ def log_tool_response_sync(session_id: str, response: object) -> None:
      timeout = float(os.getenv("VERIS_MOCK_TIMEOUT", "90.0"))

      try:
+         headers: dict[str, str] | None = None
+         try:
+             from opentelemetry.propagate import get_global_textmap  # type: ignore[import-not-found]
+
+             headers = {}
+             get_global_textmap().inject(headers)
+         except Exception:  # pragma: no cover - otel optional
+             headers = None
+
          with httpx.Client(timeout=timeout) as client:
-             http_response = client.post(endpoint, json=payload)
+             http_response = client.post(endpoint, json=payload, headers=headers)
              http_response.raise_for_status()
              logger.debug("Tool response logged")
      except Exception as e:
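
The header-injection block added to `logging.py` is repeated verbatim in all four functions. A possible follow-up (a sketch only, not part of this release) would hoist it into a single helper and pass its result to each `client.post(...)` call:

```python
def _w3c_headers() -> dict[str, str] | None:
    """Build outbound trace-context headers; None when OpenTelemetry is absent or injection fails."""
    try:
        from opentelemetry.propagate import get_global_textmap

        headers: dict[str, str] = {}
        # Copies the active span's `traceparent` (and baggage) into the carrier dict.
        get_global_textmap().inject(headers)
    except Exception:  # pragma: no cover - otel optional
        return None
    return headers
```

Each call site would then shrink to `client.post(endpoint, json=payload, headers=_w3c_headers())`, preserving the same optional-OTel behavior as the inlined version above.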