voiceeval-sdk 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- voiceeval_sdk-0.1.0/.github/workflows/publish.yml +30 -0
- voiceeval_sdk-0.1.0/.gitignore +33 -0
- voiceeval_sdk-0.1.0/CONTRIBUTING.md +49 -0
- voiceeval_sdk-0.1.0/LICENSE +21 -0
- voiceeval_sdk-0.1.0/PKG-INFO +140 -0
- voiceeval_sdk-0.1.0/README.md +91 -0
- voiceeval_sdk-0.1.0/examples/basic_eval.py +46 -0
- voiceeval_sdk-0.1.0/examples/realtime_monitor.py +8 -0
- voiceeval_sdk-0.1.0/pyproject.toml +45 -0
- voiceeval_sdk-0.1.0/server/main.py +70 -0
- voiceeval_sdk-0.1.0/src/voiceeval/__init__.py +5 -0
- voiceeval_sdk-0.1.0/src/voiceeval/audio/__init__.py +5 -0
- voiceeval_sdk-0.1.0/src/voiceeval/audio/ingestion.py +9 -0
- voiceeval_sdk-0.1.0/src/voiceeval/audio/transcription.py +9 -0
- voiceeval_sdk-0.1.0/src/voiceeval/audio/vad.py +11 -0
- voiceeval_sdk-0.1.0/src/voiceeval/client.py +87 -0
- voiceeval_sdk-0.1.0/src/voiceeval/metrics/__init__.py +14 -0
- voiceeval_sdk-0.1.0/src/voiceeval/metrics/base.py +20 -0
- voiceeval_sdk-0.1.0/src/voiceeval/metrics/conversation.py +19 -0
- voiceeval_sdk-0.1.0/src/voiceeval/metrics/performance.py +18 -0
- voiceeval_sdk-0.1.0/src/voiceeval/metrics/voice.py +18 -0
- voiceeval_sdk-0.1.0/src/voiceeval/models.py +41 -0
- voiceeval_sdk-0.1.0/src/voiceeval/observability/__init__.py +3 -0
- voiceeval_sdk-0.1.0/src/voiceeval/observability/instrumentation.py +53 -0
- voiceeval_sdk-0.1.0/src/voiceeval/runners/__init__.py +4 -0
- voiceeval_sdk-0.1.0/src/voiceeval/runners/offline.py +16 -0
- voiceeval_sdk-0.1.0/src/voiceeval/runners/simulator.py +6 -0
- voiceeval_sdk-0.1.0/tests/test_client.py +7 -0
- voiceeval_sdk-0.1.0/tests/test_otel_auth.py +52 -0
- voiceeval_sdk-0.1.0/uv.lock +963 -0
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# Publishes the package to PyPI whenever a GitHub release is published,
# or on demand via the "Run workflow" button (workflow_dispatch).
name: Publish to PyPI

on:
  release:
    types: [published]
  workflow_dispatch:

jobs:
  build-and-publish:
    name: Build and Publish
    runs-on: ubuntu-latest
    # Deployment environment gate; protection rules (reviewers, branch
    # restrictions) can be attached to "pypi" in the repo settings.
    environment: pypi
    permissions:
      # id-token: write enables OIDC "trusted publishing" to PyPI,
      # so no long-lived PyPI API token has to be stored as a secret.
      id-token: write
      contents: read

    steps:
      - name: Checkout source
        uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v5
        with:
          version: "latest"

      # Builds sdist + wheel into dist/ using the project's build backend.
      - name: Build package
        run: uv build

      # uv publish picks up the OIDC token automatically on GitHub Actions
      # when trusted publishing is configured for this project on PyPI.
      - name: Publish to PyPI
        run: uv publish
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
# Python
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.pyc
|
|
4
|
+
*.pyo
|
|
5
|
+
*.pyd
|
|
6
|
+
.Python
|
|
7
|
+
env/
|
|
8
|
+
venv/
|
|
9
|
+
.venv/
|
|
10
|
+
pip-log.txt
|
|
11
|
+
pip-delete-this-directory.txt
|
|
12
|
+
|
|
13
|
+
# Environments
|
|
14
|
+
.env
|
|
15
|
+
|
|
16
|
+
# Testing
|
|
17
|
+
.pytest_cache/
|
|
18
|
+
.coverage
|
|
19
|
+
htmlcov/
|
|
20
|
+
|
|
21
|
+
# Distribution
|
|
22
|
+
dist/
|
|
23
|
+
build/
|
|
24
|
+
*.egg-info/
|
|
25
|
+
|
|
26
|
+
# MyPy
|
|
27
|
+
.mypy_cache/
|
|
28
|
+
.dmypy.json
|
|
29
|
+
dmypy.json
|
|
30
|
+
|
|
31
|
+
# IDE
|
|
32
|
+
.vscode/
|
|
33
|
+
.idea/
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
# Contributing to VoiceEval SDK
|
|
2
|
+
|
|
3
|
+
We love your input! We want to make contributing to VoiceEval as easy and transparent as possible, whether it's:
|
|
4
|
+
|
|
5
|
+
- Reporting a bug
|
|
6
|
+
- Discussing the current state of the code
|
|
7
|
+
- Submitting a fix
|
|
8
|
+
- Proposing new features
|
|
9
|
+
- Becoming a maintainer
|
|
10
|
+
|
|
11
|
+
## Development Workflow
|
|
12
|
+
|
|
13
|
+
We use `uv` and `hatch` for dependency management and packaging.
|
|
14
|
+
|
|
15
|
+
1. **Clone the repo:**
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
git clone https://github.com/voiceeval/voiceeval-sdk.git
|
|
19
|
+
cd voiceeval-sdk
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
2. **Install dependencies:**
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
# Install dev dependencies (includes pytest, etc.)
|
|
26
|
+
uv sync
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
3. **Run Tests:**
|
|
30
|
+
|
|
31
|
+
```bash
|
|
32
|
+
uv run pytest
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
4. **Linting & Formatting:**
|
|
36
|
+
|
|
37
|
+
We recommend using `ruff` (if configured) or standard tools. Ensure your code is pythonic.
|
|
38
|
+
|
|
39
|
+
## Pull Requests
|
|
40
|
+
|
|
41
|
+
1. Fork the repo and create your branch from `main`.
|
|
42
|
+
2. If you've added code that should be tested, add tests.
|
|
43
|
+
3. If you've changed APIs, update the documentation.
|
|
44
|
+
4. Ensure the test suite passes.
|
|
45
|
+
5. Issue that PR!
|
|
46
|
+
|
|
47
|
+
## License
|
|
48
|
+
|
|
49
|
+
By contributing, you agree that your contributions will be licensed under its MIT License.
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 VoiceEval
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: voiceeval-sdk
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Enterprise-grade Observability and Evaluation SDK for Voice Agents
|
|
5
|
+
Project-URL: Repository, https://github.com/voiceeval/voiceeval-sdk
|
|
6
|
+
Project-URL: Homepage, https://voiceeval.com
|
|
7
|
+
Author-email: VoiceEval Team <hello@voiceeval.com>
|
|
8
|
+
License: MIT License
|
|
9
|
+
|
|
10
|
+
Copyright (c) 2025 VoiceEval
|
|
11
|
+
|
|
12
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
13
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
14
|
+
in the Software without restriction, including without limitation the rights
|
|
15
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
16
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
17
|
+
furnished to do so, subject to the following conditions:
|
|
18
|
+
|
|
19
|
+
The above copyright notice and this permission notice shall be included in all
|
|
20
|
+
copies or substantial portions of the Software.
|
|
21
|
+
|
|
22
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
23
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
24
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
25
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
26
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
27
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
28
|
+
SOFTWARE.
|
|
29
|
+
License-File: LICENSE
|
|
30
|
+
Classifier: Development Status :: 4 - Beta
|
|
31
|
+
Classifier: Intended Audience :: Developers
|
|
32
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
33
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
34
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
35
|
+
Requires-Python: >=3.11
|
|
36
|
+
Requires-Dist: fastapi>=0.127.1
|
|
37
|
+
Requires-Dist: httpx>=0.28.1
|
|
38
|
+
Requires-Dist: openai>=2.14.0
|
|
39
|
+
Requires-Dist: opentelemetry-api>=1.39.1
|
|
40
|
+
Requires-Dist: opentelemetry-exporter-otlp>=1.39.1
|
|
41
|
+
Requires-Dist: opentelemetry-instrumentation-anthropic>=0.50.1
|
|
42
|
+
Requires-Dist: opentelemetry-instrumentation-google-generativeai>=0.50.1
|
|
43
|
+
Requires-Dist: opentelemetry-instrumentation-openai>=0.50.1
|
|
44
|
+
Requires-Dist: opentelemetry-sdk>=1.39.1
|
|
45
|
+
Requires-Dist: pydantic>=2.0.0
|
|
46
|
+
Requires-Dist: python-dotenv>=1.2.1
|
|
47
|
+
Requires-Dist: uvicorn>=0.40.0
|
|
48
|
+
Description-Content-Type: text/markdown
|
|
49
|
+
|
|
50
|
+
# VoiceEval SDK (Python)
|
|
51
|
+
|
|
52
|
+
[](https://www.python.org/)
|
|
53
|
+
[](LICENSE)
|
|
54
|
+
[](https://opentelemetry.io/)
|
|
55
|
+
|
|
56
|
+
**VoiceEval** is an enterprise-grade observability and evaluation SDK designed specifically for Voice Agents and LLM-powered applications. It provides detailed tracing, latency breakdown, and cost analysis with zero configuration.
|
|
57
|
+
|
|
58
|
+
## 🚀 Key Features
|
|
59
|
+
|
|
60
|
+
- **🔎 Zero-Config Auto-Instrumentation**: Automatically detects and traces calls from major LLM providers (OpenAI, Anthropic, Google Gemini) without any code changes.
|
|
61
|
+
- **🛡️ Secure Ingestion Proxy**: All traces are sent through a secure proxy (`server/`), separating your application logic from downstream observability backends (like Langfuse). This ensures you maintain full control over your data and API keys.
|
|
62
|
+
- **⚡ High Performance**: Built on top of `OpenTelemetry`, utilizing efficient asynchronous Batch exports (`OTLP/HTTP`) to ensure negligible runtime overhead.
|
|
63
|
+
- **🧩 Standardized Data Model**: Uses standard OTel semantic conventions, making your data portable and interoperable with any OTel-compatible backend.
|
|
64
|
+
|
|
65
|
+
## 📦 Installation
|
|
66
|
+
|
|
67
|
+
Install the SDK via `pip` (or `uv`):
|
|
68
|
+
|
|
69
|
+
```bash
|
|
70
|
+
pip install voiceeval-sdk
|
|
71
|
+
# or
|
|
72
|
+
uv add voiceeval-sdk
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
For local development:
|
|
76
|
+
```bash
|
|
77
|
+
git clone https://github.com/voiceeval/voiceeval-sdk.git
|
|
78
|
+
cd voiceeval-sdk
|
|
79
|
+
pip install -e .
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
## 🏁 Quickstart
|
|
83
|
+
|
|
84
|
+
### 1. Initialize the Client
|
|
85
|
+
|
|
86
|
+
Initialize the `Client` at the start of your application. This single line sets up the OTel exporter and enables auto-instrumentation for all installed LLM libraries.
|
|
87
|
+
|
|
88
|
+
```python
|
|
89
|
+
from voiceeval import Client
|
|
90
|
+
|
|
91
|
+
# Initialize SDK - connects to your local proxy or prod endpoint
|
|
92
|
+
client = Client(
|
|
93
|
+
api_key="your_voiceeval_api_key", # or set VOICE_EVAL_API_KEY env var
|
|
94
|
+
    base_url="https://api.voiceeval.com/v1/traces"
|
|
95
|
+
)
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
### 2. Run Your Agent
|
|
99
|
+
|
|
100
|
+
That's it! Any calls to supported libraries like `openai` or `anthropic` are now automatically traced.
|
|
101
|
+
|
|
102
|
+
```python
|
|
103
|
+
from openai import OpenAI
|
|
104
|
+
|
|
105
|
+
# No manual wrapping needed!
|
|
106
|
+
client_openai = OpenAI()
|
|
107
|
+
response = client_openai.chat.completions.create(
|
|
108
|
+
model="gpt-4o",
|
|
109
|
+
messages=[{"role": "user", "content": "Hello world"}]
|
|
110
|
+
)
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
### 3. Manual Tracing (Optional)
|
|
114
|
+
|
|
115
|
+
For functions that don't call LLMs (like your business logic or RAG pipeline), use the `@observe` decorator:
|
|
116
|
+
|
|
117
|
+
```python
|
|
118
|
+
from voiceeval import observe
|
|
119
|
+
|
|
120
|
+
@observe(name_override="rag_retrieval")
|
|
121
|
+
def retrieve_documents(query: str):
|
|
122
|
+
# Your complex logic here
|
|
123
|
+
return docs
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
## 🔌 Supported Providers
|
|
127
|
+
|
|
128
|
+
The SDK automatically instruments the following libraries if they are found in your environment:
|
|
129
|
+
|
|
130
|
+
| Provider | Library | Status |
|
|
131
|
+
| :--- | :--- | :--- |
|
|
132
|
+
| **OpenAI** | `openai` | ✅ Auto-Instrumented |
|
|
133
|
+
| **Anthropic** | `anthropic` | ✅ Auto-Instrumented |
|
|
134
|
+
| **Google Gemini** | `google-generativeai` | ✅ Auto-Instrumented |
|
|
135
|
+
|
|
136
|
+
*Note: If a library is not installed, the SDK gracefully skips it.*
|
|
137
|
+
|
|
138
|
+
## 📄 License
|
|
139
|
+
|
|
140
|
+
MIT
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
# VoiceEval SDK (Python)
|
|
2
|
+
|
|
3
|
+
[](https://www.python.org/)
|
|
4
|
+
[](LICENSE)
|
|
5
|
+
[](https://opentelemetry.io/)
|
|
6
|
+
|
|
7
|
+
**VoiceEval** is an enterprise-grade observability and evaluation SDK designed specifically for Voice Agents and LLM-powered applications. It provides detailed tracing, latency breakdown, and cost analysis with zero configuration.
|
|
8
|
+
|
|
9
|
+
## 🚀 Key Features
|
|
10
|
+
|
|
11
|
+
- **🔎 Zero-Config Auto-Instrumentation**: Automatically detects and traces calls from major LLM providers (OpenAI, Anthropic, Google Gemini) without any code changes.
|
|
12
|
+
- **🛡️ Secure Ingestion Proxy**: All traces are sent through a secure proxy (`server/`), separating your application logic from downstream observability backends (like Langfuse). This ensures you maintain full control over your data and API keys.
|
|
13
|
+
- **⚡ High Performance**: Built on top of `OpenTelemetry`, utilizing efficient asynchronous Batch exports (`OTLP/HTTP`) to ensure negligible runtime overhead.
|
|
14
|
+
- **🧩 Standardized Data Model**: Uses standard OTel semantic conventions, making your data portable and interoperable with any OTel-compatible backend.
|
|
15
|
+
|
|
16
|
+
## 📦 Installation
|
|
17
|
+
|
|
18
|
+
Install the SDK via `pip` (or `uv`):
|
|
19
|
+
|
|
20
|
+
```bash
|
|
21
|
+
pip install voiceeval-sdk
|
|
22
|
+
# or
|
|
23
|
+
uv add voiceeval-sdk
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
For local development:
|
|
27
|
+
```bash
|
|
28
|
+
git clone https://github.com/voiceeval/voiceeval-sdk.git
|
|
29
|
+
cd voiceeval-sdk
|
|
30
|
+
pip install -e .
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## 🏁 Quickstart
|
|
34
|
+
|
|
35
|
+
### 1. Initialize the Client
|
|
36
|
+
|
|
37
|
+
Initialize the `Client` at the start of your application. This single line sets up the OTel exporter and enables auto-instrumentation for all installed LLM libraries.
|
|
38
|
+
|
|
39
|
+
```python
|
|
40
|
+
from voiceeval import Client
|
|
41
|
+
|
|
42
|
+
# Initialize SDK - connects to your local proxy or prod endpoint
|
|
43
|
+
client = Client(
|
|
44
|
+
api_key="your_voiceeval_api_key", # or set VOICE_EVAL_API_KEY env var
|
|
45
|
+
    base_url="https://api.voiceeval.com/v1/traces"
|
|
46
|
+
)
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
### 2. Run Your Agent
|
|
50
|
+
|
|
51
|
+
That's it! Any calls to supported libraries like `openai` or `anthropic` are now automatically traced.
|
|
52
|
+
|
|
53
|
+
```python
|
|
54
|
+
from openai import OpenAI
|
|
55
|
+
|
|
56
|
+
# No manual wrapping needed!
|
|
57
|
+
client_openai = OpenAI()
|
|
58
|
+
response = client_openai.chat.completions.create(
|
|
59
|
+
model="gpt-4o",
|
|
60
|
+
messages=[{"role": "user", "content": "Hello world"}]
|
|
61
|
+
)
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
### 3. Manual Tracing (Optional)
|
|
65
|
+
|
|
66
|
+
For functions that don't call LLMs (like your business logic or RAG pipeline), use the `@observe` decorator:
|
|
67
|
+
|
|
68
|
+
```python
|
|
69
|
+
from voiceeval import observe
|
|
70
|
+
|
|
71
|
+
@observe(name_override="rag_retrieval")
|
|
72
|
+
def retrieve_documents(query: str):
|
|
73
|
+
# Your complex logic here
|
|
74
|
+
return docs
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
## 🔌 Supported Providers
|
|
78
|
+
|
|
79
|
+
The SDK automatically instruments the following libraries if they are found in your environment:
|
|
80
|
+
|
|
81
|
+
| Provider | Library | Status |
|
|
82
|
+
| :--- | :--- | :--- |
|
|
83
|
+
| **OpenAI** | `openai` | ✅ Auto-Instrumented |
|
|
84
|
+
| **Anthropic** | `anthropic` | ✅ Auto-Instrumented |
|
|
85
|
+
| **Google Gemini** | `google-generativeai` | ✅ Auto-Instrumented |
|
|
86
|
+
|
|
87
|
+
*Note: If a library is not installed, the SDK gracefully skips it.*
|
|
88
|
+
|
|
89
|
+
## 📄 License
|
|
90
|
+
|
|
91
|
+
MIT
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
from voiceeval import Client, observe
|
|
2
|
+
import os
|
|
3
|
+
from openai import OpenAI
|
|
4
|
+
from dotenv import load_dotenv
|
|
5
|
+
|
|
6
|
+
load_dotenv()
|
|
7
|
+
|
|
8
|
+
# OpenAI is auto-instrumented by Client()
|
|
9
|
+
|
|
10
|
+
def main():
    """Demo: initialize the SDK against a local proxy and trace one OpenAI call."""
    # 1. Initialize Client pointing to local proxy
    client = Client(
        api_key="test_key",
        base_url="http://localhost:8000/v1/traces"
    )
    print("Client initialized.")

    # 2. Instrument a function calling OpenAI
    @observe(name_override="voice_agent_transaction")
    def run_agent_simulation(user_input):
        print(f"Agent received: {user_input}")

        openai_client = OpenAI()
        completion = openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful voice assistant."},
                {"role": "user", "content": user_input},
            ],
        )
        return completion.choices[0].message.content

    # 3. Run — bail out early unless OpenAI credentials are configured.
    if "OPENAI_API_KEY" not in os.environ:
        print("Please set OPENAI_API_KEY environment variable.")
        return

    print("Running simulation with OpenAI...")
    response = run_agent_simulation("What is the capital of France?")
    print(f"Agent response: {response}")
    print("Trace generated. Check server logs.")

if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "voiceeval-sdk"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "Enterprise-grade Observability and Evaluation SDK for Voice Agents"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
requires-python = ">=3.11"
|
|
7
|
+
license = { file = "LICENSE" }
|
|
8
|
+
authors = [
|
|
9
|
+
{ name = "VoiceEval Team", email = "hello@voiceeval.com" },
|
|
10
|
+
]
|
|
11
|
+
classifiers = [
|
|
12
|
+
"Development Status :: 4 - Beta",
|
|
13
|
+
"Intended Audience :: Developers",
|
|
14
|
+
"License :: OSI Approved :: MIT License",
|
|
15
|
+
"Programming Language :: Python :: 3.11",
|
|
16
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
17
|
+
]
|
|
18
|
+
urls.Repository = "https://github.com/voiceeval/voiceeval-sdk"
|
|
19
|
+
urls.Homepage = "https://voiceeval.com"
|
|
20
|
+
dependencies = [
|
|
21
|
+
"fastapi>=0.127.1",
|
|
22
|
+
"httpx>=0.28.1",
|
|
23
|
+
"openai>=2.14.0",
|
|
24
|
+
"opentelemetry-api>=1.39.1",
|
|
25
|
+
"opentelemetry-exporter-otlp>=1.39.1",
|
|
26
|
+
"opentelemetry-instrumentation-anthropic>=0.50.1",
|
|
27
|
+
"opentelemetry-instrumentation-google-generativeai>=0.50.1",
|
|
28
|
+
"opentelemetry-instrumentation-openai>=0.50.1",
|
|
29
|
+
"opentelemetry-sdk>=1.39.1",
|
|
30
|
+
"pydantic>=2.0.0",
|
|
31
|
+
"python-dotenv>=1.2.1",
|
|
32
|
+
"uvicorn>=0.40.0",
|
|
33
|
+
]
|
|
34
|
+
|
|
35
|
+
[build-system]
|
|
36
|
+
requires = ["hatchling"]
|
|
37
|
+
build-backend = "hatchling.build"
|
|
38
|
+
|
|
39
|
+
[tool.hatch.build.targets.wheel]
|
|
40
|
+
packages = ["src/voiceeval"]
|
|
41
|
+
|
|
42
|
+
[dependency-groups]
|
|
43
|
+
dev = [
|
|
44
|
+
"pytest>=9.0.2",
|
|
45
|
+
]
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
import os
import base64
import httpx
from fastapi import FastAPI, Request, HTTPException, Depends
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials

app = FastAPI()
security = HTTPBearer()

# Mock Database
# In production, this would be a Postgres/Redis lookup.
#
# SECURITY: downstream Langfuse credentials are read from the environment
# instead of being hard-coded. (A previous revision shipped literal
# pk-lf-/sk-lf- keys in this file inside a published package; any such
# keys must be considered compromised and rotated.)
USER_DB = {
    "test_key": {
        "langfuse_public": os.environ.get("LANGFUSE_PUBLIC_KEY", ""),
        "langfuse_secret": os.environ.get("LANGFUSE_SECRET_KEY", ""),
        "langfuse_host": os.environ.get("LANGFUSE_HOST", "https://us.cloud.langfuse.com"),
    }
}
|
|
19
|
+
|
|
20
|
+
async def get_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Resolve the Bearer token to a known VoiceEval API key.

    Raises a 403 when the presented token is not in USER_DB; otherwise
    returns the token so route handlers can look up the tenant config.
    """
    api_key = credentials.credentials
    if api_key in USER_DB:
        return api_key
    raise HTTPException(status_code=403, detail="Invalid API Key")
|
|
25
|
+
|
|
26
|
+
@app.post("/v1/traces")
async def ingest_traces(request: Request, api_key: str = Depends(get_api_key)):
    """Accept an OTLP/HTTP trace payload and forward it to the tenant's Langfuse project.

    The caller authenticates with a VoiceEval API key (Bearer); this proxy
    swaps it for the tenant's Langfuse Basic credentials before posting the
    raw body upstream. Upstream 4xx/5xx responses are reported back in the
    JSON result; transport failures surface as HTTP 502.
    """
    config = USER_DB[api_key]
    lf_public = config["langfuse_public"]
    lf_secret = config["langfuse_secret"]
    lf_host = config["langfuse_host"]

    body = await request.body()

    # Langfuse's OTel endpoint expects HTTP Basic auth of "public:secret".
    auth_str = f"{lf_public}:{lf_secret}"
    auth_b64 = base64.b64encode(auth_str.encode()).decode()

    target_url = f"{lf_host}/api/public/otel/v1/traces"

    headers = {
        "Authorization": f"Basic {auth_b64}",
        # Preserve the client's payload encoding; OTLP/HTTP defaults to protobuf.
        "Content-Type": request.headers.get("Content-Type", "application/x-protobuf")
    }

    async with httpx.AsyncClient(timeout=30.0) as client:
        try:
            # SECURITY FIX: never log the request headers wholesale — the
            # Authorization header contains the tenant's Langfuse credentials.
            # Log only the non-sensitive fields.
            print(f"DEBUG: Forwarding to {target_url}")
            print(f"DEBUG: Content-Type = {headers['Content-Type']}")
            print(f"DEBUG: Body size = {len(body)} bytes")

            response = await client.post(target_url, content=body, headers=headers)

            print(f"Langfuse Response Status: {response.status_code}")
            print(f"Langfuse Response Headers: {response.headers}")
            print(f"Langfuse Response Body: {response.text[:500] if response.text else '(empty)'}")

            if response.status_code >= 400:
                # Surface the upstream failure to the SDK without raising,
                # so the exporter sees a well-formed JSON body.
                return {"status": "error", "upstream_status": response.status_code, "detail": response.text}

            return {"status": "success", "upstream_status": response.status_code}
        except httpx.HTTPError as e:
            print(f"HTTP Error forwarding to Langfuse: {type(e).__name__}: {e}")
            raise HTTPException(status_code=502, detail=f"Upstream HTTP error: {str(e)}")
        except Exception as e:
            print(f"Unexpected error forwarding to Langfuse: {type(e).__name__}: {e}")
            raise HTTPException(status_code=502, detail=f"Upstream error: {str(e)}")
|
|
67
|
+
|
|
68
|
+
if __name__ == "__main__":
    # Allow running the ingestion proxy directly: `python server/main.py`.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from typing import List, Tuple
|
|
2
|
+
|
|
3
|
+
class VAD:
    """
    Voice Activity Detection utilities.
    """

    def detect_speech(self, audio: bytes) -> List[Tuple[float, float]]:
        """
        Returns list of (start_time, end_time) tuples where speech is detected.

        Placeholder implementation — no detection is performed yet, so the
        result is always empty regardless of the input audio.
        """
        segments: List[Tuple[float, float]] = []
        return segments
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import logging
|
|
3
|
+
from typing import Optional
|
|
4
|
+
from voiceeval.models import Call
|
|
5
|
+
from opentelemetry.sdk.trace import TracerProvider
|
|
6
|
+
from opentelemetry.sdk.trace.export import BatchSpanProcessor
|
|
7
|
+
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
|
|
8
|
+
from opentelemetry import trace
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
class Client:
    """
    Main entry point for the VoiceEval SDK.

    Constructing a Client configures the global OpenTelemetry tracer provider
    to export spans (OTLP/HTTP) to the VoiceEval ingest endpoint, then
    auto-instruments any installed OTel instrumentation packages.
    """
    def __init__(self, api_key: Optional[str] = None, base_url: str = "https://api.voiceeval.com/v1/traces"):
        # Explicit key wins; otherwise fall back to the env var.
        self.api_key = api_key or os.environ.get("VOICE_EVAL_API_KEY")
        if not self.api_key:
            raise ValueError("API Key is required. Set VOICE_EVAL_API_KEY env var or pass in __init__.")

        self.ingest_url = base_url
        # Side effect: installs a global tracer provider (see below).
        self.enable_observability()

    def enable_observability(self) -> None:
        """Auto-configures OTel to send data to VoiceEval Proxy and instruments common libraries."""
        provider = TracerProvider()
        exporter = OTLPSpanExporter(
            endpoint=self.ingest_url,
            # The proxy authenticates requests with this bearer token.
            headers={"Authorization": f"Bearer {self.api_key}"}
        )

        # Batch export keeps runtime overhead low (spans flushed asynchronously).
        provider.add_span_processor(BatchSpanProcessor(exporter))
        # NOTE(review): sets the process-global tracer provider — constructing
        # a second Client will attempt to override it; OTel typically ignores
        # the second call with a warning. Confirm intended single-Client usage.
        trace.set_tracer_provider(provider)

        # Auto-instrument common libraries
        self._instrument_libraries()

    def create_call(self, agent_id: str) -> Call:
        """
        Initialize a tracking object for a new call.

        Not implemented yet; always raises NotImplementedError.
        """
        raise NotImplementedError("create_call is not implemented yet")

    def log_call(self, call: Call) -> None:
        """
        Log a completed call to the platform.

        Emits a single "log_call" span carrying the call and agent ids;
        the batch processor exports it to the configured endpoint.
        """
        tracer = trace.get_tracer("voiceeval.sdk")
        with tracer.start_as_current_span("log_call") as span:
            span.set_attribute("call.id", call.call_id)
            span.set_attribute("agent.id", call.agent_id)

    def _instrument_libraries(self) -> None:
        """
        Auto-instrument all installed OTel instrumentation packages.
        This uses the standard 'opentelemetry_instrumentor' entry point.
        """
        try:
            from importlib.metadata import entry_points
        except ImportError:
            # importlib.metadata is stdlib on 3.8+; guard kept for safety.
            return

        logger.debug("Auto-instrumenting installed libraries...")
        # Python 3.10+ supports filtering by group
        eps = entry_points(group="opentelemetry_instrumentor")

        for entry_point in eps:
            try:
                # entry_point.load() yields the instrumentor class; call it
                # to get an instance.
                instrumentor = entry_point.load()()
                # Skip libraries something else already instrumented.
                if not instrumentor.is_instrumented_by_opentelemetry:
                    instrumentor.instrument()
                    logger.debug(f"Instrumented: {entry_point.name}")
            except Exception as e:
                # Silently fail (debug log only) — a broken instrumentor must
                # not prevent SDK initialization.
                logger.debug(f"Failed to instrument {entry_point.name}: {e}")
|
|
76
|
+
|
|
77
|
+
if __name__ == "__main__":
    # Smoke test: run this module directly to verify that Client()
    # initializes and auto-instruments installed libraries.
    # Configure logging to see output when running this script directly
    # logging.basicConfig(level=logging.DEBUG)

    # Client() automatically enables observability and instruments libraries
    # Using a dummy key for testing initialization
    try:
        client = Client(api_key="test_key")
        print("Client initialized and libraries instrumented.")
    except Exception as e:
        print(f"Initialization failed: {e}")
|