finchvox-0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- finchvox/__init__.py +0 -0
- finchvox/__main__.py +81 -0
- finchvox/audio_recorder.py +278 -0
- finchvox/audio_utils.py +123 -0
- finchvox/cli.py +127 -0
- finchvox/collector/__init__.py +0 -0
- finchvox/collector/__main__.py +22 -0
- finchvox/collector/audio_handler.py +146 -0
- finchvox/collector/collector_routes.py +186 -0
- finchvox/collector/config.py +64 -0
- finchvox/collector/server.py +126 -0
- finchvox/collector/service.py +43 -0
- finchvox/collector/writer.py +86 -0
- finchvox/server.py +201 -0
- finchvox/trace.py +115 -0
- finchvox/ui/css/app.css +774 -0
- finchvox/ui/images/favicon.ico +0 -0
- finchvox/ui/images/finchvox-logo.png +0 -0
- finchvox/ui/js/time-utils.js +97 -0
- finchvox/ui/js/trace_detail.js +1228 -0
- finchvox/ui/js/traces_list.js +26 -0
- finchvox/ui/lib/alpine.min.js +5 -0
- finchvox/ui/lib/wavesurfer.min.js +1 -0
- finchvox/ui/trace_detail.html +313 -0
- finchvox/ui/traces_list.html +63 -0
- finchvox/ui_routes.py +362 -0
- finchvox-0.0.1.dist-info/METADATA +189 -0
- finchvox-0.0.1.dist-info/RECORD +31 -0
- finchvox-0.0.1.dist-info/WHEEL +4 -0
- finchvox-0.0.1.dist-info/entry_points.txt +2 -0
- finchvox-0.0.1.dist-info/licenses/LICENSE +24 -0
finchvox/server.py
ADDED
@@ -0,0 +1,201 @@
+"""
+Unified FinchVox server combining collector and UI functionality.
+
+This module provides a single server that handles both data collection
+(audio, logs, exceptions, OTLP traces) and the web UI for viewing traces.
+"""
+
+import asyncio
+import signal
+import grpc
+import uvicorn
+from concurrent import futures
+from pathlib import Path
+from fastapi import FastAPI
+from loguru import logger
+from opentelemetry.proto.collector.trace.v1.trace_service_pb2_grpc import (
+    add_TraceServiceServicer_to_server
+)
+
+from finchvox.collector.service import TraceCollectorServicer
+from finchvox.collector.writer import SpanWriter
+from finchvox.collector.audio_handler import AudioHandler
+from finchvox.collector.collector_routes import register_collector_routes
+from finchvox.ui_routes import register_ui_routes
+from finchvox.collector.config import (
+    GRPC_PORT,
+    MAX_WORKERS,
+    get_default_data_dir,
+    get_traces_base_dir
+)
+
+
+class UnifiedServer:
+    """
+    Unified server managing both gRPC (OTLP traces) and HTTP (collector + UI).
+
+    This server provides:
+    - gRPC endpoint for OpenTelemetry trace collection (default port 4317)
+    - HTTP endpoints for audio/logs/exceptions collection (under /collector prefix)
+    - Web UI and REST API for viewing traces (at root /)
+    """
+
+    def __init__(self, port: int = 3000, grpc_port: int = GRPC_PORT, host: str = "0.0.0.0", data_dir: Path = None):
+        """
+        Initialize the unified server.
+
+        Args:
+            port: HTTP server port (default: 3000)
+            grpc_port: gRPC server port (default: 4317)
+            host: Host to bind to (default: "0.0.0.0")
+            data_dir: Base data directory (default: ~/.finchvox)
+        """
+        self.port = port
+        self.grpc_port = grpc_port
+        self.host = host
+        self.data_dir = data_dir if data_dir else get_default_data_dir()
+
+        # Initialize shared writer instances
+        self.span_writer = SpanWriter(self.data_dir)
+        self.audio_handler = AudioHandler(self.data_dir)
+
+        # Server instances
+        self.grpc_server = None
+        self.http_server = None
+        self.shutdown_event = asyncio.Event()
+        self._is_shutting_down = False
+
+        # Create unified FastAPI app
+        self.app = self._create_app()
+
+    def _create_app(self) -> FastAPI:
+        """
+        Create unified FastAPI application with both collector and UI routes.
+
+        Returns:
+            Configured FastAPI application
+        """
+        app = FastAPI(
+            title="FinchVox Unified Server",
+            description="Combined collector and UI server for voice AI observability",
+            version="0.1.0",
+        )
+
+        # Register UI routes first (includes static file mounts)
+        register_ui_routes(app, self.data_dir)
+
+        # Register collector routes with /collector prefix
+        register_collector_routes(
+            app,
+            self.audio_handler,
+            prefix="/collector"
+        )
+
+        return app
+
+    async def start_grpc(self):
+        """Start the gRPC server for OTLP trace collection."""
+        logger.info(f"Starting OTLP gRPC collector on port {self.grpc_port}")
+
+        # Create gRPC server with thread pool
+        self.grpc_server = grpc.server(
+            futures.ThreadPoolExecutor(max_workers=MAX_WORKERS)
+        )
+
+        # Register our service implementation
+        servicer = TraceCollectorServicer(self.span_writer)
+        add_TraceServiceServicer_to_server(servicer, self.grpc_server)
+
+        # Bind to port (insecure for PoC - no TLS)
+        self.grpc_server.add_insecure_port(f'[::]:{self.grpc_port}')
+
+        # Start serving
+        self.grpc_server.start()
+        logger.info(f"OTLP collector listening on port {self.grpc_port}")
+        logger.info(f"Writing traces to: {get_traces_base_dir(self.data_dir).absolute()}")
+
+    async def start_http(self):
+        """Start the HTTP server using uvicorn."""
+        logger.info(f"Starting HTTP server on {self.host}:{self.port}")
+
+        # Configure uvicorn server
+        config = uvicorn.Config(
+            self.app,
+            host=self.host,
+            port=self.port,
+            log_level="info",
+            access_log=True,
+        )
+        self.http_server = uvicorn.Server(config)
+
+        logger.info(f"HTTP server listening on http://{self.host}:{self.port}")
+        logger.info(f"  - UI: http://{self.host}:{self.port}")
+        logger.info(f"  - Collector: http://{self.host}:{self.port}/collector")
+        logger.info(f"Data directory: {self.data_dir.absolute()}")
+
+        # Run server until shutdown event
+        await self.http_server.serve()
+
+    async def start(self):
+        """Start both gRPC and HTTP servers concurrently."""
+        # Start gRPC server
+        await self.start_grpc()
+
+        # Start HTTP server (this blocks until shutdown)
+        await self.start_http()
+
+    async def stop(self, grace_period: int = 5):
+        """
+        Gracefully stop both servers.
+
+        Args:
+            grace_period: Seconds to wait for in-flight requests to complete
+        """
+        # Prevent multiple shutdown attempts
+        if self._is_shutting_down:
+            return
+
+        self._is_shutting_down = True
+        logger.info(f"Shutting down servers (grace period: {grace_period}s)")
+
+        # Stop HTTP server
+        if self.http_server:
+            logger.info("Stopping HTTP server...")
+            self.http_server.should_exit = True
+            await asyncio.sleep(0.1)  # Give it time to process shutdown
+
+        # Stop gRPC server
+        if self.grpc_server:
+            logger.info("Stopping gRPC server...")
+            self.grpc_server.stop(grace_period)
+
+        logger.info("All servers stopped")
+
+    def run(self):
+        """
+        Blocking entry point for running the unified server.
+
+        Sets up signal handlers and runs the event loop until shutdown.
+        """
+        async def run_with_signals():
+            loop = asyncio.get_running_loop()
+
+            def handle_shutdown(signum):
+                if not self._is_shutting_down:
+                    logger.info(f"Received signal {signum}")
+                    # Remove signal handlers to prevent duplicate calls
+                    for sig in (signal.SIGINT, signal.SIGTERM):
+                        loop.remove_signal_handler(sig)
+                    # Create shutdown task
+                    asyncio.create_task(self.stop())
+
+            # Register signal handlers
+            for sig in (signal.SIGINT, signal.SIGTERM):
+                loop.add_signal_handler(sig, lambda s=sig: handle_shutdown(s))
+
+            try:
+                await self.start()
+            except (KeyboardInterrupt, asyncio.CancelledError):
+                await self.stop()
+
+        asyncio.run(run_with_signals())
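For orientation, a minimal usage sketch follows. It relies only on what the file above shows (the UnifiedServer constructor defaults and the blocking run() entry point); everything else is illustrative and not taken from this package. Running the server, typically in its own process:

    # Minimal sketch based on server.py above; blocks until SIGINT/SIGTERM.
    from finchvox.server import UnifiedServer

    server = UnifiedServer(port=3000, grpc_port=4317)
    server.run()

In the instrumented application, a standard OTLP/gRPC exporter pointed at port 4317 would be enough to feed the collector; this uses the stock opentelemetry-sdk and opentelemetry-exporter-otlp packages and is an assumption about client wiring, and "my-voice-agent" is a placeholder service name (the service.name resource attribute is what trace.py below looks for when reporting service_name):

    from opentelemetry import trace
    from opentelemetry.sdk.resources import Resource
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor
    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter

    # Export spans to the collector's insecure gRPC port (default 4317).
    provider = TracerProvider(resource=Resource.create({"service.name": "my-voice-agent"}))
    provider.add_span_processor(
        BatchSpanProcessor(OTLPSpanExporter(endpoint="localhost:4317", insecure=True))
    )
    trace.set_tracer_provider(provider)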
finchvox/trace.py
ADDED
@@ -0,0 +1,115 @@
+"""Trace metadata and utilities."""
+
+import json
+from pathlib import Path
+from typing import Optional
+
+
+class Trace:
+    """
+    Represents a trace and provides calculated metadata.
+
+    Loads span data from a trace JSONL file and calculates:
+    - Start time (earliest span start)
+    - End time (latest span end)
+    - Duration in milliseconds
+    - Span count
+    """
+
+    def __init__(self, trace_file: Path):
+        """
+        Initialize trace from a trace file path.
+
+        Args:
+            trace_file: Path to trace_{trace_id}.jsonl file
+        """
+        self.trace_file = trace_file
+        self.trace_id = trace_file.stem.replace("trace_", "")
+        self._span_count: Optional[int] = None
+        self._min_start_nano: Optional[int] = None
+        self._max_end_nano: Optional[int] = None
+        self._service_name: Optional[str] = None
+        self._load_metadata()
+
+    def _load_metadata(self):
+        """Load span metadata from trace file."""
+        span_count = 0
+        min_start = None
+        max_end = None
+        service_name = None
+
+        try:
+            with open(self.trace_file, 'r') as f:
+                for line in f:
+                    if line.strip():
+                        span = json.loads(line)
+                        span_count += 1
+
+                        if "start_time_unix_nano" in span:
+                            start_nano = int(span["start_time_unix_nano"])
+                            if min_start is None or start_nano < min_start:
+                                min_start = start_nano
+
+                        if "end_time_unix_nano" in span:
+                            end_nano = int(span["end_time_unix_nano"])
+                            if max_end is None or end_nano > max_end:
+                                max_end = end_nano
+
+                        # Extract service name from first span with resource attributes
+                        if service_name is None and "resource" in span:
+                            resource = span["resource"]
+                            if "attributes" in resource:
+                                for attr in resource["attributes"]:
+                                    if attr.get("key") == "service.name":
+                                        value = attr.get("value", {})
+                                        service_name = value.get("string_value")
+                                        break
+        except Exception as e:
+            print(f"Error loading trace {self.trace_file}: {e}")
+
+        self._span_count = span_count
+        self._min_start_nano = min_start
+        self._max_end_nano = max_end
+        self._service_name = service_name
+
+    @property
+    def span_count(self) -> int:
+        """Get total span count."""
+        return self._span_count or 0
+
+    @property
+    def start_time(self) -> Optional[float]:
+        """Get trace start time in seconds (Unix timestamp)."""
+        if self._min_start_nano:
+            return self._min_start_nano / 1_000_000_000
+        return None
+
+    @property
+    def end_time(self) -> Optional[float]:
+        """Get trace end time in seconds (Unix timestamp)."""
+        if self._max_end_nano:
+            return self._max_end_nano / 1_000_000_000
+        return None
+
+    @property
+    def duration_ms(self) -> Optional[float]:
+        """Get trace duration in milliseconds."""
+        if self._min_start_nano and self._max_end_nano:
+            return (self._max_end_nano - self._min_start_nano) / 1_000_000
+        return None
+
+    @property
+    def service_name(self) -> Optional[str]:
+        """Get service name from first span with resource attributes."""
+        return self._service_name
+
+    def to_dict(self) -> dict:
+        """Convert to dictionary for API response."""
+        return {
+            "trace_id": self.trace_id,
+            "service_name": self.service_name,
+            "span_count": self.span_count,
+            "start_time": self.start_time,
+            "end_time": self.end_time,
+            "duration_ms": self.duration_ms,
+        }
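A brief usage sketch for this class follows. The path and trace id are hypothetical; the real files are written by the collector under the directory returned by get_traces_base_dir() in finchvox/collector/config.py, whose contents this diff does not show.

    from pathlib import Path

    from finchvox.trace import Trace

    # Hypothetical file; the collector controls the actual directory layout.
    trace_file = Path.home() / ".finchvox" / "traces" / "trace_abc123.jsonl"

    t = Trace(trace_file)
    print(t.trace_id)     # "abc123", derived from the filename
    print(t.span_count)   # number of JSONL lines parsed as spans
    print(t.duration_ms)  # latest span end minus earliest span start, in ms
    print(t.to_dict())    # the dictionary shape used for API responses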