flock-core 0.5.0b53-py3-none-any.whl → 0.5.0b54-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of flock-core has been flagged for review.
- flock/agent.py +6 -2
- flock/components.py +17 -1
- flock/dashboard/service.py +293 -0
- flock/frontend/README.md +86 -0
- flock/frontend/src/components/modules/JsonAttributeRenderer.tsx +140 -0
- flock/frontend/src/components/modules/ModuleWindow.tsx +97 -29
- flock/frontend/src/components/modules/TraceModuleJaeger.tsx +1971 -0
- flock/frontend/src/components/modules/TraceModuleJaegerWrapper.tsx +13 -0
- flock/frontend/src/components/modules/registerModules.ts +10 -0
- flock/frontend/src/components/settings/MultiSelect.tsx +235 -0
- flock/frontend/src/components/settings/SettingsPanel.css +1 -1
- flock/frontend/src/components/settings/TracingSettings.tsx +404 -0
- flock/frontend/src/types/modules.ts +3 -0
- flock/logging/auto_trace.py +159 -0
- flock/logging/telemetry.py +17 -0
- flock/logging/telemetry_exporter/duckdb_exporter.py +216 -0
- flock/logging/telemetry_exporter/file_exporter.py +7 -1
- flock/logging/trace_and_logged.py +263 -14
- flock/orchestrator.py +130 -1
- {flock_core-0.5.0b53.dist-info → flock_core-0.5.0b54.dist-info}/METADATA +187 -18
- {flock_core-0.5.0b53.dist-info → flock_core-0.5.0b54.dist-info}/RECORD +24 -17
- {flock_core-0.5.0b53.dist-info → flock_core-0.5.0b54.dist-info}/WHEEL +0 -0
- {flock_core-0.5.0b53.dist-info → flock_core-0.5.0b54.dist-info}/entry_points.txt +0 -0
- {flock_core-0.5.0b53.dist-info → flock_core-0.5.0b54.dist-info}/licenses/LICENSE +0 -0
flock/logging/auto_trace.py
ADDED
@@ -0,0 +1,159 @@
+"""Metaclass for automatic method tracing via OpenTelemetry."""
+
+from __future__ import annotations
+
+import json
+import os
+
+from flock.logging.trace_and_logged import _trace_filter_config, traced_and_logged
+
+
+# Check if auto-tracing is enabled via environment variable
+ENABLE_AUTO_TRACE = os.getenv("FLOCK_AUTO_TRACE", "true").lower() in {"true", "1", "yes", "on"}
+
+
+# Parse trace filter configuration from environment variables
+def _parse_trace_filters():
+    """Parse FLOCK_TRACE_SERVICES and FLOCK_TRACE_IGNORE from environment."""
+    # Parse FLOCK_TRACE_SERVICES (whitelist)
+    services_env = os.getenv("FLOCK_TRACE_SERVICES", "")
+    if services_env:
+        try:
+            services_list = json.loads(services_env)
+            if isinstance(services_list, list):
+                # Store as lowercase set for case-insensitive matching
+                _trace_filter_config.services = {
+                    s.lower() for s in services_list if isinstance(s, str)
+                }
+        except (json.JSONDecodeError, ValueError):
+            print(f"Warning: Invalid FLOCK_TRACE_SERVICES format: {services_env}")
+
+    # Parse FLOCK_TRACE_IGNORE (blacklist)
+    ignore_env = os.getenv("FLOCK_TRACE_IGNORE", "")
+    if ignore_env:
+        try:
+            ignore_list = json.loads(ignore_env)
+            if isinstance(ignore_list, list):
+                _trace_filter_config.ignore_operations = {
+                    op for op in ignore_list if isinstance(op, str)
+                }
+        except (json.JSONDecodeError, ValueError):
+            print(f"Warning: Invalid FLOCK_TRACE_IGNORE format: {ignore_env}")
+
+
+# Auto-configure logging and telemetry when auto-tracing is enabled
+if ENABLE_AUTO_TRACE:
+    # Configure trace filters first
+    _parse_trace_filters()
+    from flock.logging.logging import configure_logging
+    from flock.logging.telemetry import TelemetryConfig
+
+    # Configure logging to DEBUG
+    configure_logging(
+        flock_level="DEBUG",
+        external_level="WARNING",
+        specific_levels={
+            "tools": "DEBUG",
+            "agent": "DEBUG",
+            "flock": "DEBUG",
+        },
+    )
+
+    # Initialize telemetry for OTEL trace context
+    # Only enable exporters if explicitly configured via env vars
+    enable_file_export = os.getenv("FLOCK_TRACE_FILE", "false").lower() in {
+        "true",
+        "1",
+        "yes",
+        "on",
+    }
+    enable_otlp_export = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT") is not None
+
+    # Parse TTL (Time To Live) for trace cleanup
+    trace_ttl_days = None
+    ttl_env = os.getenv("FLOCK_TRACE_TTL_DAYS", "")
+    if ttl_env:
+        try:
+            trace_ttl_days = int(ttl_env)
+        except ValueError:
+            print(f"Warning: Invalid FLOCK_TRACE_TTL_DAYS value: {ttl_env}")
+
+    telemetry_config = TelemetryConfig(
+        service_name="flock-auto-trace",
+        enable_jaeger=False,
+        enable_file=False,  # Disable file export, use DuckDB instead
+        enable_sql=False,
+        enable_duckdb=enable_file_export,  # Use DuckDB when file export is enabled
+        enable_otlp=enable_otlp_export,
+        otlp_endpoint=os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4317"),
+        local_logging_dir=".flock",
+        duckdb_name="traces.duckdb",
+        duckdb_ttl_days=trace_ttl_days,
+    )
+    telemetry_config.setup_tracing()
+
+
+class AutoTracedMeta(type):
+    """Metaclass that automatically applies @traced_and_logged to all public methods.
+
+    This enables automatic OpenTelemetry span creation and debug logging for all
+    method calls on classes using this metaclass.
+
+    Control via environment variable:
+        FLOCK_AUTO_TRACE=true  - Enable auto-tracing (default)
+        FLOCK_AUTO_TRACE=false - Disable auto-tracing
+
+    Example:
+        class Agent(metaclass=AutoTracedMeta):
+            def execute(self, ctx, artifacts):
+                # Automatically traced and logged
+                ...
+    """
+
+    def __new__(mcs, name, bases, namespace, **kwargs):
+        """Create a new class with auto-traced methods."""
+        if not ENABLE_AUTO_TRACE:
+            # If auto-tracing is disabled, return the class unchanged
+            return super().__new__(mcs, name, bases, namespace, **kwargs)
+
+        # Apply @traced_and_logged to all public methods
+        for attr_name, attr_value in list(namespace.items()):
+            # Skip private methods (starting with _)
+            if attr_name.startswith("_"):
+                continue
+
+            # Skip non-callables
+            if not callable(attr_value):
+                continue
+
+            # Skip if already traced
+            if getattr(attr_value, "_traced", False):
+                continue
+
+            # Skip if explicitly marked to skip tracing
+            if getattr(attr_value, "_skip_trace", False):
+                continue
+
+            # Apply the decorator
+            traced_func = traced_and_logged(attr_value)
+            traced_func._traced = True  # Mark as traced to avoid double-wrapping
+            namespace[attr_name] = traced_func
+
+        return super().__new__(mcs, name, bases, namespace, **kwargs)
+
+
+def skip_trace(func):
+    """Decorator to mark a method to skip auto-tracing.

+    Use this for methods that are called very frequently or are not
+    interesting for debugging purposes.
+
+    Example:
+        class Agent(metaclass=AutoTracedMeta):
+            @skip_trace
+            def _internal_helper(self):
+                # Not traced
+                ...
+    """
+    func._skip_trace = True
+    return func
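Since auto_trace.py configures logging and telemetry at import time, the environment variables it reads must be set before the first flock import. A minimal usage sketch of the new metaclass and markers follows; the Pipeline class and the filter values are illustrative, not part of the package:

# Sketch: opt-in configuration read at import time by flock.logging.auto_trace.
# The class name and filter values below are illustrative.
import os

os.environ["FLOCK_AUTO_TRACE"] = "true"                            # default; "false" disables wrapping
os.environ["FLOCK_TRACE_SERVICES"] = '["agent", "orchestrator"]'   # JSON whitelist (illustrative values)
os.environ["FLOCK_TRACE_IGNORE"] = '["Agent.poll"]'                # JSON blacklist of operations (illustrative)
os.environ["FLOCK_TRACE_FILE"] = "true"                            # routes spans into .flock/traces.duckdb
os.environ["FLOCK_TRACE_TTL_DAYS"] = "7"                           # prune spans older than a week

from flock.logging.auto_trace import AutoTracedMeta, skip_trace

class Pipeline(metaclass=AutoTracedMeta):
    def run(self, payload):            # public method: wrapped by traced_and_logged
        return self._normalize(payload)

    def _normalize(self, payload):     # leading underscore: skipped by the metaclass
        return payload

    @skip_trace
    def heartbeat(self):               # public but opted out via the _skip_trace marker
        pass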
flock/logging/telemetry.py
CHANGED
@@ -12,6 +12,9 @@ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
 from flock.logging.span_middleware.baggage_span_processor import (
     BaggageAttributeSpanProcessor,
 )
+from flock.logging.telemetry_exporter.duckdb_exporter import (
+    DuckDBSpanExporter,
+)
 
 # with workflow.unsafe.imports_passed_through():
 from flock.logging.telemetry_exporter.file_exporter import (
@@ -40,9 +43,12 @@ class TelemetryConfig:
         local_logging_dir: str | None = None,
         file_export_name: str | None = None,
         sqlite_db_name: str | None = None,
+        duckdb_name: str | None = None,
+        duckdb_ttl_days: int | None = None,
         enable_jaeger: bool = True,
         enable_file: bool = True,
         enable_sql: bool = True,
+        enable_duckdb: bool = True,
         enable_otlp: bool = True,
         otlp_protocol: str = "grpc",
         otlp_endpoint: str = "http://localhost:4317",
@@ -53,6 +59,7 @@ class TelemetryConfig:
         :param jaeger_endpoint: The Jaeger collector gRPC endpoint (e.g., "localhost:14250").
         :param file_export_path: If provided, spans will be written to this file.
         :param sqlite_db_path: If provided, spans will be stored in this SQLite DB.
+        :param duckdb_ttl_days: Delete traces older than this many days (default: None = keep forever).
         :param batch_processor_options: Dict of options for BatchSpanProcessor (e.g., {"max_export_batch_size": 10}).
         """
         self.service_name = service_name
@@ -60,11 +67,14 @@ class TelemetryConfig:
         self.jaeger_transport = jaeger_transport
         self.file_export_name = file_export_name
         self.sqlite_db_name = sqlite_db_name
+        self.duckdb_name = duckdb_name
+        self.duckdb_ttl_days = duckdb_ttl_days
         self.local_logging_dir = local_logging_dir
         self.batch_processor_options = batch_processor_options or {}
         self.enable_jaeger = enable_jaeger
         self.enable_file = enable_file
         self.enable_sql = enable_sql
+        self.enable_duckdb = enable_duckdb
         self.enable_otlp = enable_otlp
         self.otlp_protocol = otlp_protocol
         self.otlp_endpoint = otlp_endpoint
@@ -166,6 +176,13 @@ class TelemetryConfig:
             sqlite_exporter = SqliteTelemetryExporter(self.local_logging_dir, self.sqlite_db_name)
             span_processors.append(SimpleSpanProcessor(sqlite_exporter))
 
+        # If a DuckDB database path is provided, add the DuckDB exporter.
+        if self.duckdb_name and self.enable_duckdb:
+            duckdb_exporter = DuckDBSpanExporter(
+                self.local_logging_dir, self.duckdb_name, ttl_days=self.duckdb_ttl_days
+            )
+            span_processors.append(SimpleSpanProcessor(duckdb_exporter))
+
         # Register all span processors with the provider.
         for processor in span_processors:
             provider.add_span_processor(processor)
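Beyond the auto-trace path, the new constructor arguments can also be wired explicitly. A sketch under the assumption that all arguments are passed by keyword; the service name and TTL values are illustrative:

# Sketch: explicit TelemetryConfig wiring for the DuckDB exporter added in
# this release. Keyword values are illustrative.
from flock.logging.telemetry import TelemetryConfig

config = TelemetryConfig(
    service_name="my-flock-app",   # illustrative service name
    enable_jaeger=False,
    enable_file=False,
    enable_sql=False,
    enable_duckdb=True,            # new flag in this release
    enable_otlp=False,
    local_logging_dir=".flock",
    duckdb_name="traces.duckdb",   # new: DuckDB file name
    duckdb_ttl_days=14,            # new: prune spans older than 14 days on startup
)
config.setup_tracing()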
flock/logging/telemetry_exporter/duckdb_exporter.py
ADDED
@@ -0,0 +1,216 @@
+"""DuckDB exporter for OpenTelemetry spans - optimized for analytical queries."""
+
+import json
+from pathlib import Path
+
+import duckdb
+from opentelemetry.sdk.trace.export import SpanExportResult
+from opentelemetry.trace import Status, StatusCode
+
+from flock.logging.telemetry_exporter.base_exporter import TelemetryExporter
+
+
+class DuckDBSpanExporter(TelemetryExporter):
+    """Export spans to DuckDB for fast analytical queries.
+
+    DuckDB is a columnar analytical database optimized for OLAP workloads,
+    making it 10-100x faster than SQLite for trace analytics like:
+    - Aggregations (avg/p95/p99 duration)
+    - Time-range queries
+    - Service/operation filtering
+    - Complex analytical queries
+
+    The database is a single file with zero configuration required.
+    """
+
+    def __init__(self, dir: str, db_name: str = "traces.duckdb", ttl_days: int | None = None):
+        """Initialize the DuckDB exporter.
+
+        Args:
+            dir: Directory where the database file will be created
+            db_name: Name of the DuckDB file (default: traces.duckdb)
+            ttl_days: Delete traces older than this many days (default: None = keep forever)
+        """
+        super().__init__()
+        self.telemetry_path = Path(dir)
+        self.telemetry_path.mkdir(parents=True, exist_ok=True)
+        self.db_path = self.telemetry_path / db_name
+        self.ttl_days = ttl_days
+
+        # Initialize database and create schema
+        self._init_database()
+
+    def _init_database(self):
+        """Create the spans table if it doesn't exist."""
+        with duckdb.connect(str(self.db_path)) as conn:
+            conn.execute("""
+                CREATE TABLE IF NOT EXISTS spans (
+                    trace_id VARCHAR NOT NULL,
+                    span_id VARCHAR PRIMARY KEY,
+                    parent_id VARCHAR,
+                    name VARCHAR NOT NULL,
+                    service VARCHAR,
+                    operation VARCHAR,
+                    kind VARCHAR,
+                    start_time BIGINT NOT NULL,
+                    end_time BIGINT NOT NULL,
+                    duration_ms DOUBLE NOT NULL,
+                    status_code VARCHAR NOT NULL,
+                    status_description VARCHAR,
+                    attributes JSON,
+                    events JSON,
+                    links JSON,
+                    resource JSON,
+                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+                )
+            """)
+
+            # Create indexes for common query patterns
+            conn.execute("CREATE INDEX IF NOT EXISTS idx_trace_id ON spans(trace_id)")
+            conn.execute("CREATE INDEX IF NOT EXISTS idx_service ON spans(service)")
+            conn.execute("CREATE INDEX IF NOT EXISTS idx_start_time ON spans(start_time)")
+            conn.execute("CREATE INDEX IF NOT EXISTS idx_name ON spans(name)")
+            conn.execute("CREATE INDEX IF NOT EXISTS idx_created_at ON spans(created_at)")
+
+        # Cleanup old traces if TTL is configured
+        if self.ttl_days is not None:
+            self._cleanup_old_traces()
+
+    def _cleanup_old_traces(self):
+        """Delete traces older than TTL_DAYS.
+
+        This runs on exporter initialization to keep the database size manageable.
+        """
+        if self.ttl_days is None:
+            return
+
+        try:
+            with duckdb.connect(str(self.db_path)) as conn:
+                # Delete spans older than TTL
+                result = conn.execute(
+                    """
+                    DELETE FROM spans
+                    WHERE created_at < CURRENT_TIMESTAMP - INTERVAL ? DAYS
+                    """,
+                    (self.ttl_days,),
+                )
+
+                deleted_count = result.fetchall()[0][0] if result else 0
+
+                if deleted_count > 0:
+                    print(
+                        f"[DuckDB TTL] Deleted {deleted_count} spans older than {self.ttl_days} days"
+                    )
+
+        except Exception as e:
+            print(f"[DuckDB TTL] Error cleaning up old traces: {e}")
+
+    def _span_to_record(self, span):
+        """Convert a ReadableSpan to a database record."""
+        context = span.get_span_context()
+        status = span.status or Status(StatusCode.UNSET)
+
+        # Extract service and operation from span name
+        # Format: "ServiceName.operation_name"
+        parts = span.name.split(".", 1)
+        service = parts[0] if len(parts) > 0 else "unknown"
+        operation = parts[1] if len(parts) > 1 else span.name
+
+        # Calculate duration in milliseconds
+        duration_ms = (span.end_time - span.start_time) / 1_000_000
+
+        # Serialize complex fields to JSON
+        attributes_json = json.dumps(dict(span.attributes or {}))
+        events_json = json.dumps(
+            [
+                {
+                    "name": event.name,
+                    "timestamp": event.timestamp,
+                    "attributes": dict(event.attributes or {}),
+                }
+                for event in span.events
+            ]
+        )
+        links_json = json.dumps(
+            [
+                {
+                    "context": {
+                        "trace_id": format(link.context.trace_id, "032x"),
+                        "span_id": format(link.context.span_id, "016x"),
+                    },
+                    "attributes": dict(link.attributes or {}),
+                }
+                for link in span.links
+            ]
+        )
+        resource_json = json.dumps(dict(span.resource.attributes.items()))
+
+        # Get parent span ID if exists
+        parent_id = None
+        if span.parent and span.parent.span_id != 0:
+            parent_id = format(span.parent.span_id, "016x")
+
+        return {
+            "trace_id": format(context.trace_id, "032x"),
+            "span_id": format(context.span_id, "016x"),
+            "parent_id": parent_id,
+            "name": span.name,
+            "service": service,
+            "operation": operation,
+            "kind": span.kind.name if span.kind else None,
+            "start_time": span.start_time,
+            "end_time": span.end_time,
+            "duration_ms": duration_ms,
+            "status_code": status.status_code.name,
+            "status_description": status.description,
+            "attributes": attributes_json,
+            "events": events_json,
+            "links": links_json,
+            "resource": resource_json,
+        }
+
+    def export(self, spans):
+        """Export spans to DuckDB."""
+        try:
+            with duckdb.connect(str(self.db_path)) as conn:
+                for span in spans:
+                    record = self._span_to_record(span)
+
+                    # Insert span record
+                    conn.execute(
+                        """
+                        INSERT OR REPLACE INTO spans (
+                            trace_id, span_id, parent_id, name, service, operation,
+                            kind, start_time, end_time, duration_ms,
+                            status_code, status_description,
+                            attributes, events, links, resource
+                        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                        """,
+                        (
+                            record["trace_id"],
+                            record["span_id"],
+                            record["parent_id"],
+                            record["name"],
+                            record["service"],
+                            record["operation"],
+                            record["kind"],
+                            record["start_time"],
+                            record["end_time"],
+                            record["duration_ms"],
+                            record["status_code"],
+                            record["status_description"],
+                            record["attributes"],
+                            record["events"],
+                            record["links"],
+                            record["resource"],
+                        ),
+                    )
+
+            return SpanExportResult.SUCCESS
+        except Exception as e:
+            print(f"Error exporting spans to DuckDB: {e}")
+            return SpanExportResult.FAILURE
+
+    def shutdown(self) -> None:
+        """Cleanup resources."""
+        # DuckDB connections are managed per-transaction, no cleanup needed
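Since the spans land in a plain DuckDB file, they can be queried directly. A sketch of the kind of analytical query the exporter is optimized for; the schema comes from the diff above, while the query and file path are illustrative:

# Sketch: p95 latency per service/operation from the spans table defined above.
import duckdb

with duckdb.connect(".flock/traces.duckdb", read_only=True) as conn:
    rows = conn.execute("""
        SELECT service,
               operation,
               COUNT(*)                         AS calls,
               AVG(duration_ms)                 AS avg_ms,
               QUANTILE_CONT(duration_ms, 0.95) AS p95_ms
        FROM spans
        GROUP BY service, operation
        ORDER BY p95_ms DESC
        LIMIT 10
    """).fetchall()

for service, operation, calls, avg_ms, p95_ms in rows:
    print(f"{service}.{operation}: {calls} calls, avg {avg_ms:.1f} ms, p95 {p95_ms:.1f} ms")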
flock/logging/telemetry_exporter/file_exporter.py
CHANGED
@@ -29,7 +29,7 @@ class FileSpanExporter(TelemetryExporter):
         context = span.get_span_context()
         status = span.status or Status(StatusCode.UNSET)
 
-
+        result = {
             "name": span.name,
             "context": {
                 "trace_id": format(context.trace_id, "032x"),
@@ -66,6 +66,12 @@ class FileSpanExporter(TelemetryExporter):
             "resource": dict(span.resource.attributes.items()),
         }
 
+        # Add parent_id if this span has a parent
+        if span.parent and span.parent.span_id != 0:
+            result["parent_id"] = format(span.parent.span_id, "016x")
+
+        return result
+
     def export(self, spans):
         """Write spans to a log file."""
         try: