empathy-framework 3.8.2__py3-none-any.whl → 3.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-3.8.2.dist-info → empathy_framework-3.9.0.dist-info}/METADATA +55 -16
- {empathy_framework-3.8.2.dist-info → empathy_framework-3.9.0.dist-info}/RECORD +51 -40
- {empathy_framework-3.8.2.dist-info → empathy_framework-3.9.0.dist-info}/top_level.txt +0 -4
- empathy_os/.empathy/costs.json +60 -0
- empathy_os/.empathy/discovery_stats.json +15 -0
- empathy_os/.empathy/workflow_runs.json +45 -0
- empathy_os/__init__.py +1 -1
- empathy_os/cli.py +372 -13
- empathy_os/cli_unified.py +111 -0
- empathy_os/config/xml_config.py +45 -3
- empathy_os/config.py +46 -2
- empathy_os/memory/control_panel.py +128 -8
- empathy_os/memory/long_term.py +26 -4
- empathy_os/memory/short_term.py +110 -0
- empathy_os/models/token_estimator.py +25 -0
- empathy_os/pattern_library.py +81 -8
- empathy_os/patterns/debugging/all_patterns.json +81 -0
- empathy_os/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- empathy_os/patterns/refactoring_memory.json +89 -0
- empathy_os/telemetry/__init__.py +11 -0
- empathy_os/telemetry/cli.py +451 -0
- empathy_os/telemetry/usage_tracker.py +475 -0
- empathy_os/tier_recommender.py +422 -0
- empathy_os/workflows/base.py +220 -23
- empathy_os/workflows/config.py +50 -5
- empathy_os/workflows/tier_tracking.py +408 -0
- {empathy_framework-3.8.2.dist-info → empathy_framework-3.9.0.dist-info}/WHEEL +0 -0
- {empathy_framework-3.8.2.dist-info → empathy_framework-3.9.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.8.2.dist-info → empathy_framework-3.9.0.dist-info}/licenses/LICENSE +0 -0
- {hot_reload → empathy_os/hot_reload}/README.md +0 -0
- {hot_reload → empathy_os/hot_reload}/__init__.py +0 -0
- {hot_reload → empathy_os/hot_reload}/config.py +0 -0
- {hot_reload → empathy_os/hot_reload}/integration.py +0 -0
- {hot_reload → empathy_os/hot_reload}/reloader.py +0 -0
- {hot_reload → empathy_os/hot_reload}/watcher.py +0 -0
- {hot_reload → empathy_os/hot_reload}/websocket.py +0 -0
- {scaffolding → empathy_os/scaffolding}/README.md +0 -0
- {scaffolding → empathy_os/scaffolding}/__init__.py +0 -0
- {scaffolding → empathy_os/scaffolding}/__main__.py +0 -0
- {scaffolding → empathy_os/scaffolding}/cli.py +0 -0
- {test_generator → empathy_os/test_generator}/__init__.py +0 -0
- {test_generator → empathy_os/test_generator}/__main__.py +0 -0
- {test_generator → empathy_os/test_generator}/cli.py +0 -0
- {test_generator → empathy_os/test_generator}/generator.py +0 -0
- {test_generator → empathy_os/test_generator}/risk_analyzer.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/__init__.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/behavior.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/core.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/output.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/registry.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/structural.py +0 -0
|
@@ -0,0 +1,451 @@
|
|
|
1
|
+
"""CLI commands for telemetry tracking.
|
|
2
|
+
|
|
3
|
+
Provides commands to view, analyze, and manage local usage telemetry data.
|
|
4
|
+
|
|
5
|
+
Copyright 2025 Smart-AI-Memory
|
|
6
|
+
Licensed under Fair Source License 0.9
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import csv
|
|
10
|
+
import json
|
|
11
|
+
import sys
|
|
12
|
+
from datetime import datetime
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
try:
|
|
17
|
+
from rich.console import Console
|
|
18
|
+
from rich.panel import Panel
|
|
19
|
+
from rich.table import Table
|
|
20
|
+
from rich.text import Text
|
|
21
|
+
RICH_AVAILABLE = True
|
|
22
|
+
except ImportError:
|
|
23
|
+
RICH_AVAILABLE = False
|
|
24
|
+
Console = None # type: ignore
|
|
25
|
+
|
|
26
|
+
from .usage_tracker import UsageTracker
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _validate_file_path(path: str, allowed_dir: str | None = None) -> Path:
|
|
30
|
+
"""Validate file path to prevent path traversal and arbitrary writes.
|
|
31
|
+
|
|
32
|
+
Args:
|
|
33
|
+
path: File path to validate
|
|
34
|
+
allowed_dir: Optional directory to restrict writes to
|
|
35
|
+
|
|
36
|
+
Returns:
|
|
37
|
+
Validated Path object
|
|
38
|
+
|
|
39
|
+
Raises:
|
|
40
|
+
ValueError: If path is invalid or unsafe
|
|
41
|
+
"""
|
|
42
|
+
if not path or not isinstance(path, str):
|
|
43
|
+
raise ValueError("path must be a non-empty string")
|
|
44
|
+
|
|
45
|
+
# Check for null bytes
|
|
46
|
+
if "\x00" in path:
|
|
47
|
+
raise ValueError("path contains null bytes")
|
|
48
|
+
|
|
49
|
+
try:
|
|
50
|
+
resolved = Path(path).resolve()
|
|
51
|
+
except (OSError, RuntimeError) as e:
|
|
52
|
+
raise ValueError(f"Invalid path: {e}")
|
|
53
|
+
|
|
54
|
+
# Check if within allowed directory
|
|
55
|
+
if allowed_dir:
|
|
56
|
+
try:
|
|
57
|
+
allowed = Path(allowed_dir).resolve()
|
|
58
|
+
resolved.relative_to(allowed)
|
|
59
|
+
except ValueError:
|
|
60
|
+
raise ValueError(f"path must be within {allowed_dir}")
|
|
61
|
+
|
|
62
|
+
# Check for dangerous system paths
|
|
63
|
+
dangerous_paths = ["/etc", "/sys", "/proc", "/dev"]
|
|
64
|
+
for dangerous in dangerous_paths:
|
|
65
|
+
if str(resolved).startswith(dangerous):
|
|
66
|
+
raise ValueError(f"Cannot write to system directory: {dangerous}")
|
|
67
|
+
|
|
68
|
+
return resolved
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def cmd_telemetry_show(args: Any) -> int:
    """Show recent telemetry entries.

    Renders the most recent LLM-call records as a rich table when the
    optional ``rich`` package is available, otherwise as aligned plain
    text. Uses ``args.limit`` (default 20) and ``args.days`` (default
    None, i.e. no age filter).

    Args:
        args: Parsed command-line arguments

    Returns:
        Exit code (0 for success)

    """
    tracker = UsageTracker.get_instance()
    limit = getattr(args, "limit", 20)
    days = getattr(args, "days", None)

    # Each entry is a dict; assumed keys: ts, workflow, stage, tier, cost,
    # tokens {input, output}, cache {hit, type}, duration_ms — TODO confirm
    # against UsageTracker.get_recent_entries().
    entries = tracker.get_recent_entries(limit=limit, days=days)

    # Nothing recorded yet: point the user at the data directory and exit 0.
    if not entries:
        print("No telemetry data found.")
        print(f"Data location: {tracker.telemetry_dir}")
        return 0

    if RICH_AVAILABLE and Console is not None:
        console = Console()
        table = Table(title="Recent LLM Calls", show_header=True, header_style="bold magenta")
        table.add_column("Time", style="cyan", width=19)
        table.add_column("Workflow", style="green")
        table.add_column("Stage", style="blue")
        table.add_column("Tier", style="yellow")
        table.add_column("Cost", style="red", justify="right")
        table.add_column("Tokens", justify="right")
        table.add_column("Cache", style="green")
        table.add_column("Duration", justify="right")

        total_cost = 0.0
        total_duration = 0

        for entry in entries:
            ts = entry.get("ts", "")
            # Format timestamp: strip a trailing "Z" (UTC marker) so
            # fromisoformat() accepts it on Pythons < 3.11. Note that
            # rstrip("Z") removes *all* trailing Z characters.
            try:
                dt = datetime.fromisoformat(ts.rstrip("Z"))
                ts_display = dt.strftime("%Y-%m-%d %H:%M:%S")
            except (ValueError, AttributeError):
                # Unparsable timestamp: fall back to (at most) its first 19 chars.
                ts_display = ts[:19] if len(ts) >= 19 else ts

            workflow = entry.get("workflow", "unknown")
            stage = entry.get("stage", "-")
            tier = entry.get("tier", "unknown")
            cost = entry.get("cost", 0.0)
            tokens = entry.get("tokens", {})
            cache = entry.get("cache", {})
            duration_ms = entry.get("duration_ms", 0)

            tokens_str = f"{tokens.get('input', 0)}/{tokens.get('output', 0)}"
            # Cache hits are annotated with their type when recorded,
            # e.g. "HIT (exact)".
            cache_str = "HIT" if cache.get("hit") else "MISS"
            if cache.get("hit"):
                cache_type = cache.get("type", "")
                if cache_type:
                    cache_str += f" ({cache_type})"

            table.add_row(
                ts_display,
                workflow[:20],  # truncate long names to keep the table narrow
                stage[:15] if stage else "-",
                tier,
                f"${cost:.4f}",
                tokens_str,
                cache_str,
                f"{duration_ms}ms",
            )

            total_cost += cost
            total_duration += duration_ms

        console.print(table)
        console.print()
        console.print(f"[bold]Total Cost:[/bold] ${total_cost:.4f}")
        # Safe division: entries is non-empty here (empty case returned above).
        console.print(f"[bold]Avg Duration:[/bold] {total_duration // len(entries)}ms")
        console.print(f"\n[dim]Data location: {tracker.telemetry_dir}[/dim]")
    else:
        # Fallback to plain text (this layout omits the Tokens column).
        print(f"\n{'Time':<19} {'Workflow':<20} {'Stage':<15} {'Tier':<10} {'Cost':>10} {'Cache':<10} {'Duration':>10}")
        print("-" * 120)
        total_cost = 0.0
        for entry in entries:
            ts = entry.get("ts", "")[:19]
            workflow = entry.get("workflow", "unknown")[:20]
            stage = entry.get("stage", "-")[:15]
            tier = entry.get("tier", "unknown")
            cost = entry.get("cost", 0.0)
            cache = entry.get("cache", {})
            duration_ms = entry.get("duration_ms", 0)

            # NOTE(review): unlike the rich branch, cache type is not shown here.
            cache_str = "HIT" if cache.get("hit") else "MISS"
            print(f"{ts:<19} {workflow:<20} {stage:<15} {tier:<10} ${cost:>9.4f} {cache_str:<10} {duration_ms:>9}ms")
            total_cost += cost

        print("-" * 120)
        print(f"Total Cost: ${total_cost:.4f}")
        print(f"\nData location: {tracker.telemetry_dir}")

    return 0
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def cmd_telemetry_savings(args: Any) -> int:
    """Calculate and display cost savings.

    Compares actual (tier-routed) spend over the last ``args.days`` days
    (default 30) against a baseline in which every call had used the
    PREMIUM tier, and prints the difference plus cache savings.

    Args:
        args: Parsed command-line arguments

    Returns:
        Exit code (0 for success)

    """
    days = getattr(args, "days", 30)
    tracker = UsageTracker.get_instance()
    savings = tracker.calculate_savings(days=days)

    if savings["total_calls"] == 0:
        print("No telemetry data found for the specified period.")
        return 0

    # Tier distribution sorted by tier name for stable display order.
    tiers = sorted(savings["tier_distribution"].items())

    if RICH_AVAILABLE and Console is not None:
        # Rich output: assemble the report lines, then render in a panel.
        report = [f"Period: Last {days} days", "", "Usage Pattern:"]
        report.extend(f"  {tier:8}: {pct:5.1f}%" for tier, pct in tiers)
        report += [
            "",
            "Cost Comparison:",
            f"  Baseline (all PREMIUM): ${savings['baseline_cost']:.2f}",
            f"  Actual (tier routing):  ${savings['actual_cost']:.2f}",
            "",
        ]
        savings_color = "green" if savings["savings"] > 0 else "red"
        report.append(
            f"[bold {savings_color}]YOUR SAVINGS: ${savings['savings']:.2f} ({savings['savings_percent']:.1f}%)[/bold {savings_color}]"
        )
        report += [
            "",
            f"Cache savings: ${savings['cache_savings']:.2f}",
            f"Total calls: {savings['total_calls']}",
        ]
        Console().print(
            Panel(
                "\n".join(report),
                title=Text("Cost Savings Analysis", style="bold magenta"),
                border_style="cyan",
            )
        )
    else:
        # Plain-text fallback when rich is not installed.
        bar = "=" * 60
        print("\n" + bar)
        print("COST SAVINGS ANALYSIS")
        print(bar)
        print(f"Period: Last {days} days\n")
        print("Usage Pattern:")
        for tier, pct in tiers:
            print(f"  {tier:8}: {pct:5.1f}%")
        print("\nCost Comparison:")
        print(f"  Baseline (all PREMIUM): ${savings['baseline_cost']:.2f}")
        print(f"  Actual (tier routing):  ${savings['actual_cost']:.2f}")
        print(f"\nYOUR SAVINGS: ${savings['savings']:.2f} ({savings['savings_percent']:.1f}%)")
        print(f"\nCache savings: ${savings['cache_savings']:.2f}")
        print(f"Total calls: {savings['total_calls']}")
        print(bar)

    return 0
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
def cmd_telemetry_compare(args: Any) -> int:
    """Compare telemetry across two time periods.

    Compares summary stats for the last ``args.period1`` days (default 7)
    against the last ``args.period2`` days (default 30).

    NOTE(review): both windows appear to be measured back from now, so
    they overlap (period1 is contained in period2 when period1 < period2)
    — confirm against UsageTracker.get_stats().

    Args:
        args: Parsed command-line arguments

    Returns:
        Exit code (0 for success)

    """
    tracker = UsageTracker.get_instance()
    period1_days = getattr(args, "period1", 7)
    period2_days = getattr(args, "period2", 30)

    # Get stats for both periods; each dict is assumed to expose
    # total_calls, total_cost and cache_hit_rate — TODO confirm against
    # UsageTracker.get_stats().
    stats1 = tracker.get_stats(days=period1_days)
    stats2 = tracker.get_stats(days=period2_days)

    # Require data in both windows; otherwise the % changes are meaningless.
    if stats1["total_calls"] == 0 or stats2["total_calls"] == 0:
        print("Insufficient telemetry data for comparison.")
        return 0

    if RICH_AVAILABLE and Console is not None:
        console = Console()
        table = Table(title="Telemetry Comparison", show_header=True, header_style="bold magenta")
        table.add_column("Metric", style="cyan")
        table.add_column(f"Last {period1_days} days", justify="right", style="green")
        table.add_column(f"Last {period2_days} days", justify="right", style="yellow")
        table.add_column("Change", justify="right", style="blue")

        # Total calls (percentage change, guarded against division by zero)
        calls_change = ((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100) if stats2["total_calls"] > 0 else 0
        table.add_row(
            "Total Calls",
            str(stats1["total_calls"]),
            str(stats2["total_calls"]),
            f"{calls_change:+.1f}%",
        )

        # Total cost
        cost_change = ((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100) if stats2["total_cost"] > 0 else 0
        table.add_row(
            "Total Cost",
            f"${stats1['total_cost']:.2f}",
            f"${stats2['total_cost']:.2f}",
            f"{cost_change:+.1f}%",
        )

        # Avg cost per call
        avg1 = stats1["total_cost"] / stats1["total_calls"] if stats1["total_calls"] > 0 else 0
        avg2 = stats2["total_cost"] / stats2["total_calls"] if stats2["total_calls"] > 0 else 0
        avg_change = ((avg1 - avg2) / avg2 * 100) if avg2 > 0 else 0
        table.add_row(
            "Avg Cost/Call",
            f"${avg1:.4f}",
            f"${avg2:.4f}",
            f"{avg_change:+.1f}%",
        )

        # Cache hit rate — difference in percentage points ("pp"), not a ratio.
        cache_change = stats1["cache_hit_rate"] - stats2["cache_hit_rate"]
        table.add_row(
            "Cache Hit Rate",
            f"{stats1['cache_hit_rate']:.1f}%",
            f"{stats2['cache_hit_rate']:.1f}%",
            f"{cache_change:+.1f}pp",
        )

        console.print(table)
    else:
        # Fallback to plain text; same metrics and guards as the rich branch.
        print("\n" + "=" * 80)
        print("TELEMETRY COMPARISON")
        print("=" * 80)
        print(f"{'Metric':<20} {'Last ' + str(period1_days) + ' days':>20} {'Last ' + str(period2_days) + ' days':>20} {'Change':>15}")
        print("-" * 80)

        calls_change = ((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100) if stats2["total_calls"] > 0 else 0
        print(f"{'Total Calls':<20} {stats1['total_calls']:>20} {stats2['total_calls']:>20} {calls_change:>14.1f}%")

        cost_change = ((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100) if stats2["total_cost"] > 0 else 0
        print(f"{'Total Cost':<20} ${stats1['total_cost']:>19.2f} ${stats2['total_cost']:>19.2f} {cost_change:>14.1f}%")

        avg1 = stats1["total_cost"] / stats1["total_calls"] if stats1["total_calls"] > 0 else 0
        avg2 = stats2["total_cost"] / stats2["total_calls"] if stats2["total_calls"] > 0 else 0
        avg_change = ((avg1 - avg2) / avg2 * 100) if avg2 > 0 else 0
        print(f"{'Avg Cost/Call':<20} ${avg1:>19.4f} ${avg2:>19.4f} {avg_change:>14.1f}%")

        # "pp" = percentage points (absolute difference of the two rates).
        cache_change = stats1["cache_hit_rate"] - stats2["cache_hit_rate"]
        print(f"{'Cache Hit Rate':<20} {stats1['cache_hit_rate']:>19.1f}% {stats2['cache_hit_rate']:>19.1f}% {cache_change:>14.1f}pp")

        print("=" * 80)

    return 0
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
def cmd_telemetry_reset(args: Any) -> int:
    """Reset/clear all telemetry data.

    Requires an explicit ``--confirm`` flag; without it the command only
    prints a warning and fails.

    Args:
        args: Parsed command-line arguments

    Returns:
        Exit code (0 for success)

    """
    usage = UsageTracker.get_instance()

    if getattr(args, "confirm", False):
        deleted = usage.reset()
        print(f"Deleted {deleted} telemetry entries.")
        print("New tracking starts now.")
        return 0

    # Refuse to destroy data without explicit confirmation.
    print("WARNING: This will permanently delete all telemetry data.")
    print(f"Location: {usage.telemetry_dir}")
    print("\nUse --confirm to proceed.")
    return 1
|
|
362
|
+
|
|
363
|
+
|
|
364
|
+
def cmd_telemetry_export(args: Any) -> int:
    """Export telemetry data to JSON or CSV.

    Reads ``args.format`` ("json" or "csv", default "json"),
    ``args.output`` (optional file path; stdout when omitted) and
    ``args.days`` (optional age filter).

    Args:
        args: Parsed command-line arguments

    Returns:
        Exit code (0 for success, 1 for an unknown format or an invalid
        output path)

    """

    def _flat_row(entry: dict) -> dict:
        """Flatten one nested telemetry entry into a CSV-ready row dict."""
        tokens = entry.get("tokens", {})
        cache = entry.get("cache", {})
        return {
            "ts": entry.get("ts", ""),
            "workflow": entry.get("workflow", ""),
            "stage": entry.get("stage", ""),
            "tier": entry.get("tier", ""),
            "model": entry.get("model", ""),
            "provider": entry.get("provider", ""),
            "cost": entry.get("cost", 0.0),
            "tokens_input": tokens.get("input", 0),
            "tokens_output": tokens.get("output", 0),
            "cache_hit": cache.get("hit", False),
            "cache_type": cache.get("type", ""),
            "duration_ms": entry.get("duration_ms", 0),
        }

    tracker = UsageTracker.get_instance()
    format_type = getattr(args, "format", "json")
    output_file = getattr(args, "output", None)
    days = getattr(args, "days", None)

    entries = tracker.export_to_dict(days=days)

    if not entries:
        print("No telemetry data to export.")
        return 0

    if format_type not in ("json", "csv"):
        print(f"Unknown format: {format_type}")
        print("Supported formats: json, csv")
        return 1

    # Validate the output path once, up front, and report a bad path as a
    # normal CLI failure instead of letting ValueError escape as a traceback.
    validated_path = None
    if output_file:
        try:
            validated_path = _validate_file_path(output_file)
        except ValueError as e:
            print(f"Invalid output path: {e}")
            return 1

    if format_type == "json":
        if validated_path is not None:
            with open(validated_path, "w", encoding="utf-8") as f:
                json.dump(entries, f, indent=2)
            print(f"Exported {len(entries)} entries to {validated_path}")
        else:
            print(json.dumps(entries, indent=2))
    else:  # format_type == "csv"
        # Fixed column set; rows are flattened by _flat_row above.
        fieldnames = ["ts", "workflow", "stage", "tier", "model", "provider", "cost",
                      "tokens_input", "tokens_output", "cache_hit", "cache_type", "duration_ms"]

        if validated_path is not None:
            # newline="" is required by the csv module to avoid doubled rows.
            with open(validated_path, "w", newline="", encoding="utf-8") as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerows(_flat_row(entry) for entry in entries)
            print(f"Exported {len(entries)} entries to {validated_path}")
        else:
            # Print to stdout
            writer = csv.DictWriter(sys.stdout, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(_flat_row(entry) for entry in entries)

    return 0
|