overcode 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- overcode/__init__.py +1 -1
- overcode/cli.py +42 -3
- overcode/config.py +49 -0
- overcode/daemon_logging.py +144 -0
- overcode/daemon_utils.py +84 -0
- overcode/history_reader.py +17 -5
- overcode/implementations.py +11 -0
- overcode/launcher.py +3 -0
- overcode/mocks.py +4 -0
- overcode/monitor_daemon.py +25 -126
- overcode/pid_utils.py +10 -3
- overcode/protocols.py +12 -0
- overcode/session_manager.py +3 -0
- overcode/settings.py +20 -1
- overcode/standing_instructions.py +15 -6
- overcode/status_constants.py +11 -0
- overcode/status_detector.py +38 -0
- overcode/status_patterns.py +12 -0
- overcode/supervisor_daemon.py +40 -171
- overcode/tui.py +326 -39
- overcode/tui_helpers.py +18 -0
- overcode/web_api.py +486 -2
- overcode/web_chartjs.py +32 -0
- overcode/web_server.py +355 -3
- overcode/web_server_runner.py +104 -0
- overcode/web_templates.py +1093 -0
- {overcode-0.1.0.dist-info → overcode-0.1.2.dist-info}/METADATA +13 -1
- overcode-0.1.2.dist-info/RECORD +45 -0
- {overcode-0.1.0.dist-info → overcode-0.1.2.dist-info}/WHEEL +1 -1
- overcode/daemon.py +0 -1184
- overcode/daemon_state.py +0 -113
- overcode-0.1.0.dist-info/RECORD +0 -43
- {overcode-0.1.0.dist-info → overcode-0.1.2.dist-info}/entry_points.txt +0 -0
- {overcode-0.1.0.dist-info → overcode-0.1.2.dist-info}/licenses/LICENSE +0 -0
- {overcode-0.1.0.dist-info → overcode-0.1.2.dist-info}/top_level.txt +0 -0
overcode/tui_helpers.py
CHANGED
|
@@ -96,6 +96,24 @@ def format_tokens(tokens: int) -> str:
|
|
|
96
96
|
return str(tokens)
|
|
97
97
|
|
|
98
98
|
|
|
99
|
+
def format_line_count(count: int) -> str:
    """Render an insertion/deletion line count in compact human form.

    Args:
        count: Number of lines.

    Returns:
        "500" for small counts, "173K" for thousands (no decimal, to keep
        the display compact), "1.2M" for millions.
    """
    # Guard clauses, smallest magnitude first.
    if count < 1_000:
        return str(count)
    if count < 1_000_000:
        # Integer division: K values are shown without a decimal point.
        return f"{count // 1_000}K"
    return f"{count / 1_000_000:.1f}M"
|
|
115
|
+
|
|
116
|
+
|
|
99
117
|
def calculate_uptime(start_time: str, now: Optional[datetime] = None) -> str:
|
|
100
118
|
"""Calculate uptime from ISO format start_time.
|
|
101
119
|
|
overcode/web_api.py
CHANGED
|
@@ -12,6 +12,7 @@ from .monitor_daemon_state import (
|
|
|
12
12
|
MonitorDaemonState,
|
|
13
13
|
SessionDaemonState,
|
|
14
14
|
)
|
|
15
|
+
from .settings import get_agent_history_path
|
|
15
16
|
from .status_history import read_agent_status_history
|
|
16
17
|
from .tui_helpers import (
|
|
17
18
|
format_duration,
|
|
@@ -232,8 +233,9 @@ def get_timeline_data(tmux_session: str, hours: float = 3.0, slots: int = 60) ->
|
|
|
232
233
|
"status_colors": {k: get_web_color(get_status_color(k)) for k in AGENT_TIMELINE_CHARS},
|
|
233
234
|
}
|
|
234
235
|
|
|
235
|
-
# Get agent history
|
|
236
|
-
|
|
236
|
+
# Get agent history from session-specific file
|
|
237
|
+
history_path = get_agent_history_path(tmux_session)
|
|
238
|
+
all_history = read_agent_status_history(hours=hours, history_file=history_path)
|
|
237
239
|
|
|
238
240
|
# Group by agent
|
|
239
241
|
agent_histories: Dict[str, List] = {}
|
|
@@ -277,3 +279,485 @@ def get_health_data() -> Dict[str, Any]:
|
|
|
277
279
|
"status": "ok",
|
|
278
280
|
"timestamp": datetime.now().isoformat(),
|
|
279
281
|
}
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
# =============================================================================
|
|
285
|
+
# Analytics API Endpoints (for `overcode web` historical analytics dashboard)
|
|
286
|
+
# =============================================================================
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def get_analytics_sessions(
    start: Optional[datetime] = None,
    end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """Collect all sessions (active and archived) inside a time window.

    Args:
        start: Keep only sessions that started at or after this time.
        end: Keep only sessions that started at or before this time.

    Returns:
        Dictionary with a ``sessions`` list (newest first) and a
        ``summary`` dict of aggregate token/cost/time stats.
    """
    # Local imports avoid a module-level import cycle with these modules.
    from .session_manager import SessionManager
    from .history_reader import get_session_stats

    manager = SessionManager()
    records = []

    # Active sessions: enrich with work-time detail from Claude Code history.
    for sess in manager.list_sessions():
        rec = _session_to_analytics_record(sess, is_archived=False)
        detail = get_session_stats(sess)
        if detail:
            rec["work_times"] = detail.work_times
            rec["median_work_time"] = detail.median_work_time
        records.append(rec)

    # Archived sessions: carry over the recorded end time when present.
    for sess in manager.list_archived_sessions():
        rec = _session_to_analytics_record(sess, is_archived=True)
        rec["end_time"] = getattr(sess, "_end_time", None)
        records.append(rec)

    # Drop records whose start_time falls outside the requested window;
    # records with unparseable start times are dropped as well.
    if start or end:
        kept = []
        for rec in records:
            try:
                begun = datetime.fromisoformat(rec["start_time"])
            except (ValueError, TypeError):
                continue
            if (start and begun < start) or (end and begun > end):
                continue
            kept.append(rec)
        records = kept

    # Newest first.
    records.sort(key=lambda r: r.get("start_time", ""), reverse=True)

    # Aggregate summary statistics across the filtered set.
    tokens = sum(r.get("total_tokens", 0) for r in records)
    cost = sum(r.get("estimated_cost_usd", 0) for r in records)
    green = sum(r.get("green_time_seconds", 0) for r in records)
    non_green = sum(r.get("non_green_time_seconds", 0) for r in records)
    wall = green + non_green
    green_pct = (green / wall * 100) if wall > 0 else 0

    return {
        "sessions": records,
        "summary": {
            "session_count": len(records),
            "total_tokens": tokens,
            "total_cost_usd": round(cost, 2),
            "total_green_time_seconds": green,
            "total_non_green_time_seconds": non_green,
            "avg_green_percent": round(green_pct, 1),
        },
    }
|
|
361
|
+
|
|
362
|
+
|
|
363
|
+
def _session_to_analytics_record(session, is_archived: bool) -> Dict[str, Any]:
|
|
364
|
+
"""Convert a Session to a analytics record dictionary."""
|
|
365
|
+
stats = session.stats
|
|
366
|
+
green_time = stats.green_time_seconds
|
|
367
|
+
non_green_time = stats.non_green_time_seconds
|
|
368
|
+
total_time = green_time + non_green_time
|
|
369
|
+
green_pct = (green_time / total_time * 100) if total_time > 0 else 0
|
|
370
|
+
|
|
371
|
+
return {
|
|
372
|
+
"id": session.id,
|
|
373
|
+
"name": session.name,
|
|
374
|
+
"start_time": session.start_time,
|
|
375
|
+
"end_time": None,
|
|
376
|
+
"repo_name": session.repo_name,
|
|
377
|
+
"branch": session.branch,
|
|
378
|
+
"is_archived": is_archived,
|
|
379
|
+
"interaction_count": stats.interaction_count,
|
|
380
|
+
"steers_count": stats.steers_count,
|
|
381
|
+
"total_tokens": stats.total_tokens,
|
|
382
|
+
"input_tokens": stats.input_tokens,
|
|
383
|
+
"output_tokens": stats.output_tokens,
|
|
384
|
+
"cache_creation_tokens": stats.cache_creation_tokens,
|
|
385
|
+
"cache_read_tokens": stats.cache_read_tokens,
|
|
386
|
+
"estimated_cost_usd": round(stats.estimated_cost_usd, 4),
|
|
387
|
+
"green_time_seconds": green_time,
|
|
388
|
+
"non_green_time_seconds": non_green_time,
|
|
389
|
+
"green_percent": round(green_pct, 1),
|
|
390
|
+
"work_times": [], # Will be populated if available
|
|
391
|
+
"median_work_time": 0.0,
|
|
392
|
+
}
|
|
393
|
+
|
|
394
|
+
|
|
395
|
+
def get_analytics_timeline(
    tmux_session: str,
    start: Optional[datetime] = None,
    end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """Build the agent status timeline plus user-presence events for a window.

    Args:
        tmux_session: tmux session name.
        start: Start of the time window (default: 24 hours ago).
        end: End of the time window (default: now).

    Returns:
        Dictionary with per-agent event lists, presence events, and the
        status → web color legend.
    """
    from .presence_logger import read_presence_history

    # Window defaults to the trailing 24 hours.
    if start is None:
        start = datetime.now() - timedelta(hours=24)
    if end is None:
        end = datetime.now()

    hours = (end - start).total_seconds() / 3600.0

    # Agent status history comes from the session-specific file.
    history_path = get_agent_history_path(tmux_session)
    rows = read_agent_status_history(hours=hours, history_file=history_path)

    # Keep only in-window rows, grouped per agent.
    agents: Dict[str, List[Dict[str, Any]]] = {}
    for ts, name, status, activity in rows:
        if not (start <= ts <= end):
            continue
        agents.setdefault(name, []).append({
            "timestamp": ts.isoformat(),
            "status": status,
            # Cap activity text to keep payloads small.
            "activity": activity[:100] if activity else "",
            "color": get_web_color(get_status_color(status)),
        })

    # User presence: 1=locked, 2=inactive, 3=active.
    state_labels = {1: "locked", 2: "inactive", 3: "active"}
    state_palette = {1: "#6b7280", 2: "#eab308", 3: "#22c55e"}
    presence = [
        {
            "timestamp": ts.isoformat(),
            "state": state,
            "state_name": state_labels.get(state, "unknown"),
            "color": state_palette.get(state, "#6b7280"),
        }
        for ts, state in read_presence_history(hours=hours)
        if start <= ts <= end
    ]

    return {
        "start": start.isoformat(),
        "end": end.isoformat(),
        "agents": agents,
        "presence": presence,
        "status_colors": {k: get_web_color(get_status_color(k)) for k in AGENT_TIMELINE_CHARS},
    }
|
|
463
|
+
|
|
464
|
+
|
|
465
|
+
def get_analytics_stats(
    tmux_session: str,
    start: Optional[datetime] = None,
    end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """Aggregate efficiency metrics over all sessions in a time window.

    Args:
        tmux_session: tmux session name.
        start: Start of the time window.
        end: End of the time window.

    Returns:
        Dictionary with the session summary, cost/efficiency metrics,
        presence-segmented efficiency, interaction counts, and work-time
        percentiles.
    """
    data = get_analytics_sessions(start, end)
    sessions = data["sessions"]
    summary = data["summary"]

    interactions = sum(s.get("interaction_count", 0) for s in sessions)
    steers = sum(s.get("steers_count", 0) for s in sessions)
    cost = summary["total_cost_usd"]

    # Cost efficiency: per interaction and per wall-clock hour.
    per_interaction = cost / interactions if interactions > 0 else 0
    hours_total = (summary["total_green_time_seconds"] + summary["total_non_green_time_seconds"]) / 3600
    per_hour = cost / hours_total if hours_total > 0 else 0

    # Spin rate: fraction of interactions that were automated steers.
    spin = steers / interactions * 100 if interactions > 0 else 0

    # Pool individual work times across sessions for percentile stats.
    work_samples: List[float] = []
    for s in sessions:
        samples = s.get("work_times", [])
        if samples:
            work_samples.extend(samples)

    work_stats = _calculate_percentiles(work_samples)

    # Efficiency split by whether the user was present or AFK.
    presence_eff = _calculate_presence_efficiency(tmux_session, start, end)

    return {
        "time_range": {
            "start": start.isoformat() if start else None,
            "end": end.isoformat() if end else None,
        },
        "summary": summary,
        "efficiency": {
            "green_percent": summary["avg_green_percent"],
            "cost_per_interaction": round(per_interaction, 4),
            "cost_per_hour": round(per_hour, 2),
            "spin_rate_percent": round(spin, 1),
        },
        "presence_efficiency": presence_eff,
        "interactions": {
            "total": interactions,
            "human": interactions - steers,
            "robot_steers": steers,
        },
        "work_times": work_stats,
    }
|
|
530
|
+
|
|
531
|
+
|
|
532
|
+
def _calculate_percentiles(values: List[float]) -> Dict[str, float]:
|
|
533
|
+
"""Calculate work time percentiles."""
|
|
534
|
+
if not values:
|
|
535
|
+
return {
|
|
536
|
+
"mean": 0.0,
|
|
537
|
+
"median": 0.0,
|
|
538
|
+
"p5": 0.0,
|
|
539
|
+
"p95": 0.0,
|
|
540
|
+
"min": 0.0,
|
|
541
|
+
"max": 0.0,
|
|
542
|
+
}
|
|
543
|
+
|
|
544
|
+
sorted_vals = sorted(values)
|
|
545
|
+
n = len(sorted_vals)
|
|
546
|
+
|
|
547
|
+
def percentile(p: float) -> float:
|
|
548
|
+
idx = int(p * (n - 1))
|
|
549
|
+
return sorted_vals[idx]
|
|
550
|
+
|
|
551
|
+
return {
|
|
552
|
+
"mean": round(sum(values) / n, 1),
|
|
553
|
+
"median": round(percentile(0.5), 1),
|
|
554
|
+
"p5": round(percentile(0.05), 1),
|
|
555
|
+
"p95": round(percentile(0.95), 1),
|
|
556
|
+
"min": round(sorted_vals[0], 1),
|
|
557
|
+
"max": round(sorted_vals[-1], 1),
|
|
558
|
+
}
|
|
559
|
+
|
|
560
|
+
|
|
561
|
+
def _calculate_presence_efficiency(
    tmux_session: str,
    start: Optional[datetime] = None,
    end: Optional[datetime] = None,
    sample_interval_seconds: int = 60,
) -> Dict[str, Any]:
    """Calculate agent efficiency metrics segmented by user presence.

    Samples agent status at regular intervals and calculates what percentage
    of agents were "green" (running) during:
    - Present periods: user presence state = 3 (active)
    - AFK periods: user presence state = 1 (locked) or 2 (inactive)

    Args:
        tmux_session: tmux session name
        start: Start of time range
        end: End of time range
        sample_interval_seconds: How often to sample (default 60s)

    Returns:
        Dictionary with presence and AFK efficiency metrics
    """
    from .presence_logger import read_presence_history

    # Default to last 24 hours if no range specified
    # (end resolved first so the default start anchors to the same instant).
    if end is None:
        end = datetime.now()
    if start is None:
        start = end - timedelta(hours=24)

    hours = (end - start).total_seconds() / 3600.0

    # Get agent status history from session-specific file
    # (tuples of (timestamp, agent_name, status, activity)).
    history_path = get_agent_history_path(tmux_session)
    agent_history = read_agent_status_history(hours=hours, history_file=history_path)

    # Get presence history: list of (timestamp, state)
    presence_history = read_presence_history(hours=hours)

    # Filter to time range
    agent_history = [(ts, name, status, act) for ts, name, status, act in agent_history
                     if start <= ts <= end]
    presence_history = [(ts, state) for ts, state in presence_history
                        if start <= ts <= end]

    # If no data, return zeros
    if not agent_history or not presence_history:
        return {
            "present_efficiency": 0.0,
            "afk_efficiency": 0.0,
            "present_samples": 0,
            "afk_samples": 0,
            "has_data": False,
        }

    # Sort histories by timestamp
    agent_history.sort(key=lambda x: x[0])
    presence_history.sort(key=lambda x: x[0])

    # Get unique agent names
    agent_names = sorted(set(name for _, name, _, _ in agent_history))

    # Build lookup: for each agent, sorted list of (timestamp, status)
    agent_status_timeline: Dict[str, List[tuple]] = {name: [] for name in agent_names}
    for ts, name, status, _ in agent_history:
        agent_status_timeline[name].append((ts, status))

    # Sample at regular intervals
    # NOTE(review): each sample does a reverse linear scan of the histories,
    # so the loop is O(samples * history length); fine for 24h windows at a
    # 60s interval, but worth a bisect if windows grow much larger.
    present_green_percents: List[float] = []
    afk_green_percents: List[float] = []

    current_time = start
    while current_time <= end:
        # Find user presence state at this time (most recent entry before current_time)
        user_state = None
        for ts, state in reversed(presence_history):
            if ts <= current_time:
                user_state = state
                break

        # If no presence data before this time, skip
        if user_state is None:
            current_time += timedelta(seconds=sample_interval_seconds)
            continue

        # Find agent statuses at this time
        green_count = 0
        total_agents = 0
        for name in agent_names:
            timeline = agent_status_timeline[name]
            # Most recent status at or before the sample time; agents with
            # no entry yet are excluded from the denominator.
            agent_status = None
            for ts, status in reversed(timeline):
                if ts <= current_time:
                    agent_status = status
                    break

            if agent_status is not None:
                total_agents += 1
                if agent_status == "running":
                    green_count += 1

        # Calculate green percentage for this sample
        # (samples with no known agents contribute to neither bucket).
        if total_agents > 0:
            green_percent = (green_count / total_agents) * 100

            # Bucket by presence state
            if user_state == 3:  # Active/present
                present_green_percents.append(green_percent)
            else:  # state 1 (locked) or 2 (inactive) = AFK
                afk_green_percents.append(green_percent)

        current_time += timedelta(seconds=sample_interval_seconds)

    # Calculate averages
    present_efficiency = (
        sum(present_green_percents) / len(present_green_percents)
        if present_green_percents else 0.0
    )
    afk_efficiency = (
        sum(afk_green_percents) / len(afk_green_percents)
        if afk_green_percents else 0.0
    )

    return {
        "present_efficiency": round(present_efficiency, 1),
        "afk_efficiency": round(afk_efficiency, 1),
        "present_samples": len(present_green_percents),
        "afk_samples": len(afk_green_percents),
        # has_data can be False even when the early-return above was not
        # taken (e.g. presence entries all later than every sample point).
        "has_data": len(present_green_percents) + len(afk_green_percents) > 0,
    }
|
|
691
|
+
|
|
692
|
+
|
|
693
|
+
def get_analytics_daily(
    start: Optional[datetime] = None,
    end: Optional[datetime] = None,
) -> Dict[str, Any]:
    """Aggregate session stats per calendar day for charting.

    Args:
        start: Start of the time window.
        end: End of the time window.

    Returns:
        Dictionary with a ``days`` list of per-day stat dicts (sorted by
        date) and a matching ``labels`` list of "YYYY-MM-DD" strings.
    """
    sessions = get_analytics_sessions(start, end)["sessions"]

    # Bucket sessions by the calendar date of their start time.
    per_day: Dict[str, Dict[str, Any]] = {}

    for rec in sessions:
        try:
            key = datetime.fromisoformat(rec["start_time"]).strftime("%Y-%m-%d")
            bucket = per_day.setdefault(key, {
                "date": key,
                "sessions": 0,
                "tokens": 0,
                "cost_usd": 0.0,
                "green_time_seconds": 0.0,
                "non_green_time_seconds": 0.0,
                "interactions": 0,
                "steers": 0,
            })
            bucket["sessions"] += 1
            bucket["tokens"] += rec.get("total_tokens", 0)
            bucket["cost_usd"] += rec.get("estimated_cost_usd", 0)
            bucket["green_time_seconds"] += rec.get("green_time_seconds", 0)
            bucket["non_green_time_seconds"] += rec.get("non_green_time_seconds", 0)
            bucket["interactions"] += rec.get("interaction_count", 0)
            bucket["steers"] += rec.get("steers_count", 0)
        except (ValueError, TypeError):
            # Unparseable start_time (or malformed record values): skip it.
            continue

    # Emit days in chronological order with derived/rounded fields.
    labels = sorted(per_day)
    days = []
    for key in labels:
        day = per_day[key]
        span = day["green_time_seconds"] + day["non_green_time_seconds"]
        day["green_percent"] = round(
            (day["green_time_seconds"] / span * 100) if span > 0 else 0, 1
        )
        day["cost_usd"] = round(day["cost_usd"], 2)
        days.append(day)

    return {
        "days": days,
        "labels": labels,
    }
|
|
757
|
+
|
|
758
|
+
|
|
759
|
+
def get_time_presets() -> List[Dict[str, str]]:
    """Return the configured web time-range presets (falls back to defaults)."""
    # Imported lazily to avoid a module-level cycle with config.
    from .config import get_web_time_presets as _load_presets

    return _load_presets()
|