empathy-framework 4.8.0-py3-none-any.whl → 4.9.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/METADATA +64 -25
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/RECORD +28 -39
- empathy_os/__init__.py +2 -2
- empathy_os/cache/hash_only.py +3 -6
- empathy_os/cache/hybrid.py +3 -6
- empathy_os/cli_legacy.py +1 -27
- empathy_os/cli_unified.py +0 -25
- empathy_os/memory/__init__.py +5 -19
- empathy_os/memory/short_term.py +132 -10
- empathy_os/memory/types.py +4 -0
- empathy_os/models/registry.py +4 -4
- empathy_os/project_index/scanner.py +3 -2
- empathy_os/socratic/ab_testing.py +1 -1
- empathy_os/workflow_commands.py +9 -9
- empathy_os/workflows/__init__.py +4 -4
- empathy_os/workflows/base.py +8 -54
- empathy_os/workflows/bug_predict.py +2 -2
- empathy_os/workflows/history.py +5 -3
- empathy_os/workflows/perf_audit.py +4 -4
- empathy_os/workflows/progress.py +22 -324
- empathy_os/workflows/routing.py +0 -5
- empathy_os/workflows/security_audit.py +0 -189
- empathy_os/workflows/security_audit_phase3.py +26 -2
- empathy_os/workflows/test_gen.py +7 -7
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/output.py +0 -410
- empathy_os/workflows/progressive/README 2.md +0 -454
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/WHEEL +0 -0
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/top_level.txt +0 -0
empathy_os/memory/short_term.py
CHANGED

@@ -164,6 +164,14 @@ class RedisShortTermMemory:
         self._mock_streams: dict[str, list[tuple[str, dict]]] = {}
         self._mock_pubsub_handlers: dict[str, list[Callable[[dict], None]]] = {}

+        # Local LRU cache for two-tier caching (memory + Redis)
+        # Reduces network I/O from 37ms to <0.001ms for frequently accessed keys
+        self._local_cache_enabled = self._config.local_cache_enabled
+        self._local_cache_max_size = self._config.local_cache_size
+        self._local_cache: dict[str, tuple[str, float, float]] = {}  # key -> (value, timestamp, last_access)
+        self._local_cache_hits = 0
+        self._local_cache_misses = 0
+
         # Security: Initialize PII scrubber and secrets detector
         self._pii_scrubber: PIIScrubber | None = None
         self._secrets_detector: SecretsDetector | None = None

@@ -262,43 +270,105 @@ class RedisShortTermMemory:
         raise last_error if last_error else ConnectionError("Redis operation failed")

     def _get(self, key: str) -> str | None:
-        """Get value from Redis or mock"""
+        """Get value from Redis or mock with two-tier caching (local + Redis)"""
+        # Check local cache first (0.001ms vs 37ms for Redis/mock)
+        # This works for BOTH mock and real Redis modes
+        if self._local_cache_enabled and key in self._local_cache:
+            value, timestamp, last_access = self._local_cache[key]
+            now = time.time()
+
+            # Update last access time for LRU
+            self._local_cache[key] = (value, timestamp, now)
+            self._local_cache_hits += 1
+
+            return value
+
+        # Cache miss - fetch from storage (mock or Redis)
+        self._local_cache_misses += 1
+
+        # Mock mode path
         if self.use_mock:
             if key in self._mock_storage:
                 value, expires = self._mock_storage[key]
                 if expires is None or datetime.now().timestamp() < expires:
-                    return str(value) if value is not None else None
+                    result = str(value) if value is not None else None
+                    # Add to local cache for next access
+                    if result and self._local_cache_enabled:
+                        self._add_to_local_cache(key, result)
+                    return result
             del self._mock_storage[key]
             return None
+
+        # Real Redis path
         if self._client is None:
             return None
+
         result = self._client.get(key)
+
+        # Add to local cache if successful
+        if result and self._local_cache_enabled:
+            self._add_to_local_cache(key, str(result))
+
         return str(result) if result else None

     def _set(self, key: str, value: str, ttl: int | None = None) -> bool:
-        """Set value in Redis or mock"""
+        """Set value in Redis or mock with two-tier caching"""
+        # Mock mode path
         if self.use_mock:
             expires = datetime.now().timestamp() + ttl if ttl else None
             self._mock_storage[key] = (value, expires)
+
+            # Update local cache in mock mode too
+            if self._local_cache_enabled:
+                self._add_to_local_cache(key, value)
+
             return True
+
+        # Real Redis path
         if self._client is None:
             return False
+
+        # Set in Redis
         if ttl:
             self._client.setex(key, ttl, value)
-        else:
-            self._client.set(key, value)
-        return True
+        else:
+            result = self._client.set(key, value)
+            if not result:
+                return False
+
+        # Update local cache if enabled
+        if self._local_cache_enabled:
+            self._add_to_local_cache(key, value)
+
+        return True

     def _delete(self, key: str) -> bool:
-        """Delete key from Redis or mock"""
+        """Delete key from Redis or mock and local cache"""
+        # Mock mode path
         if self.use_mock:
+            deleted = False
             if key in self._mock_storage:
                 del self._mock_storage[key]
-                return True
-            return False
+                deleted = True
+
+            # Remove from local cache if present
+            if self._local_cache_enabled and key in self._local_cache:
+                del self._local_cache[key]
+
+            return deleted
+
+        # Real Redis path
         if self._client is None:
             return False
-        return bool(self._client.delete(key) > 0)
+
+        # Delete from Redis
+        result = bool(self._client.delete(key) > 0)
+
+        # Also remove from local cache if present
+        if self._local_cache_enabled and key in self._local_cache:
+            del self._local_cache[key]
+
+        return result

     def _keys(self, pattern: str) -> list[str]:
         """Get keys matching pattern"""

@@ -313,6 +383,58 @@ class RedisShortTermMemory:
         # Convert bytes to strings - needed for API return type
         return [k.decode() if isinstance(k, bytes) else str(k) for k in keys]

+    # === Local LRU Cache Methods ===
+
+    def _add_to_local_cache(self, key: str, value: str) -> None:
+        """Add entry to local cache with LRU eviction.
+
+        Args:
+            key: Cache key
+            value: Value to cache
+        """
+        now = time.time()
+
+        # Evict oldest entry if cache is full
+        if len(self._local_cache) >= self._local_cache_max_size:
+            # Find key with oldest last_access time
+            oldest_key = min(self._local_cache, key=lambda k: self._local_cache[k][2])
+            del self._local_cache[oldest_key]
+
+        # Add new entry: (value, timestamp, last_access)
+        self._local_cache[key] = (value, now, now)
+
+    def clear_local_cache(self) -> int:
+        """Clear all entries from local cache.
+
+        Returns:
+            Number of entries cleared
+        """
+        count = len(self._local_cache)
+        self._local_cache.clear()
+        self._local_cache_hits = 0
+        self._local_cache_misses = 0
+        logger.info("local_cache_cleared", entries_cleared=count)
+        return count
+
+    def get_local_cache_stats(self) -> dict:
+        """Get local cache performance statistics.
+
+        Returns:
+            Dict with cache stats (hits, misses, hit_rate, size)
+        """
+        total = self._local_cache_hits + self._local_cache_misses
+        hit_rate = (self._local_cache_hits / total * 100) if total > 0 else 0.0
+
+        return {
+            "enabled": self._local_cache_enabled,
+            "size": len(self._local_cache),
+            "max_size": self._local_cache_max_size,
+            "hits": self._local_cache_hits,
+            "misses": self._local_cache_misses,
+            "hit_rate": hit_rate,
+            "total_requests": total,
+        }
+
     # === Security Methods ===

     def _sanitize_data(self, data: Any) -> tuple[Any, int]:
empathy_os/memory/types.py
CHANGED

@@ -96,6 +96,10 @@ class RedisConfig:
     retry_base_delay: float = 0.1  # seconds
     retry_max_delay: float = 2.0  # seconds

+    # Local LRU cache settings (two-tier caching)
+    local_cache_enabled: bool = True  # Enable local memory cache (reduces Redis network I/O)
+    local_cache_size: int = 500  # Maximum number of cached keys (~50KB memory)
+
     # Sentinel settings (for HA)
     sentinel_hosts: list[tuple[str, int]] | None = None
     sentinel_master_name: str | None = None
empathy_os/models/registry.py
CHANGED

@@ -209,11 +209,11 @@ class ModelRegistry:
         """Build tier and model ID caches for O(1) lookups."""
         # Cache for get_models_by_tier (tier -> list[ModelInfo])
         self._tier_cache: dict[str, list[ModelInfo]] = {}
-        for
-        self._tier_cache[
-            provider_models[
+        for tier_value in [t.value for t in ModelTier]:
+            self._tier_cache[tier_value] = [
+                provider_models[tier_value]
                 for provider_models in self._registry.values()
-                if
+                if tier_value in provider_models
             ]

         # Cache for get_model_by_id (model_id -> ModelInfo)
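The rewritten comprehension precomputes one list per tier at registry-build time, so `get_models_by_tier` becomes a dict hit. A runnable sketch with placeholder data (strings stand in for `ModelInfo`, and the enum is a stand-in for the real `ModelTier`):

```python
from enum import Enum


class ModelTier(Enum):  # stand-in; the real enum lives in empathy_os.models
    CHEAP = "cheap"
    CAPABLE = "capable"
    PREMIUM = "premium"


# Assumed shape, inferred from the comprehension: provider -> {tier_value: model}
registry = {
    "anthropic": {"cheap": "model-a", "premium": "model-b"},
    "openai": {"cheap": "model-c", "capable": "model-d"},
}

tier_cache: dict[str, list[str]] = {}
for tier_value in [t.value for t in ModelTier]:
    tier_cache[tier_value] = [
        provider_models[tier_value]
        for provider_models in registry.values()
        if tier_value in provider_models
    ]

assert tier_cache["cheap"] == ["model-a", "model-c"]  # later lookups are O(1)
```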
empathy_os/project_index/scanner.py
CHANGED

@@ -470,8 +470,9 @@ class ProjectScanner:
         try:
             content = path.read_text(encoding="utf-8", errors="ignore")
             lines = content.split("\n")
-            metrics["lines_of_code"] = len(
-                [line for line in lines if line.strip() and not line.strip().startswith("#")]
+            # Use generator expression for memory efficiency (no intermediate list)
+            metrics["lines_of_code"] = sum(
+                1 for line in lines if line.strip() and not line.strip().startswith("#")
             )

             # Optimization: Skip expensive AST analysis for test files
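The change swaps a materialized list for a lazy generator; the removed lines are truncated in this rendering, so the `len([...])` form shown above is inferred from the new comment ("no intermediate list"). The two forms count identically:

```python
lines = ["import os", "", "# comment", "x = compute()", "    return x"]

# List comprehension: builds every matching line in memory just to count it
loc_list = len([line for line in lines if line.strip() and not line.strip().startswith("#")])

# Generator expression: sum() consumes one element at a time, no intermediate list
loc_gen = sum(1 for line in lines if line.strip() and not line.strip().startswith("#"))

assert loc_list == loc_gen == 3
```

Since the file is already read into `lines`, the saving is modest; it avoids allocating a second list proportional to the file's code lines.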
empathy_os/workflow_commands.py
CHANGED

@@ -137,7 +137,7 @@ def morning_workflow(
     print("-" * 40)

     total_bugs = len(patterns.get("debugging", []))
-    resolved_bugs =
+    resolved_bugs = sum(1 for p in patterns.get("debugging", []) if p.get("status") == "resolved")
     security_decisions = len(patterns.get("security", []))

     print(f" Bug patterns: {total_bugs} ({resolved_bugs} resolved)")

@@ -207,14 +207,14 @@ def morning_workflow(
         checks_passed += 1
         print(" Lint: OK")
     else:
-        issues =
+        issues = sum(1 for line in output.split("\n") if line.strip())
         print(f" Lint: {issues} issues")

     # Check for uncommitted changes
     checks_total += 1
     success, output = _run_command(["git", "status", "--porcelain"])
     if success:
-        changes =
+        changes = sum(1 for line in output.split("\n") if line.strip())
         if changes == 0:
             checks_passed += 1
             print(" Git: Clean")

@@ -312,7 +312,7 @@ def _run_security_only(project_root: str = ".", verbose: bool = False) -> int:
     if not success or not output.strip():
         print(" PASS - No obvious hardcoded secrets")
     else:
-        lines =
+        lines = sum(1 for line in output.split("\n") if line.strip())
         issues.append(f"Secrets: {lines} potential hardcoded secrets")
         print(f" WARN - {lines} potential hardcoded values found")

@@ -322,7 +322,7 @@ def _run_security_only(project_root: str = ".", verbose: bool = False) -> int:
     if not output.strip():
         print(" PASS - No sensitive files tracked")
     else:
-        files =
+        files = sum(1 for line in output.split("\n") if line.strip())
         issues.append(f"Files: {files} sensitive files in git")
         print(f" WARN - {files} sensitive files tracked in git")

@@ -427,10 +427,10 @@ def ship_workflow(
     print("4. Checking git status...")
     success, output = _run_command(["git", "status", "--porcelain"])
    if success:
-        staged =
-
+        staged = sum(
+            1 for line in output.split("\n") if line.startswith(("A ", "M ", "D ", "R "))
         )
-        unstaged =
+        unstaged = sum(1 for line in output.split("\n") if line.startswith((" M", " D", "??")))
         if staged > 0:
             print(f" INFO - {staged} staged, {unstaged} unstaged")
         elif unstaged > 0:

@@ -523,7 +523,7 @@ def fix_all_workflow(project_root: str = ".", dry_run: bool = False, verbose: bo
         print(f" Fixed {fixed} issues")
     else:
         # Some issues couldn't be auto-fixed
-        unfixable =
+        unfixable = sum(1 for line in output.split("\n") if "error" in line.lower())
         print(f" {unfixable} issues require manual fix")
         if verbose:
             print(output)
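Every hunk in this file fills in the same counting idiom. The `ship_workflow` variant also encodes `git status --porcelain` semantics: each line is `XY <path>`, where `X` is the index (staged) state and `Y` the worktree state, so the two-character prefix distinguishes the cases. A small check of the classification:

```python
output = "M  src/app.py\nA  src/new.py\n M README.md\n?? scratch.txt\n"

staged = sum(1 for line in output.split("\n") if line.startswith(("A ", "M ", "D ", "R ")))
unstaged = sum(1 for line in output.split("\n") if line.startswith((" M", " D", "??")))

assert (staged, unstaged) == (2, 2)  # app.py/new.py staged; README/scratch not
```

One edge the prefixes miss: a file staged and then modified again reports `MM`, which matches neither tuple, so the counts are a heuristic rather than an exact census.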
empathy_os/workflows/__init__.py
CHANGED

@@ -80,11 +80,9 @@ from .base import (
     get_workflow_stats,
 )

-# Builder pattern for workflow construction
-from .builder import WorkflowBuilder, workflow_builder
-
 # Config is small and frequently needed
 from .config import DEFAULT_MODELS, ModelConfig, WorkflowConfig, create_example_config, get_model
+from .step_config import WorkflowStepConfig, steps_from_tier_map, validate_step_config

 # Routing strategies (small, frequently needed for builder pattern)
 from .routing import (

@@ -94,7 +92,9 @@ from .routing import (
     RoutingContext,
     TierRoutingStrategy,
 )
-
+
+# Builder pattern for workflow construction
+from .builder import WorkflowBuilder, workflow_builder

 # Lazy import mapping for workflow classes
 _LAZY_WORKFLOW_IMPORTS: dict[str, tuple[str, str]] = {
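The comment on the routing import ("frequently needed for builder pattern") suggests the reorder simply moves `.builder` after the strategies it uses, while the heavyweight workflow classes stay behind `_LAZY_WORKFLOW_IMPORTS`. A sketch of the PEP 562 pattern such a table typically feeds; the mapping entry and value format here are assumptions for illustration, not the package's actual table:

```python
import importlib

# Assumed format: attribute name -> (module path, attribute inside that module)
_LAZY_WORKFLOW_IMPORTS: dict[str, tuple[str, str]] = {
    "BugPredictionWorkflow": ("empathy_os.workflows.bug_predict", "BugPredictionWorkflow"),
}


def __getattr__(name: str):
    # Called only when normal attribute lookup on the module fails (PEP 562),
    # so each workflow module is imported on first access, not at package import
    if name in _LAZY_WORKFLOW_IMPORTS:
        module_path, attr = _LAZY_WORKFLOW_IMPORTS[name]
        return getattr(importlib.import_module(module_path), attr)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```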
empathy_os/workflows/base.py
CHANGED

@@ -17,7 +17,6 @@ from __future__ import annotations

 import json
 import logging
-import sys
 import time
 import uuid
 from abc import ABC, abstractmethod

@@ -58,12 +57,7 @@ from empathy_os.models import ModelTier as UnifiedModelTier
 from .caching import CachedResponse, CachingMixin

 # Import progress tracking
-from .progress import (
-    RICH_AVAILABLE,
-    ProgressCallback,
-    ProgressTracker,
-    RichProgressReporter,
-)
+from .progress import ProgressCallback, ProgressTracker
 from .telemetry_mixin import TelemetryMixin

 # Import telemetry tracking

@@ -550,7 +544,6 @@ class BaseWorkflow(CachingMixin, TelemetryMixin, ABC):
         enable_tier_tracking: bool = True,
         enable_tier_fallback: bool = False,
         routing_strategy: TierRoutingStrategy | None = None,
-        enable_rich_progress: bool = False,
     ):
         """Initialize workflow with optional cost tracker, provider, and config.

@@ -576,11 +569,6 @@
                 When provided, overrides static tier_map for stage tier decisions.
                 Strategies: CostOptimizedRouting, PerformanceOptimizedRouting,
                 BalancedRouting, HybridRouting.
-            enable_rich_progress: Whether to enable Rich-based live progress display
-                (default False). When enabled and output is a TTY, shows live
-                progress bars with spinners. Default is False because most users
-                run workflows from IDEs (VSCode, etc.) where TTY is not available.
-                The console reporter works reliably in all environments.

         """
         from .config import WorkflowConfig

@@ -591,8 +579,6 @@
         # Progress tracking
         self._progress_callback = progress_callback
         self._progress_tracker: ProgressTracker | None = None
-        self._enable_rich_progress = enable_rich_progress
-        self._rich_reporter: RichProgressReporter | None = None

         # New: LLMExecutor support
         self._executor = executor

@@ -1069,39 +1055,15 @@
         current_data = kwargs
         error = None

-        # Initialize progress tracker
-        # Always show progress by default (IDE-friendly console output)
-        # Rich live display only when explicitly enabled AND in TTY
-        from .progress import ConsoleProgressReporter
-
-        self._progress_tracker = ProgressTracker(
-            workflow_name=self.name,
-            workflow_id=self._run_id,
-            stage_names=self.stages,
-        )
-
-        # Add user's callback if provided
+        # Initialize progress tracker if callback provided
         if self._progress_callback:
+            self._progress_tracker = ProgressTracker(
+                workflow_name=self.name,
+                workflow_id=self._run_id,
+                stage_names=self.stages,
+            )
             self._progress_tracker.add_callback(self._progress_callback)
-
-        # Rich progress: only when explicitly enabled AND in a TTY
-        if self._enable_rich_progress and RICH_AVAILABLE and sys.stdout.isatty():
-            try:
-                self._rich_reporter = RichProgressReporter(self.name, self.stages)
-                self._progress_tracker.add_callback(self._rich_reporter.report)
-                self._rich_reporter.start()
-            except Exception as e:
-                # Fall back to console reporter
-                logger.debug(f"Rich progress unavailable: {e}")
-                self._rich_reporter = None
-                console_reporter = ConsoleProgressReporter(verbose=False)
-                self._progress_tracker.add_callback(console_reporter.report)
-        else:
-            # Default: use console reporter (works in IDEs, terminals, everywhere)
-            console_reporter = ConsoleProgressReporter(verbose=False)
-            self._progress_tracker.add_callback(console_reporter.report)
-
-        self._progress_tracker.start_workflow()
+            self._progress_tracker.start_workflow()

         try:
             # Tier fallback mode: try CHEAP → CAPABLE → PREMIUM with validation

@@ -1431,14 +1393,6 @@
         if self._progress_tracker and error is None:
             self._progress_tracker.complete_workflow()

-        # Stop Rich progress display if active
-        if self._rich_reporter:
-            try:
-                self._rich_reporter.stop()
-            except Exception:
-                pass  # Best effort cleanup
-            self._rich_reporter = None
-
         # Save to workflow history for dashboard
         try:
             _save_workflow_run(self.name, provider_str, result)
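With the Rich display removed, progress reporting becomes strictly opt-in: no tracker, reporter, or TTY detection exists unless the caller passes a callback. A self-contained sketch of that pattern; both classes are simplified stand-ins, and the real `ProgressTracker` and callback type live in `empathy_os.workflows.progress`:

```python
from typing import Callable


class ProgressTracker:  # simplified stand-in; real events are richer than a string
    def __init__(self) -> None:
        self._callbacks: list[Callable[[str], None]] = []

    def add_callback(self, cb: Callable[[str], None]) -> None:
        self._callbacks.append(cb)

    def start_workflow(self) -> None:
        for cb in self._callbacks:
            cb("workflow_started")


class Workflow:  # simplified stand-in for BaseWorkflow
    def __init__(self, progress_callback: Callable[[str], None] | None = None) -> None:
        self._progress_callback = progress_callback
        self._progress_tracker: ProgressTracker | None = None

    def execute(self) -> None:
        # Opt-in: the tracker exists only when someone is listening, so the
        # default path pays no progress overhead and prints nothing
        if self._progress_callback:
            self._progress_tracker = ProgressTracker()
            self._progress_tracker.add_callback(self._progress_callback)
            self._progress_tracker.start_workflow()


Workflow(progress_callback=print).execute()  # prints "workflow_started"
```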
empathy_os/workflows/bug_predict.py
CHANGED

@@ -695,7 +695,7 @@ class BugPredictionWorkflow(BaseWorkflow):
             {
                 "correlations": correlations,
                 "correlation_count": len(correlations),
-                "high_confidence_count":
+                "high_confidence_count": sum(1 for c in correlations if c["confidence"] > 0.6),
                 **input_data,
             },
             input_tokens,

@@ -759,7 +759,7 @@
             {
                 "predictions": predictions[:20],  # Top 20 risky files
                 "overall_risk_score": round(self._risk_score, 2),
-                "high_risk_files":
+                "high_risk_files": sum(1 for p in predictions if float(p["risk_score"]) > 0.7),
                 **input_data,
             },
             input_tokens,
empathy_os/workflows/history.py
CHANGED

@@ -459,6 +459,10 @@ class WorkflowHistoryStore:
         Returns:
             Number of runs deleted
         """
+        cutoff = datetime.now().replace(
+            hour=0, minute=0, second=0, microsecond=0
+        ).isoformat()
+
         cursor = self.conn.cursor()

         # Get run IDs to delete

@@ -476,14 +480,12 @@
             return 0

         # Delete stages for these runs
-        # Security Note: f-string builds placeholder list only ("?, ?, ?")
-        # Actual data (run_ids) passed as parameters - SQL injection safe
         placeholders = ",".join("?" * len(run_ids))
         cursor.execute(
             f"DELETE FROM workflow_stages WHERE run_id IN ({placeholders})", run_ids
         )

-        # Delete runs
+        # Delete runs
         cursor.execute(
             f"DELETE FROM workflow_runs WHERE run_id IN ({placeholders})", run_ids
         )
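Two things happen here: a day-boundary `cutoff` timestamp is introduced, and the inline security note is dropped while the parameterized pattern it described stays in place. The pattern in isolation, runnable against an in-memory SQLite database:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE workflow_stages (run_id TEXT)")
conn.executemany("INSERT INTO workflow_stages VALUES (?)", [("a",), ("b",), ("c",)])

run_ids = ["a", "b"]
# The f-string only ever interpolates "?,?" placeholders; the IDs travel as
# bound parameters, so no user data is spliced into the SQL text
placeholders = ",".join("?" * len(run_ids))
cur = conn.execute(f"DELETE FROM workflow_stages WHERE run_id IN ({placeholders})", run_ids)

assert cur.rowcount == 2
```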
empathy_os/workflows/perf_audit.py
CHANGED

@@ -269,10 +269,10 @@ class PerformanceAuditWorkflow(BaseWorkflow):
         # Analyze each file
         analysis: list[dict] = []
         for file_path, file_findings in by_file.items():
-            # Calculate file complexity score
-            high_count =
-            medium_count =
-            low_count =
+            # Calculate file complexity score (generator expressions for memory efficiency)
+            high_count = sum(1 for f in file_findings if f["impact"] == "high")
+            medium_count = sum(1 for f in file_findings if f["impact"] == "medium")
+            low_count = sum(1 for f in file_findings if f["impact"] == "low")

             complexity_score = high_count * 10 + medium_count * 5 + low_count * 1
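The truncated removals were presumably `len(...)`-style counts like the other files in this release; the new generator form feeds the same weighted score. Worked end to end on sample findings:

```python
file_findings = [
    {"impact": "high"},
    {"impact": "medium"},
    {"impact": "medium"},
    {"impact": "low"},
]

high_count = sum(1 for f in file_findings if f["impact"] == "high")
medium_count = sum(1 for f in file_findings if f["impact"] == "medium")
low_count = sum(1 for f in file_findings if f["impact"] == "low")

# Weights from the context line above: high=10, medium=5, low=1
complexity_score = high_count * 10 + medium_count * 5 + low_count * 1
assert complexity_score == 21  # 1*10 + 2*5 + 1*1
```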