empathy-framework 4.7.0__py3-none-any.whl → 4.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. empathy_framework-4.8.0.dist-info/METADATA +753 -0
  2. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +83 -37
  3. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
  4. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
  5. empathy_os/__init__.py +2 -0
  6. empathy_os/cache/hash_only.py +6 -3
  7. empathy_os/cache/hybrid.py +6 -3
  8. empathy_os/cli/__init__.py +128 -238
  9. empathy_os/cli/__main__.py +5 -33
  10. empathy_os/cli/commands/__init__.py +1 -8
  11. empathy_os/cli/commands/help.py +331 -0
  12. empathy_os/cli/commands/info.py +140 -0
  13. empathy_os/cli/commands/inspect.py +437 -0
  14. empathy_os/cli/commands/metrics.py +92 -0
  15. empathy_os/cli/commands/orchestrate.py +184 -0
  16. empathy_os/cli/commands/patterns.py +207 -0
  17. empathy_os/cli/commands/provider.py +93 -81
  18. empathy_os/cli/commands/setup.py +96 -0
  19. empathy_os/cli/commands/status.py +235 -0
  20. empathy_os/cli/commands/sync.py +166 -0
  21. empathy_os/cli/commands/tier.py +121 -0
  22. empathy_os/cli/commands/workflow.py +574 -0
  23. empathy_os/cli/parsers/__init__.py +62 -0
  24. empathy_os/cli/parsers/help.py +41 -0
  25. empathy_os/cli/parsers/info.py +26 -0
  26. empathy_os/cli/parsers/inspect.py +66 -0
  27. empathy_os/cli/parsers/metrics.py +42 -0
  28. empathy_os/cli/parsers/orchestrate.py +61 -0
  29. empathy_os/cli/parsers/patterns.py +54 -0
  30. empathy_os/cli/parsers/provider.py +40 -0
  31. empathy_os/cli/parsers/setup.py +42 -0
  32. empathy_os/cli/parsers/status.py +47 -0
  33. empathy_os/cli/parsers/sync.py +31 -0
  34. empathy_os/cli/parsers/tier.py +33 -0
  35. empathy_os/cli/parsers/workflow.py +77 -0
  36. empathy_os/cli/utils/__init__.py +1 -0
  37. empathy_os/cli/utils/data.py +242 -0
  38. empathy_os/cli/utils/helpers.py +68 -0
  39. empathy_os/{cli.py → cli_legacy.py} +27 -27
  40. empathy_os/cli_minimal.py +662 -0
  41. empathy_os/cli_router.py +384 -0
  42. empathy_os/cli_unified.py +38 -2
  43. empathy_os/memory/__init__.py +19 -5
  44. empathy_os/memory/short_term.py +14 -404
  45. empathy_os/memory/types.py +437 -0
  46. empathy_os/memory/unified.py +61 -48
  47. empathy_os/models/fallback.py +1 -1
  48. empathy_os/models/provider_config.py +59 -344
  49. empathy_os/models/registry.py +31 -180
  50. empathy_os/monitoring/alerts.py +14 -20
  51. empathy_os/monitoring/alerts_cli.py +24 -7
  52. empathy_os/project_index/__init__.py +2 -0
  53. empathy_os/project_index/index.py +210 -5
  54. empathy_os/project_index/scanner.py +45 -14
  55. empathy_os/project_index/scanner_parallel.py +291 -0
  56. empathy_os/socratic/ab_testing.py +1 -1
  57. empathy_os/vscode_bridge 2.py +173 -0
  58. empathy_os/workflows/__init__.py +31 -2
  59. empathy_os/workflows/base.py +349 -325
  60. empathy_os/workflows/bug_predict.py +8 -0
  61. empathy_os/workflows/builder.py +273 -0
  62. empathy_os/workflows/caching.py +253 -0
  63. empathy_os/workflows/code_review_pipeline.py +1 -0
  64. empathy_os/workflows/history.py +510 -0
  65. empathy_os/workflows/output.py +410 -0
  66. empathy_os/workflows/perf_audit.py +125 -19
  67. empathy_os/workflows/progress.py +324 -22
  68. empathy_os/workflows/progressive/README 2.md +454 -0
  69. empathy_os/workflows/progressive/__init__ 2.py +92 -0
  70. empathy_os/workflows/progressive/cli 2.py +242 -0
  71. empathy_os/workflows/progressive/core 2.py +488 -0
  72. empathy_os/workflows/progressive/orchestrator 2.py +701 -0
  73. empathy_os/workflows/progressive/reports 2.py +528 -0
  74. empathy_os/workflows/progressive/telemetry 2.py +280 -0
  75. empathy_os/workflows/progressive/test_gen 2.py +514 -0
  76. empathy_os/workflows/progressive/workflow 2.py +628 -0
  77. empathy_os/workflows/routing.py +168 -0
  78. empathy_os/workflows/secure_release.py +1 -0
  79. empathy_os/workflows/security_audit.py +190 -0
  80. empathy_os/workflows/security_audit_phase3.py +328 -0
  81. empathy_os/workflows/telemetry_mixin.py +269 -0
  82. empathy_framework-4.7.0.dist-info/METADATA +0 -1598
  83. empathy_os/dashboard/__init__.py +0 -15
  84. empathy_os/dashboard/server.py +0 -941
  85. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
  86. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -0
empathy_os/models/registry.py
@@ -31,14 +31,9 @@ class ModelTier(Enum):
 
 
 class ModelProvider(Enum):
-    """Supported model providers."""
+    """Supported model provider (Claude-native architecture as of v5.0.0)."""
 
     ANTHROPIC = "anthropic"
-    OPENAI = "openai"
-    GOOGLE = "google"
-    OLLAMA = "ollama"
-    HYBRID = "hybrid"
-    CUSTOM = "custom"
 
 
 @dataclass(frozen=True)
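
Note for downstream code: the five removed members mean any reference to, e.g., ModelProvider.OPENAI now fails at attribute access. A minimal check, assuming ModelProvider is importable from empathy_os.models.registry (the file these hunks appear to come from, given the file list's +31 -180 counts):

    from empathy_os.models.registry import ModelProvider

    print([p.value for p in ModelProvider])  # ['anthropic'] -- sole remaining member
    assert not hasattr(ModelProvider, "OPENAI")  # removed in this release
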
@@ -164,156 +159,6 @@ MODEL_REGISTRY: dict[str, dict[str, ModelInfo]] = {
             supports_tools=True,
         ),
     },
-    # -------------------------------------------------------------------------
-    # OpenAI Models
-    # -------------------------------------------------------------------------
-    "openai": {
-        "cheap": ModelInfo(
-            id="gpt-4o-mini",
-            provider="openai",
-            tier="cheap",
-            input_cost_per_million=0.15,
-            output_cost_per_million=0.60,
-            max_tokens=4096,
-            supports_vision=False,
-            supports_tools=True,
-        ),
-        "capable": ModelInfo(
-            id="gpt-4o",
-            provider="openai",
-            tier="capable",
-            input_cost_per_million=2.50,
-            output_cost_per_million=10.00,
-            max_tokens=4096,
-            supports_vision=True,
-            supports_tools=True,
-        ),
-        "premium": ModelInfo(
-            id="o1",
-            provider="openai",
-            tier="premium",
-            input_cost_per_million=15.00,
-            output_cost_per_million=60.00,
-            max_tokens=32768,
-            supports_vision=False,
-            supports_tools=False,  # o1 doesn't support tools yet
-        ),
-    },
-    # -------------------------------------------------------------------------
-    # Google Gemini Models
-    # Key feature: Massive context windows (1M-2M tokens)
-    # Model recommendations by tier:
-    #   cheap: gemini-2.0-flash-exp (1M context, fast, very cheap)
-    #   capable: gemini-1.5-pro (2M context, excellent for large codebases)
-    #   premium: gemini-2.5-pro (Google's most capable reasoning model)
-    # -------------------------------------------------------------------------
-    "google": {
-        "cheap": ModelInfo(
-            id="gemini-2.0-flash-exp",
-            provider="google",
-            tier="cheap",
-            input_cost_per_million=0.10,
-            output_cost_per_million=0.40,
-            max_tokens=8192,
-            supports_vision=True,
-            supports_tools=True,
-        ),
-        "capable": ModelInfo(
-            id="gemini-1.5-pro",
-            provider="google",
-            tier="capable",
-            input_cost_per_million=1.25,
-            output_cost_per_million=5.00,
-            max_tokens=8192,
-            supports_vision=True,
-            supports_tools=True,
-        ),
-        "premium": ModelInfo(
-            id="gemini-2.5-pro",
-            provider="google",
-            tier="premium",
-            input_cost_per_million=1.25,
-            output_cost_per_million=10.00,
-            max_tokens=8192,
-            supports_vision=True,
-            supports_tools=True,
-        ),
-    },
-    # -------------------------------------------------------------------------
-    # Ollama (Local) Models - Zero cost
-    # Model recommendations by tier:
-    #   cheap: Small, fast models (3B params) - llama3.2:3b
-    #   capable: Mid-size models (8B params) - llama3.1:8b
-    #   premium: Large models (70B params) - llama3.1:70b
-    # Users need to pull models: ollama pull llama3.2:3b llama3.1:8b llama3.1:70b
-    # -------------------------------------------------------------------------
-    "ollama": {
-        "cheap": ModelInfo(
-            id="llama3.2:3b",
-            provider="ollama",
-            tier="cheap",
-            input_cost_per_million=0.0,
-            output_cost_per_million=0.0,
-            max_tokens=4096,
-            supports_vision=False,
-            supports_tools=True,
-        ),
-        "capable": ModelInfo(
-            id="llama3.1:8b",
-            provider="ollama",
-            tier="capable",
-            input_cost_per_million=0.0,
-            output_cost_per_million=0.0,
-            max_tokens=8192,
-            supports_vision=False,
-            supports_tools=True,
-        ),
-        "premium": ModelInfo(
-            id="llama3.1:70b",
-            provider="ollama",
-            tier="premium",
-            input_cost_per_million=0.0,
-            output_cost_per_million=0.0,
-            max_tokens=8192,
-            supports_vision=False,
-            supports_tools=True,
-        ),
-    },
-    # -------------------------------------------------------------------------
-    # Hybrid - Mix of best models from different providers
-    # -------------------------------------------------------------------------
-    "hybrid": {
-        "cheap": ModelInfo(
-            id="gpt-4o-mini",  # OpenAI - cheapest per token
-            provider="openai",
-            tier="cheap",
-            input_cost_per_million=0.15,
-            output_cost_per_million=0.60,
-            max_tokens=4096,
-            supports_vision=False,
-            supports_tools=True,
-        ),
-        "capable": ModelInfo(
-            id="claude-sonnet-4-5",  # Anthropic Sonnet 4.5 - best reasoning (2026)
-            provider="anthropic",
-            tier="capable",
-            input_cost_per_million=3.00,
-            output_cost_per_million=15.00,
-            max_tokens=8192,
-            supports_vision=True,
-            supports_tools=True,
-        ),
-        "premium": ModelInfo(
-            id="claude-opus-4-5-20251101",  # Anthropic - best overall
-            provider="anthropic",
-            tier="premium",
-            input_cost_per_million=15.00,
-            output_cost_per_million=75.00,
-            max_tokens=8192,
-            supports_vision=True,
-            supports_tools=True,
-        ),
-    },
 }
 
 
@@ -364,11 +209,11 @@ class ModelRegistry:
         """Build tier and model ID caches for O(1) lookups."""
         # Cache for get_models_by_tier (tier -> list[ModelInfo])
         self._tier_cache: dict[str, list[ModelInfo]] = {}
-        for tier_value in [t.value for t in ModelTier]:
-            self._tier_cache[tier_value] = [
-                provider_models[tier_value]
+        for tier in ModelTier:
+            self._tier_cache[tier.value] = [
+                provider_models[tier.value]
                 for provider_models in self._registry.values()
-                if tier_value in provider_models
+                if tier.value in provider_models
             ]
 
         # Cache for get_model_by_id (model_id -> ModelInfo)
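
The rewritten loop iterates the ModelTier enum directly instead of first materializing a list of its values; behavior is unchanged. A standalone sketch of the cache shape it builds (the enum member names and registry contents here are illustrative, not from the package):

    from enum import Enum

    class ModelTier(Enum):  # illustrative stand-in for the package's enum
        CHEAP = "cheap"
        CAPABLE = "capable"
        PREMIUM = "premium"

    # provider -> tier -> model; with a single provider, each tier list
    # holds at most one entry.
    registry = {"anthropic": {"cheap": "model-a", "capable": "model-b"}}

    tier_cache = {
        tier.value: [
            provider_models[tier.value]
            for provider_models in registry.values()
            if tier.value in provider_models
        ]
        for tier in ModelTier
    }
    print(tier_cache)  # {'cheap': ['model-a'], 'capable': ['model-b'], 'premium': []}
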
@@ -381,23 +226,30 @@ class ModelRegistry:
         """Get model info for a provider/tier combination.
 
         Args:
-            provider: Provider name (anthropic, openai, google, ollama, hybrid)
+            provider: Provider name (anthropic only as of v5.0.0)
             tier: Tier level (cheap, capable, premium)
 
         Returns:
             ModelInfo if found, None otherwise
 
+        Raises:
+            ValueError: If provider is not 'anthropic'
+
         Example:
             >>> registry = ModelRegistry()
             >>> model = registry.get_model("anthropic", "capable")
             >>> print(model.id)
             claude-sonnet-4-5
 
-            >>> model = registry.get_model("openai", "cheap")
-            >>> print(model.cost_per_1k_input)
-            0.00015
-
         """
+        if provider.lower() != "anthropic":
+            raise ValueError(
+                f"Provider '{provider}' is not supported. "
+                f"Empathy Framework is now Claude-native (v5.0.0). "
+                f"Only 'anthropic' provider is available. "
+                f"See docs/CLAUDE_NATIVE.md for migration guide."
+            )
+
         provider_models = self._registry.get(provider.lower())
         if provider_models is None:
             return None
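
This guard changes the failure mode for callers that previously passed other providers: instead of a ModelInfo (or None), they now get a ValueError. A quick sketch of the before/after contract, assuming the module path empathy_os/models/registry.py shown in the file list:

    from empathy_os.models.registry import ModelRegistry

    registry = ModelRegistry()
    print(registry.get_model("anthropic", "capable").id)  # claude-sonnet-4-5

    try:
        registry.get_model("openai", "cheap")  # returned a ModelInfo in 4.7.0
    except ValueError as exc:
        print(exc)  # "Provider 'openai' is not supported. ..."
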
@@ -430,7 +282,7 @@ class ModelRegistry:
         return self._model_id_cache.get(model_id)
 
     def get_models_by_tier(self, tier: str) -> list[ModelInfo]:
-        """Get all models in a specific tier across all providers.
+        """Get all models in a specific tier (Anthropic-only as of v5.0.0).
 
         Uses O(1) cache lookup for fast performance.
 
@@ -444,33 +296,29 @@ class ModelRegistry:
             >>> registry = ModelRegistry()
             >>> cheap_models = registry.get_models_by_tier("cheap")
             >>> print(len(cheap_models))
-            5
+            1
             >>> print([m.provider for m in cheap_models])
-            ['anthropic', 'openai', 'google', 'ollama', 'openai']
+            ['anthropic']
 
             >>> premium_models = registry.get_models_by_tier("premium")
             >>> for model in premium_models:
             ...     print(f"{model.provider}: {model.id}")
             anthropic: claude-opus-4-5-20251101
-            openai: o1
-            google: gemini-2.5-pro
-            ollama: llama3.1:70b
-            anthropic: claude-opus-4-5-20251101
 
         """
         return self._tier_cache.get(tier.lower(), [])
 
     def list_providers(self) -> list[str]:
-        """Get list of all provider names.
+        """Get list of all provider names (Anthropic-only as of v5.0.0).
 
         Returns:
-            List of provider names (e.g., ['anthropic', 'openai', ...])
+            List of provider names (['anthropic'])
 
         Example:
             >>> registry = ModelRegistry()
             >>> providers = registry.list_providers()
             >>> print(providers)
-            ['anthropic', 'openai', 'google', 'ollama', 'hybrid']
+            ['anthropic']
 
         """
         return list(self._registry.keys())
@@ -491,7 +339,7 @@ class ModelRegistry:
         return [tier.value for tier in ModelTier]
 
     def get_all_models(self) -> dict[str, dict[str, ModelInfo]]:
-        """Get the complete model registry.
+        """Get the complete model registry (Anthropic-only as of v5.0.0).
 
         Returns:
             Full registry dict (provider -> tier -> ModelInfo)
@@ -500,7 +348,7 @@ class ModelRegistry:
             >>> registry = ModelRegistry()
             >>> all_models = registry.get_all_models()
             >>> print(all_models.keys())
-            dict_keys(['anthropic', 'openai', 'google', 'ollama', 'hybrid'])
+            dict_keys(['anthropic'])
 
         """
         return self._registry
@@ -520,9 +368,9 @@ class ModelRegistry:
             >>> print(pricing)
             {'input': 3.0, 'output': 15.0}
 
-            >>> pricing = registry.get_pricing_for_model("gpt-4o-mini")
+            >>> pricing = registry.get_pricing_for_model("claude-opus-4-5-20251101")
             >>> print(f"${pricing['input']}/M input, ${pricing['output']}/M output")
-            $0.15/M input, $0.6/M output
+            $15.0/M input, $75.0/M output
 
         """
         model = self.get_model_by_id(model_id)
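
The updated doctest keeps the same contract: get_pricing_for_model returns per-million-token rates as {'input': ..., 'output': ...}. A hypothetical helper (not part of the package) showing how such a dict converts to a request cost:

    def estimate_cost(pricing: dict[str, float], input_tokens: int, output_tokens: int) -> float:
        """Illustrative only: rates are USD per million tokens."""
        return (
            input_tokens / 1_000_000 * pricing["input"]
            + output_tokens / 1_000_000 * pricing["output"]
        )

    opus = {"input": 15.0, "output": 75.0}  # figures from the doctest above
    print(f"${estimate_cost(opus, 2_000, 1_000):.4f}")  # $0.1050
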
@@ -549,12 +397,15 @@ def get_model(provider: str, tier: str) -> ModelInfo | None:
     """Get model info for a provider/tier combination.
 
     Args:
-        provider: Provider name (anthropic, openai, ollama, hybrid)
+        provider: Provider name (anthropic only as of v5.0.0)
         tier: Tier level (cheap, capable, premium)
 
     Returns:
         ModelInfo if found, None otherwise
 
+    Raises:
+        ValueError: If provider is not 'anthropic'
+
     Note:
         This is a convenience wrapper around the default ModelRegistry instance.
         For more features, consider using ModelRegistry directly.
empathy_os/monitoring/alerts.py
@@ -117,15 +117,15 @@ def _validate_webhook_url(url: str) -> str:
     # Block common internal service ports
     if parsed.port is not None:
         blocked_ports = {
-            22,  # SSH
-            23,  # Telnet
-            3306,  # MySQL
-            5432,  # PostgreSQL
-            6379,  # Redis
+            22,     # SSH
+            23,     # Telnet
+            3306,   # MySQL
+            5432,   # PostgreSQL
+            6379,   # Redis
             27017,  # MongoDB
-            9200,  # Elasticsearch
-            2379,  # etcd
-            8500,  # Consul
+            9200,   # Elasticsearch
+            2379,   # etcd
+            8500,   # Consul
         }
         if parsed.port in blocked_ports:
             raise ValueError(
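
This hunk only realigns the inline comments; the set itself is unchanged and still backs the SSRF guard in _validate_webhook_url, which raises ValueError for URLs aimed at these ports. A hedged usage sketch (importing the private helper directly, which the package may not intend as public API):

    from empathy_os.monitoring.alerts import _validate_webhook_url

    try:
        _validate_webhook_url("https://internal.example.com:6379/hook")  # Redis port
    except ValueError as exc:
        print(f"rejected: {exc}")
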
@@ -208,9 +208,9 @@ class AlertConfig:
             enabled=data.get("enabled", True),
             cooldown_seconds=data.get("cooldown_seconds", 3600),
             severity=AlertSeverity(data.get("severity", "warning")),
-            created_at=datetime.fromisoformat(data["created_at"])
-            if data.get("created_at")
-            else None,
+            created_at=(
+                datetime.fromisoformat(data["created_at"]) if data.get("created_at") else None
+            ),
         )
 
 
@@ -276,9 +276,7 @@ class AlertEngine:
         self.db_path.parent.mkdir(parents=True, exist_ok=True)
 
         self.telemetry_dir = (
-            Path(telemetry_dir)
-            if telemetry_dir
-            else Path.home() / ".empathy" / "telemetry"
+            Path(telemetry_dir) if telemetry_dir else Path.home() / ".empathy" / "telemetry"
         )
 
         self._cooldown_cache: dict[str, float] = {}  # alert_id -> last_triggered_time
@@ -529,9 +527,7 @@ class AlertEngine:
         conn = sqlite3.connect(self.db_path)
         cursor = conn.cursor()
 
-        cursor.execute(
-            "UPDATE alerts SET enabled = ? WHERE id = ?", (int(enabled), alert_id)
-        )
+        cursor.execute("UPDATE alerts SET enabled = ? WHERE id = ?", (int(enabled), alert_id))
         updated = cursor.rowcount > 0
 
         conn.commit()
@@ -577,9 +573,7 @@ class AlertEngine:
                     continue
                 try:
                     entry = json.loads(line)
-                    timestamp = datetime.fromisoformat(
-                        entry.get("timestamp", "2000-01-01")
-                    )
+                    timestamp = datetime.fromisoformat(entry.get("timestamp", "2000-01-01"))
                     if timestamp < cutoff:
                         continue
 
empathy_os/monitoring/alerts_cli.py
@@ -35,7 +35,9 @@ def alerts():
 
 @alerts.command()
 @click.option("--non-interactive", is_flag=True, help="Skip interactive prompts")
-@click.option("--metric", type=click.Choice(["daily_cost", "error_rate", "avg_latency", "token_usage"]))
+@click.option(
+    "--metric", type=click.Choice(["daily_cost", "error_rate", "avg_latency", "token_usage"])
+)
 @click.option("--threshold", type=float)
 @click.option("--channel", type=click.Choice(["webhook", "email", "stdout"]))
 @click.option("--webhook-url", help="Webhook URL (for webhook channel)")
@@ -52,7 +54,9 @@ def init(
     if non_interactive:
         # Non-interactive mode - require all parameters
         if not all([metric, threshold, channel]):
-            click.echo("Error: --metric, --threshold, and --channel required in non-interactive mode")
+            click.echo(
+                "Error: --metric, --threshold, and --channel required in non-interactive mode"
+            )
             sys.exit(1)
 
     if channel == "webhook" and not webhook_url:
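
The reflowed message documents the non-interactive contract: --metric, --threshold, and --channel must all be supplied. A sketch exercising it with click's test runner (command and option names are taken from this diff; the import path follows the file list):

    from click.testing import CliRunner

    from empathy_os.monitoring.alerts_cli import alerts

    result = CliRunner().invoke(alerts, ["init", "--non-interactive", "--metric", "daily_cost"])
    print(result.output)     # Error: --metric, --threshold, and --channel required ...
    print(result.exit_code)  # 1, via sys.exit(1)
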
@@ -181,6 +185,7 @@ def list_cmd(as_json: bool):
 
     if as_json:
         import json
+
         click.echo(json.dumps([a.to_dict() for a in alerts_list], indent=2))
         return
 
@@ -257,14 +262,18 @@ def watch(interval: int, daemon: bool, once: bool):
         if events:
             click.echo(f"\n⚠️ {len(events)} alert(s) triggered!")
             for event in events:
-                click.echo(f" - {event.alert_name}: {event.current_value:.2f} >= {event.threshold:.2f}")
+                click.echo(
+                    f" - {event.alert_name}: {event.current_value:.2f} >= {event.threshold:.2f}"
+                )
         else:
             click.echo("✅ All metrics within thresholds")
         return
 
     if daemon:
         click.echo("🔄 Starting alert watcher as daemon...")
-        click.echo("⚠️ Daemon mode runs in background. Use 'ps aux | grep empathy' to check status.")
+        click.echo(
+            "⚠️ Daemon mode runs in background. Use 'ps aux | grep empathy' to check status."
+        )
         # Daemonize
         _daemonize()
 
@@ -294,11 +303,15 @@ def watch(interval: int, daemon: bool, once: bool):
                 triggered_count += len(events)
                 for event in events:
                     click.echo(f"⚠️ ALERT: {event.alert_name}")
-                    click.echo(f" {event.metric.value}: {event.current_value:.2f} >= {event.threshold:.2f}")
+                    click.echo(
+                        f" {event.metric.value}: {event.current_value:.2f} >= {event.threshold:.2f}"
+                    )
 
             # Status update every 5 checks
             if check_count % 5 == 0:
-                click.echo(f" [Check #{check_count}] Monitoring... ({triggered_count} alerts triggered)")
+                click.echo(
+                    f" [Check #{check_count}] Monitoring... ({triggered_count} alerts triggered)"
+                )
 
             time.sleep(interval)
     except KeyboardInterrupt:
@@ -367,6 +380,7 @@ def history(alert_id: str | None, limit: int, as_json: bool):
 
     if as_json:
         import json
+
         click.echo(json.dumps(records, indent=2))
         return
 
@@ -375,7 +389,9 @@ def history(alert_id: str | None, limit: int, as_json: bool):
     for record in records:
         delivered = "✓" if record["delivered"] else "✗"
         click.echo(f" [{delivered}] {record['alert_id']}")
-        click.echo(f" Metric: {record['metric']} = {record['current_value']:.2f} (threshold: {record['threshold']:.2f})")
+        click.echo(
+            f" Metric: {record['metric']} = {record['current_value']:.2f} (threshold: {record['threshold']:.2f})"
+        )
         click.echo(f" Severity: {record['severity']}")
         click.echo(f" Triggered: {record['triggered_at']}")
         if record.get("delivery_error"):
@@ -392,6 +408,7 @@ def metrics(as_json: bool):
 
     if as_json:
         import json
+
        click.echo(json.dumps(current_metrics, indent=2))
        return
 
empathy_os/project_index/__init__.py
@@ -19,10 +19,12 @@ from .index import ProjectIndex
 from .models import FileRecord, IndexConfig, ProjectSummary
 from .reports import ReportGenerator
 from .scanner import ProjectScanner
+from .scanner_parallel import ParallelProjectScanner
 
 __all__ = [
     "FileRecord",
     "IndexConfig",
+    "ParallelProjectScanner",
     "ProjectIndex",
     "ProjectScanner",
     "ProjectSummary",