empathy-framework 4.4.0__py3-none-any.whl → 4.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -33,6 +33,10 @@ from typing import Any
33
33
 
34
34
  import structlog
35
35
 
36
+ from .security.pii_scrubber import PIIScrubber
37
+ from .security.secrets_detector import SecretsDetector
38
+ from .security.secrets_detector import Severity as SecretSeverity
39
+
36
40
  logger = structlog.get_logger(__name__)
37
41
 
38
42
  try:
@@ -100,6 +104,10 @@ class RedisConfig:
100
104
  password: str | None = None
101
105
  use_mock: bool = False
102
106
 
107
+ # Security settings
108
+ pii_scrub_enabled: bool = True # Scrub PII before storing (HIPAA/GDPR compliance)
109
+ secrets_detection_enabled: bool = True # Block storage of detected secrets
110
+
103
111
  # SSL/TLS settings
104
112
  ssl: bool = False
105
113
  ssl_cert_reqs: str | None = None # "required", "optional", "none"
@@ -166,6 +174,11 @@ class RedisMetrics:
166
174
  publish_count: int = 0
167
175
  stream_append_count: int = 0
168
176
 
177
+ # Security metrics
178
+ pii_scrubbed_total: int = 0 # Total PII instances scrubbed
179
+ pii_scrub_operations: int = 0 # Operations that had PII scrubbed
180
+ secrets_blocked_total: int = 0 # Total secrets blocked from storage
181
+
169
182
  def record_operation(self, operation: str, latency_ms: float, success: bool = True) -> None:
170
183
  """Record an operation metric."""
171
184
  self.operations_total += 1
@@ -217,6 +230,11 @@ class RedisMetrics:
217
230
  "publish": self.publish_count,
218
231
  "stream_append": self.stream_append_count,
219
232
  },
233
+ "security": {
234
+ "pii_scrubbed_total": self.pii_scrubbed_total,
235
+ "pii_scrub_operations": self.pii_scrub_operations,
236
+ "secrets_blocked_total": self.secrets_blocked_total,
237
+ },
220
238
  }
221
239
 
222
240
 
@@ -387,6 +405,10 @@ class ConflictContext:
387
405
  )
388
406
 
389
407
 
408
+ class SecurityError(Exception):
409
+ """Raised when a security policy is violated (e.g., secrets detected in data)."""
410
+
411
+
390
412
  class RedisShortTermMemory:
391
413
  """Redis-backed short-term memory for agent coordination
392
414
 
@@ -488,6 +510,18 @@ class RedisShortTermMemory:
488
510
  self._mock_streams: dict[str, list[tuple[str, dict]]] = {}
489
511
  self._mock_pubsub_handlers: dict[str, list[Callable[[dict], None]]] = {}
490
512
 
513
+ # Security: Initialize PII scrubber and secrets detector
514
+ self._pii_scrubber: PIIScrubber | None = None
515
+ self._secrets_detector: SecretsDetector | None = None
516
+
517
+ if self._config.pii_scrub_enabled:
518
+ self._pii_scrubber = PIIScrubber(enable_name_detection=False)
519
+ logger.debug("pii_scrubber_enabled", message="PII scrubbing active for short-term memory")
520
+
521
+ if self._config.secrets_detection_enabled:
522
+ self._secrets_detector = SecretsDetector()
523
+ logger.debug("secrets_detector_enabled", message="Secrets detection active for short-term memory")
524
+
491
525
  if self.use_mock:
492
526
  self._client = None
493
527
  else:
@@ -621,6 +655,82 @@ class RedisShortTermMemory:
621
655
  # Convert bytes to strings - needed for API return type
622
656
  return [k.decode() if isinstance(k, bytes) else str(k) for k in keys]
623
657
 
658
+ # === Security Methods ===
659
+
660
+ def _sanitize_data(self, data: Any) -> tuple[Any, int]:
661
+ """Sanitize data by scrubbing PII and checking for secrets.
662
+
663
+ Args:
664
+ data: Data to sanitize (dict, list, or str)
665
+
666
+ Returns:
667
+ Tuple of (sanitized_data, pii_count)
668
+
669
+ Raises:
670
+ SecurityError: If secrets are detected and blocking is enabled
671
+
672
+ """
673
+ pii_count = 0
674
+
675
+ if data is None:
676
+ return data, 0
677
+
678
+ # Convert data to string for scanning
679
+ if isinstance(data, dict):
680
+ data_str = json.dumps(data)
681
+ elif isinstance(data, list):
682
+ data_str = json.dumps(data)
683
+ elif isinstance(data, str):
684
+ data_str = data
685
+ else:
686
+ # For other types, convert to string
687
+ data_str = str(data)
688
+
689
+ # Check for secrets first (before modifying data)
690
+ if self._secrets_detector is not None:
691
+ detections = self._secrets_detector.detect(data_str)
692
+ # Block critical and high severity secrets
693
+ critical_secrets = [
694
+ d for d in detections
695
+ if d.severity in (SecretSeverity.CRITICAL, SecretSeverity.HIGH)
696
+ ]
697
+ if critical_secrets:
698
+ self._metrics.secrets_blocked_total += len(critical_secrets)
699
+ secret_types = [d.secret_type.value for d in critical_secrets]
700
+ logger.warning(
701
+ "secrets_detected_blocked",
702
+ secret_types=secret_types,
703
+ count=len(critical_secrets),
704
+ )
705
+ raise SecurityError(
706
+ f"Cannot store data containing secrets: {secret_types}. "
707
+ "Remove sensitive credentials before storing."
708
+ )
709
+
710
+ # Scrub PII
711
+ if self._pii_scrubber is not None:
712
+ sanitized_str, pii_detections = self._pii_scrubber.scrub(data_str)
713
+ pii_count = len(pii_detections)
714
+
715
+ if pii_count > 0:
716
+ self._metrics.pii_scrubbed_total += pii_count
717
+ self._metrics.pii_scrub_operations += 1
718
+ logger.debug(
719
+ "pii_scrubbed",
720
+ pii_count=pii_count,
721
+ pii_types=[d.pii_type for d in pii_detections],
722
+ )
723
+
724
+ # Convert back to original type
725
+ if isinstance(data, dict):
726
+ return json.loads(sanitized_str), pii_count
727
+ elif isinstance(data, list):
728
+ return json.loads(sanitized_str), pii_count
729
+ else:
730
+ return sanitized_str, pii_count
731
+
732
+ return data, pii_count
733
+
624
734
  # === Working Memory (Stash/Retrieve) ===
625
735
 
626
736
  def stash(
@@ -629,6 +739,7 @@ class RedisShortTermMemory:
629
739
  data: Any,
630
740
  credentials: AgentCredentials,
631
741
  ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
742
+ skip_sanitization: bool = False,
632
743
  ) -> bool:
633
744
  """Stash data in short-term memory
634
745
 
@@ -637,6 +748,7 @@ class RedisShortTermMemory:
637
748
  data: Data to store (will be JSON serialized)
638
749
  credentials: Agent credentials
639
750
  ttl: Time-to-live strategy
751
+ skip_sanitization: Skip PII scrubbing and secrets detection (use with caution)
640
752
 
641
753
  Returns:
642
754
  True if successful
@@ -644,6 +756,12 @@ class RedisShortTermMemory:
644
756
  Raises:
645
757
  ValueError: If key is empty or invalid
646
758
  PermissionError: If credentials lack write access
759
+ SecurityError: If secrets are detected in data (when secrets_detection_enabled)
760
+
761
+ Note:
762
+ PII (emails, SSNs, phone numbers, etc.) is automatically scrubbed
763
+ before storage unless skip_sanitization=True or pii_scrub_enabled=False.
764
+ Secrets (API keys, passwords, etc.) will block storage by default.
647
765
 
648
766
  Example:
649
767
  >>> memory.stash("analysis_v1", {"findings": [...]}, creds)
@@ -659,6 +777,17 @@ class RedisShortTermMemory:
659
777
  "cannot write to memory. Requires CONTRIBUTOR or higher.",
660
778
  )
661
779
 
780
+ # Sanitize data (PII scrubbing + secrets detection)
781
+ if not skip_sanitization:
782
+ data, pii_count = self._sanitize_data(data)
783
+ if pii_count > 0:
784
+ logger.info(
785
+ "stash_pii_scrubbed",
786
+ key=key,
787
+ agent_id=credentials.agent_id,
788
+ pii_count=pii_count,
789
+ )
790
+
662
791
  full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
663
792
  payload = {
664
793
  "data": data,
@@ -2218,6 +2347,63 @@ class RedisShortTermMemory:
2218
2347
  except Exception:
2219
2348
  pass
2220
2349
 
2350
+ # =========================================================================
2351
+ # CROSS-SESSION COMMUNICATION
2352
+ # =========================================================================
2353
+
2354
+ def enable_cross_session(
2355
+ self,
2356
+ access_tier: AccessTier = AccessTier.CONTRIBUTOR,
2357
+ auto_announce: bool = True,
2358
+ ):
2359
+ """Enable cross-session communication for this memory instance.
2360
+
2361
+ This allows agents in different Claude Code sessions to communicate
2362
+ and coordinate via Redis.
2363
+
2364
+ Args:
2365
+ access_tier: Access tier for this session
2366
+ auto_announce: Whether to announce presence automatically
2367
+
2368
+ Returns:
2369
+ CrossSessionCoordinator instance
2370
+
2371
+ Raises:
2372
+ ValueError: If in mock mode (Redis required for cross-session)
2373
+
2374
+ Example:
2375
+ >>> memory = RedisShortTermMemory()
2376
+ >>> coordinator = memory.enable_cross_session(AccessTier.CONTRIBUTOR)
2377
+ >>> print(f"Session ID: {coordinator.agent_id}")
2378
+ >>> sessions = coordinator.get_active_sessions()
2379
+
2380
+ """
2381
+ if self.use_mock:
2382
+ raise ValueError(
2383
+ "Cross-session communication requires Redis. "
2384
+ "Set REDIS_HOST/REDIS_PORT or disable mock mode."
2385
+ )
2386
+
2387
+ from .cross_session import CrossSessionCoordinator, SessionType
2388
+
2389
+ coordinator = CrossSessionCoordinator(
2390
+ memory=self,
2391
+ session_type=SessionType.CLAUDE,
2392
+ access_tier=access_tier,
2393
+ auto_announce=auto_announce,
2394
+ )
2395
+
2396
+ return coordinator
2397
+
2398
+ def cross_session_available(self) -> bool:
2399
+ """Check if cross-session communication is available.
2400
+
2401
+ Returns:
2402
+ True if Redis is connected (not mock mode)
2403
+
2404
+ """
2405
+ return not self.use_mock and self._client is not None
2406
+
2221
2407
  # =========================================================================
2222
2408
  # CLEANUP AND LIFECYCLE
2223
2409
  # =========================================================================
@@ -80,7 +80,7 @@ RELEASE_PREP_TEMPLATE = MetaWorkflowTemplate(
80
80
  agent_composition_rules=[
81
81
  AgentCompositionRule(
82
82
  role="Security Auditor",
83
- base_template="security-analyst",
83
+ base_template="security_auditor",
84
84
  tier_strategy=TierStrategy.CAPABLE_FIRST,
85
85
  tools=["grep", "bandit", "safety"],
86
86
  required_responses={"security_scan": "Yes"},
@@ -92,7 +92,7 @@ RELEASE_PREP_TEMPLATE = MetaWorkflowTemplate(
92
92
  ),
93
93
  AgentCompositionRule(
94
94
  role="Test Coverage Analyst",
95
- base_template="test-analyst",
95
+ base_template="test_coverage_analyzer",
96
96
  tier_strategy=TierStrategy.PROGRESSIVE,
97
97
  tools=["pytest", "coverage"],
98
98
  required_responses={"test_coverage_check": "Yes"},
@@ -104,7 +104,7 @@ RELEASE_PREP_TEMPLATE = MetaWorkflowTemplate(
104
104
  ),
105
105
  AgentCompositionRule(
106
106
  role="Code Quality Reviewer",
107
- base_template="code-reviewer",
107
+ base_template="code_reviewer",
108
108
  tier_strategy=TierStrategy.PROGRESSIVE,
109
109
  tools=["ruff", "mypy"],
110
110
  required_responses={"quality_review": "Yes"},
@@ -116,7 +116,7 @@ RELEASE_PREP_TEMPLATE = MetaWorkflowTemplate(
116
116
  ),
117
117
  AgentCompositionRule(
118
118
  role="Documentation Specialist",
119
- base_template="doc-reviewer",
119
+ base_template="documentation_writer",
120
120
  tier_strategy=TierStrategy.CHEAP_ONLY,
121
121
  tools=["pydocstyle"],
122
122
  required_responses={"doc_verification": "Yes"},
@@ -181,7 +181,7 @@ TEST_COVERAGE_BOOST_TEMPLATE = MetaWorkflowTemplate(
181
181
  agent_composition_rules=[
182
182
  AgentCompositionRule(
183
183
  role="Gap Analyzer",
184
- base_template="coverage-analyst",
184
+ base_template="test_coverage_analyzer",
185
185
  tier_strategy=TierStrategy.PROGRESSIVE,
186
186
  tools=["pytest-cov", "coverage"],
187
187
  required_responses={},
@@ -193,7 +193,7 @@ TEST_COVERAGE_BOOST_TEMPLATE = MetaWorkflowTemplate(
193
193
  ),
194
194
  AgentCompositionRule(
195
195
  role="Test Generator",
196
- base_template="test-generator",
196
+ base_template="test_generator",
197
197
  tier_strategy=TierStrategy.CAPABLE_FIRST,
198
198
  tools=["ast", "pytest"],
199
199
  required_responses={},
@@ -208,7 +208,7 @@ TEST_COVERAGE_BOOST_TEMPLATE = MetaWorkflowTemplate(
208
208
  ),
209
209
  AgentCompositionRule(
210
210
  role="Test Validator",
211
- base_template="test-validator",
211
+ base_template="test_validator",
212
212
  tier_strategy=TierStrategy.CHEAP_ONLY,
213
213
  tools=["pytest"],
214
214
  required_responses={},
@@ -250,8 +250,8 @@ TEST_MAINTENANCE_TEMPLATE = MetaWorkflowTemplate(
250
250
  id="max_files",
251
251
  text="Maximum files to process",
252
252
  type=QuestionType.SINGLE_SELECT,
253
- options=["5", "10", "20", "50"],
254
- default="10",
253
+ options=["5", "10", "20", "30", "50"],
254
+ default="30",
255
255
  help_text="Limit number of files to process per run",
256
256
  ),
257
257
  FormQuestion(
@@ -274,7 +274,7 @@ TEST_MAINTENANCE_TEMPLATE = MetaWorkflowTemplate(
274
274
  agent_composition_rules=[
275
275
  AgentCompositionRule(
276
276
  role="Test Analyst",
277
- base_template="test-analyst",
277
+ base_template="test_coverage_analyzer",
278
278
  tier_strategy=TierStrategy.PROGRESSIVE,
279
279
  tools=["pytest-cov", "coverage"],
280
280
  required_responses={"mode": ["full", "analyze"]},
@@ -289,7 +289,7 @@ TEST_MAINTENANCE_TEMPLATE = MetaWorkflowTemplate(
289
289
  ),
290
290
  AgentCompositionRule(
291
291
  role="Test Generator",
292
- base_template="test-generator",
292
+ base_template="test_generator",
293
293
  tier_strategy=TierStrategy.CAPABLE_FIRST,
294
294
  tools=["ast", "pytest"],
295
295
  required_responses={"mode": ["full", "generate"]},
@@ -300,7 +300,7 @@ TEST_MAINTENANCE_TEMPLATE = MetaWorkflowTemplate(
300
300
  ),
301
301
  AgentCompositionRule(
302
302
  role="Test Validator",
303
- base_template="test-validator",
303
+ base_template="test_validator",
304
304
  tier_strategy=TierStrategy.CHEAP_ONLY,
305
305
  tools=["pytest"],
306
306
  required_responses={"mode": ["full", "validate"], "auto_validation": "Yes"},
@@ -311,7 +311,7 @@ TEST_MAINTENANCE_TEMPLATE = MetaWorkflowTemplate(
311
311
  ),
312
312
  AgentCompositionRule(
313
313
  role="Test Reporter",
314
- base_template="reporter",
314
+ base_template="report_generator",
315
315
  tier_strategy=TierStrategy.CHEAP_ONLY,
316
316
  tools=[],
317
317
  required_responses={},
@@ -373,7 +373,7 @@ MANAGE_DOCS_TEMPLATE = MetaWorkflowTemplate(
373
373
  agent_composition_rules=[
374
374
  AgentCompositionRule(
375
375
  role="Documentation Analyst",
376
- base_template="doc-analyst",
376
+ base_template="documentation_analyst",
377
377
  tier_strategy=TierStrategy.PROGRESSIVE,
378
378
  tools=["ast", "pydocstyle"],
379
379
  required_responses={},
@@ -388,7 +388,7 @@ MANAGE_DOCS_TEMPLATE = MetaWorkflowTemplate(
388
388
  ),
389
389
  AgentCompositionRule(
390
390
  role="Documentation Reviewer",
391
- base_template="doc-reviewer",
391
+ base_template="documentation_writer",
392
392
  tier_strategy=TierStrategy.PROGRESSIVE,
393
393
  tools=[],
394
394
  required_responses={},
@@ -463,7 +463,7 @@ FEATURE_OVERVIEW_TEMPLATE = MetaWorkflowTemplate(
463
463
  agent_composition_rules=[
464
464
  AgentCompositionRule(
465
465
  role="Code Scanner",
466
- base_template="generic",
466
+ base_template="generic_agent",
467
467
  tier_strategy=TierStrategy.CAPABLE_FIRST,
468
468
  tools=["read", "grep", "glob"],
469
469
  required_responses={},
@@ -475,7 +475,7 @@ FEATURE_OVERVIEW_TEMPLATE = MetaWorkflowTemplate(
475
475
  ),
476
476
  AgentCompositionRule(
477
477
  role="Insights Reporter",
478
- base_template="generic",
478
+ base_template="generic_agent",
479
479
  tier_strategy=TierStrategy.CAPABLE_FIRST,
480
480
  tools=["read"],
481
481
  required_responses={},
@@ -487,7 +487,7 @@ FEATURE_OVERVIEW_TEMPLATE = MetaWorkflowTemplate(
487
487
  ),
488
488
  AgentCompositionRule(
489
489
  role="Architecture Analyst",
490
- base_template="generic",
490
+ base_template="architecture_analyst",
491
491
  tier_strategy=TierStrategy.CAPABLE_FIRST,
492
492
  tools=["read"],
493
493
  required_responses={"include_diagrams": "Yes"},
@@ -499,7 +499,7 @@ FEATURE_OVERVIEW_TEMPLATE = MetaWorkflowTemplate(
499
499
  ),
500
500
  AgentCompositionRule(
501
501
  role="Quality Reviewer",
502
- base_template="generic",
502
+ base_template="code_reviewer",
503
503
  tier_strategy=TierStrategy.PREMIUM_ONLY,
504
504
  tools=["read"],
505
505
  required_responses={},
@@ -511,7 +511,7 @@ FEATURE_OVERVIEW_TEMPLATE = MetaWorkflowTemplate(
511
511
  ),
512
512
  AgentCompositionRule(
513
513
  role="Blog Content Creator",
514
- base_template="generic",
514
+ base_template="generic_agent",
515
515
  tier_strategy=TierStrategy.PREMIUM_ONLY,
516
516
  tools=["write"],
517
517
  required_responses={"include_blog_summary": "Yes"},
@@ -246,6 +246,12 @@ def run_workflow(
246
246
  "-u",
247
247
  help="User ID for memory integration",
248
248
  ),
249
+ json_output: bool = typer.Option(
250
+ False,
251
+ "--json",
252
+ "-j",
253
+ help="Output result as JSON (for programmatic use)",
254
+ ),
249
255
  ):
250
256
  """Execute a meta-workflow from template.
251
257
 
@@ -261,32 +267,43 @@ def run_workflow(
261
267
  empathy meta-workflow run release-prep
262
268
  empathy meta-workflow run test-coverage-boost --real
263
269
  empathy meta-workflow run manage-docs --use-defaults
270
+ empathy meta-workflow run release-prep --json --use-defaults
264
271
  """
272
+ import json
273
+
265
274
  try:
266
275
  # Load template
267
- console.print(f"\n[bold]Loading template:[/bold] {template_id}")
276
+ if not json_output:
277
+ console.print(f"\n[bold]Loading template:[/bold] {template_id}")
268
278
  registry = TemplateRegistry()
269
279
  template = registry.load_template(template_id)
270
280
 
271
281
  if not template:
272
- console.print(f"[red]Template not found:[/red] {template_id}")
282
+ if json_output:
283
+ print(json.dumps({"success": False, "error": f"Template not found: {template_id}"}))
284
+ else:
285
+ console.print(f"[red]Template not found:[/red] {template_id}")
273
286
  raise typer.Exit(code=1)
274
287
 
275
- console.print(f"[green]✓[/green] {template.name}")
288
+ if not json_output:
289
+ console.print(f"[green]✓[/green] {template.name}")
276
290
 
277
291
  # Setup memory if requested
278
292
  pattern_learner = None
279
293
  if use_memory:
280
- console.print("\n[bold]Initializing memory integration...[/bold]")
294
+ if not json_output:
295
+ console.print("\n[bold]Initializing memory integration...[/bold]")
281
296
  from empathy_os.memory.unified import UnifiedMemory
282
297
 
283
298
  try:
284
299
  memory = UnifiedMemory(user_id=user_id)
285
300
  pattern_learner = PatternLearner(memory=memory)
286
- console.print("[green]✓[/green] Memory enabled")
301
+ if not json_output:
302
+ console.print("[green]✓[/green] Memory enabled")
287
303
  except Exception as e:
288
- console.print(f"[yellow]Warning:[/yellow] Memory initialization failed: {e}")
289
- console.print("[yellow]Continuing without memory integration[/yellow]")
304
+ if not json_output:
305
+ console.print(f"[yellow]Warning:[/yellow] Memory initialization failed: {e}")
306
+ console.print("[yellow]Continuing without memory integration[/yellow]")
290
307
 
291
308
  # Create workflow
292
309
  workflow = MetaWorkflow(
@@ -295,14 +312,49 @@ def run_workflow(
295
312
  )
296
313
 
297
314
  # Execute (will ask questions via AskUserQuestion unless --use-defaults)
298
- console.print("\n[bold]Executing workflow...[/bold]")
299
- console.print(f"Mode: {'Mock' if mock else 'Real'}")
300
- if use_defaults:
301
- console.print("[cyan]Using default values (non-interactive)[/cyan]")
315
+ if not json_output:
316
+ console.print("\n[bold]Executing workflow...[/bold]")
317
+ console.print(f"Mode: {'Mock' if mock else 'Real'}")
318
+ if use_defaults:
319
+ console.print("[cyan]Using default values (non-interactive)[/cyan]")
302
320
 
303
321
  result = workflow.execute(mock_execution=mock, use_defaults=use_defaults)
304
322
 
305
- # Display summary
323
+ # JSON output mode - print result as JSON and exit
324
+ if json_output:
325
+ output = {
326
+ "run_id": result.run_id,
327
+ "template_id": template_id,
328
+ "timestamp": result.timestamp,
329
+ "success": result.success,
330
+ "error": result.error,
331
+ "total_cost": result.total_cost,
332
+ "total_duration": result.total_duration,
333
+ "agents_created": len(result.agents_created),
334
+ "form_responses": {
335
+ "template_id": result.form_responses.template_id,
336
+ "responses": result.form_responses.responses,
337
+ "timestamp": result.form_responses.timestamp,
338
+ "response_id": result.form_responses.response_id,
339
+ },
340
+ "agent_results": [
341
+ {
342
+ "agent_id": ar.agent_id,
343
+ "role": ar.role,
344
+ "success": ar.success,
345
+ "cost": ar.cost,
346
+ "duration": ar.duration,
347
+ "tier_used": ar.tier_used,
348
+ "output": ar.output,
349
+ "error": ar.error,
350
+ }
351
+ for ar in result.agent_results
352
+ ],
353
+ }
354
+ print(json.dumps(output))
355
+ return
356
+
357
+ # Display summary (normal mode)
306
358
  console.print("\n[bold green]Execution Complete![/bold green]\n")
307
359
 
308
360
  summary_lines = [
@@ -340,9 +392,12 @@ def run_workflow(
340
392
  console.print()
341
393
 
342
394
  except Exception as e:
343
- console.print(f"\n[red]Error:[/red] {e}")
344
- import traceback
345
- traceback.print_exc()
395
+ if json_output:
396
+ print(json.dumps({"success": False, "error": str(e)}))
397
+ else:
398
+ console.print(f"\n[red]Error:[/red] {e}")
399
+ import traceback
400
+ traceback.print_exc()
346
401
  raise typer.Exit(code=1)
347
402
 
348
403
 
@@ -12,6 +12,28 @@ Updated: 2026-01-18 (v4.3.0 - Real LLM execution with Anthropic client)
12
12
  Purpose: Core orchestration for meta-workflows
13
13
  """
14
14
 
15
+ # Load environment variables from .env file
16
+ # Try multiple locations: project root, home directory, empathy config
17
+ try:
18
+ from pathlib import Path
19
+
20
+ from dotenv import load_dotenv
21
+
22
+ # Try common .env locations
23
+ _env_paths = [
24
+ Path.cwd() / ".env", # Current working directory
25
+ Path(__file__).parent.parent.parent.parent / ".env", # Project root
26
+ Path.home() / ".env", # Home directory
27
+ Path.home() / ".empathy" / ".env", # Empathy config directory
28
+ ]
29
+
30
+ for _env_path in _env_paths:
31
+ if _env_path.exists():
32
+ load_dotenv(_env_path)
33
+ break
34
+ except ImportError:
35
+ pass # dotenv not installed, use environment variables directly
36
+
15
37
  import json
16
38
  import logging
17
39
  import time
@@ -616,7 +638,14 @@ class MetaWorkflow:
616
638
  try:
617
639
  from anthropic import Anthropic
618
640
 
619
- client = Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
641
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
642
+ if not api_key:
643
+ raise ValueError(
644
+ "ANTHROPIC_API_KEY not found. Set it in environment or .env file "
645
+ "(checked: ./.env, ~/.env, ~/.empathy/.env)"
646
+ )
647
+
648
+ client = Anthropic(api_key=api_key)
620
649
 
621
650
  # Execute the LLM call
622
651
  response = client.messages.create(