empathy-framework 3.5.6-py3-none-any.whl → 3.7.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in a supported public registry. It is provided for informational purposes only.
Files changed (72)
  1. agents/compliance_anticipation_agent.py +113 -118
  2. agents/compliance_db.py +339 -0
  3. agents/epic_integration_wizard.py +37 -48
  4. agents/notifications.py +291 -0
  5. agents/trust_building_behaviors.py +66 -85
  6. coach_wizards/__init__.py +11 -12
  7. coach_wizards/accessibility_wizard.py +12 -12
  8. coach_wizards/api_wizard.py +12 -12
  9. coach_wizards/base_wizard.py +26 -20
  10. coach_wizards/cicd_wizard.py +15 -13
  11. coach_wizards/compliance_wizard.py +12 -12
  12. coach_wizards/database_wizard.py +12 -12
  13. coach_wizards/debugging_wizard.py +12 -12
  14. coach_wizards/documentation_wizard.py +12 -12
  15. coach_wizards/generate_wizards.py +1 -2
  16. coach_wizards/localization_wizard.py +21 -14
  17. coach_wizards/migration_wizard.py +12 -12
  18. coach_wizards/monitoring_wizard.py +12 -12
  19. coach_wizards/observability_wizard.py +12 -12
  20. coach_wizards/performance_wizard.py +12 -12
  21. coach_wizards/prompt_engineering_wizard.py +22 -25
  22. coach_wizards/refactoring_wizard.py +12 -12
  23. coach_wizards/scaling_wizard.py +12 -12
  24. coach_wizards/security_wizard.py +12 -12
  25. coach_wizards/testing_wizard.py +12 -12
  26. {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/METADATA +234 -30
  27. empathy_framework-3.7.0.dist-info/RECORD +105 -0
  28. empathy_healthcare_plugin/__init__.py +1 -2
  29. empathy_llm_toolkit/__init__.py +5 -6
  30. empathy_llm_toolkit/claude_memory.py +14 -15
  31. empathy_llm_toolkit/code_health.py +27 -19
  32. empathy_llm_toolkit/contextual_patterns.py +11 -12
  33. empathy_llm_toolkit/core.py +43 -49
  34. empathy_llm_toolkit/git_pattern_extractor.py +16 -12
  35. empathy_llm_toolkit/levels.py +6 -13
  36. empathy_llm_toolkit/pattern_confidence.py +14 -18
  37. empathy_llm_toolkit/pattern_resolver.py +10 -12
  38. empathy_llm_toolkit/pattern_summary.py +13 -11
  39. empathy_llm_toolkit/providers.py +27 -38
  40. empathy_llm_toolkit/session_status.py +18 -20
  41. empathy_llm_toolkit/state.py +20 -21
  42. empathy_os/__init__.py +72 -73
  43. empathy_os/cli.py +193 -98
  44. empathy_os/cli_unified.py +68 -41
  45. empathy_os/config.py +31 -31
  46. empathy_os/coordination.py +48 -54
  47. empathy_os/core.py +90 -99
  48. empathy_os/cost_tracker.py +20 -23
  49. empathy_os/discovery.py +9 -11
  50. empathy_os/emergence.py +20 -21
  51. empathy_os/exceptions.py +18 -30
  52. empathy_os/feedback_loops.py +27 -30
  53. empathy_os/levels.py +31 -34
  54. empathy_os/leverage_points.py +27 -28
  55. empathy_os/logging_config.py +11 -12
  56. empathy_os/monitoring.py +27 -27
  57. empathy_os/pattern_library.py +29 -28
  58. empathy_os/persistence.py +30 -34
  59. empathy_os/platform_utils.py +46 -47
  60. empathy_os/redis_config.py +14 -15
  61. empathy_os/redis_memory.py +53 -56
  62. empathy_os/templates.py +12 -11
  63. empathy_os/trust_building.py +44 -36
  64. empathy_os/workflow_commands.py +123 -31
  65. empathy_software_plugin/__init__.py +1 -2
  66. empathy_software_plugin/cli.py +32 -25
  67. empathy_software_plugin/plugin.py +4 -8
  68. empathy_framework-3.5.6.dist-info/RECORD +0 -103
  69. {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/WHEEL +0 -0
  70. {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/entry_points.txt +0 -0
  71. {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/licenses/LICENSE +0 -0
  72. {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/top_level.txt +0 -0
empathy_llm_toolkit/code_health.py
@@ -1,5 +1,4 @@
-"""
-Code Health Assistant Module
+"""Code Health Assistant Module
 
 A comprehensive system for running health checks, tracking trends,
 and auto-fixing common issues in codebases.
@@ -224,12 +223,12 @@ class HealthCheckRunner:
         project_root: str = ".",
         config: dict | None = None,
     ):
-        """
-        Initialize the health check runner.
+        """Initialize the health check runner.
 
         Args:
             project_root: Root directory of the project
             config: Configuration dictionary (uses defaults if not provided)
+
         """
         self.project_root = Path(project_root).resolve()
         self.config = {**DEFAULT_CONFIG, **(config or {})}
@@ -345,6 +344,7 @@ class HealthCheckRunner:
         if tool == "ruff":
             result = subprocess.run(
                 ["ruff", "check", "--output-format=json", str(self.project_root)],
+                check=False,
                 capture_output=True,
                 text=True,
                 cwd=str(self.project_root),
@@ -364,12 +364,13 @@ class HealthCheckRunner:
                        ),
                        fixable=item.get("fix") is not None,
                        fix_command="ruff check --fix" if item.get("fix") else None,
-                    )
+                    ),
                )
        else:
            # Fallback to flake8
            result = subprocess.run(
                ["flake8", "--format=json", str(self.project_root)],
+                check=False,
                capture_output=True,
                text=True,
                cwd=str(self.project_root),
@@ -416,6 +417,7 @@ class HealthCheckRunner:
         if tool == "black":
             result = subprocess.run(
                 ["black", "--check", "--diff", str(self.project_root)],
+                check=False,
                 capture_output=True,
                 text=True,
                 cwd=str(self.project_root),
@@ -439,7 +441,7 @@ class HealthCheckRunner:
                        severity="warning",
                        fixable=True,
                        fix_command=f"black {current_file}",
-                    )
+                    ),
                )
 
            # Also check stderr for files that would be reformatted
@@ -458,7 +460,7 @@ class HealthCheckRunner:
                        severity="warning",
                        fixable=True,
                        fix_command=f"black {file_path}",
-                    )
+                    ),
                )
 
        score = max(0, 100 - len(issues) * 10)  # -10 per file
@@ -500,6 +502,7 @@ class HealthCheckRunner:
         if tool == "pyright":
             result = subprocess.run(
                 ["pyright", "--outputjson"],
+                check=False,
                 capture_output=True,
                 text=True,
                 cwd=str(self.project_root),
@@ -519,7 +522,7 @@ class HealthCheckRunner:
                            message=diag.get("message", ""),
                            severity="error" if diag.get("severity") == 1 else "warning",
                            fixable=False,
-                        )
+                        ),
                    )
            except json.JSONDecodeError:
                pass
@@ -527,6 +530,7 @@ class HealthCheckRunner:
         elif tool == "mypy":
             result = subprocess.run(
                 ["mypy", "--show-error-codes", "--no-error-summary", str(self.project_root)],
+                check=False,
                 capture_output=True,
                 text=True,
                 cwd=str(self.project_root),
@@ -546,7 +550,7 @@ class HealthCheckRunner:
                            message=parts[3].strip() if len(parts) > 3 else "",
                            severity="error",
                            fixable=False,
-                        )
+                        ),
                    )
 
        score = max(0, 100 - len(issues) * 10)  # -10 per type error
@@ -590,6 +594,7 @@ class HealthCheckRunner:
         try:
             result = subprocess.run(
                 ["pytest", "--tb=no", "-q", "--co", "-q"],  # Collect only, quiet
+                check=False,
                 capture_output=True,
                 text=True,
                 cwd=str(self.project_root),
@@ -604,6 +609,7 @@ class HealthCheckRunner:
            # Run actual tests
            result = subprocess.run(
                ["pytest", "--tb=short", "-q"],
+                check=False,
                capture_output=True,
                text=True,
                cwd=str(self.project_root),
@@ -654,7 +660,7 @@ class HealthCheckRunner:
                            message=f"Test failed: {test_path}",
                            severity="error",
                            fixable=False,
-                        )
+                        ),
                    )
 
        total = passed + failed
@@ -708,6 +714,7 @@ class HealthCheckRunner:
         try:
             result = subprocess.run(
                 ["bandit", "-r", "-f", "json", str(self.project_root)],
+                check=False,
                 capture_output=True,
                 text=True,
                 cwd=str(self.project_root),
@@ -727,7 +734,7 @@ class HealthCheckRunner:
                            message=item.get("issue_text", ""),
                            severity="error" if severity in ["HIGH", "MEDIUM"] else "warning",
                            fixable=False,
-                        )
+                        ),
                    )
        except json.JSONDecodeError:
            pass
@@ -780,6 +787,7 @@ class HealthCheckRunner:
         try:
             result = subprocess.run(
                 ["pip-audit", "--format=json"],
+                check=False,
                 capture_output=True,
                 text=True,
                 cwd=str(self.project_root),
@@ -799,7 +807,7 @@ class HealthCheckRunner:
                            severity="error" if item.get("fix_versions") else "warning",
                            fixable=bool(item.get("fix_versions")),
                            fix_command=self._get_fix_cmd(item),
-                        )
+                        ),
                    )
        except json.JSONDecodeError:
            pass
@@ -855,7 +863,7 @@ class AutoFixer:
                    "issue": issue.message,
                    "fix_command": issue.fix_command,
                    "safe": self._is_safe_fix(issue),
-                }
+                },
            )
        return fixes
 
@@ -958,7 +966,7 @@ class HealthTrendTracker:
                "status": report.status.value,
                "total_issues": report.total_issues,
                "results": {r.category.value: r.score for r in report.results},
-            }
+            },
        )
 
        filepath.write_text(json.dumps(history, indent=2))
@@ -989,7 +997,7 @@ class HealthTrendTracker:
                    {
                        "date": date,
                        "score": entry.get("overall_score", 0),
-                    }
+                    },
                )
            except json.JSONDecodeError:
                pass
@@ -1043,13 +1051,13 @@ def format_health_output(
     level: int = 1,
     thresholds: dict | None = None,
 ) -> str:
-    """
-    Format health report for display.
+    """Format health report for display.
 
     Args:
         report: The health report to format
         level: Detail level (1=summary, 2=details, 3=full)
         thresholds: Score thresholds for status icons
+
     """
     thresholds = thresholds or DEFAULT_THRESHOLDS
     lines = []
@@ -1086,7 +1094,7 @@ def format_health_output(
         if result.category == CheckCategory.TESTS:
             details = result.details
             lines.append(
-                f"{icon} {category_name}: {details.get('passed', 0)}P/{details.get('failed', 0)}F"
+                f"{icon} {category_name}: {details.get('passed', 0)}P/{details.get('failed', 0)}F",
             )
         elif result.category == CheckCategory.LINT:
             lines.append(f"{icon} {category_name}: {result.issue_count} warnings")
@@ -1152,7 +1160,7 @@ def format_health_output(
     lines.append("")
     lines.append("━" * 40)
     lines.append(
-        f"[1] Fix {report.total_fixable} auto-fixable issues [2] See details [3] Full report"
+        f"[1] Fix {report.total_fixable} auto-fixable issues [2] See details [3] Full report",
     )
 
     return "\n".join(lines)
empathy_llm_toolkit/contextual_patterns.py
@@ -1,5 +1,4 @@
-"""
-Contextual Pattern Injection
+"""Contextual Pattern Injection
 
 Filters and injects only relevant patterns based on current context.
 Instead of loading all patterns, this module selects patterns that
@@ -33,8 +32,7 @@ logger = logging.getLogger(__name__)
 
 
 class ContextualPatternInjector:
-    """
-    Injects only relevant patterns based on context.
+    """Injects only relevant patterns based on context.
 
     Reduces cognitive load by filtering patterns to those
     most likely to help with the current task.
@@ -54,8 +52,7 @@ class ContextualPatternInjector:
         max_patterns: int = 5,
         include_security: bool = True,
     ) -> str:
-        """
-        Get relevant patterns formatted as markdown.
+        """Get relevant patterns formatted as markdown.
 
         Args:
             file_path: Current file being worked on
@@ -66,6 +63,7 @@ class ContextualPatternInjector:
 
         Returns:
             Markdown string with relevant patterns
+
         """
         all_bugs = self._load_all_bugs()
         all_security = self._load_all_security() if include_security else []
@@ -84,8 +82,7 @@ class ContextualPatternInjector:
         files: list[str],
         max_per_file: int = 3,
     ) -> dict[str, list[dict]]:
-        """
-        Get relevant patterns for code review of multiple files.
+        """Get relevant patterns for code review of multiple files.
 
         Args:
             files: List of file paths being reviewed
@@ -93,6 +90,7 @@ class ContextualPatternInjector:
 
         Returns:
             Dict mapping file paths to relevant patterns
+
         """
         all_bugs = self._load_all_bugs()
         result = {}
@@ -110,11 +108,11 @@ class ContextualPatternInjector:
         return result
 
     def get_patterns_from_git_changes(self, max_patterns: int = 5) -> str:
-        """
-        Get relevant patterns based on recently changed files.
+        """Get relevant patterns based on recently changed files.
 
         Returns:
             Markdown with patterns relevant to git changes
+
         """
         changed_files = self._get_git_changed_files()
         if not changed_files:
@@ -267,6 +265,7 @@ class ContextualPatternInjector:
         try:
             result = subprocess.run(
                 ["git", "diff", "--name-only", "HEAD~5", "HEAD"],
+                check=False,
                 capture_output=True,
                 text=True,
                 timeout=5,
@@ -312,7 +311,7 @@ class ContextualPatternInjector:
         lines.append("")
         for decision in security:
             lines.append(
-                f"- **{decision.get('finding_hash', '?')}**: {decision.get('decision', '?')}"
+                f"- **{decision.get('finding_hash', '?')}**: {decision.get('decision', '?')}",
             )
             lines.append(f" - Reason: {decision.get('reason', 'N/A')}")
         lines.append("")
@@ -351,7 +350,7 @@ def main():
             error_type=args.error_type,
             error_message=args.error_message,
             max_patterns=args.max,
-        )
+        ),
     )
 
 
empathy_llm_toolkit/core.py
@@ -1,5 +1,4 @@
-"""
-Empathy LLM - Core Wrapper
+"""Empathy LLM - Core Wrapper
 
 Main class that wraps any LLM provider with Empathy Framework levels.
 
@@ -37,8 +36,7 @@ logger = logging.getLogger(__name__)
 
 
 class EmpathyLLM:
-    """
-    Wraps any LLM provider with Empathy Framework levels.
+    """Wraps any LLM provider with Empathy Framework levels.
 
     Automatically progresses from Level 1 (reactive) to Level 4 (anticipatory)
     based on user collaboration state.
@@ -92,6 +90,7 @@ class EmpathyLLM:
     ...     user_input="Design the architecture",
     ...     task_type="architectural_decision"
     ... )
+
     """
 
     def __init__(
@@ -108,8 +107,7 @@ class EmpathyLLM:
         enable_model_routing: bool = False,
         **kwargs,
     ):
-        """
-        Initialize EmpathyLLM.
+        """Initialize EmpathyLLM.
 
         Args:
             provider: "anthropic", "openai", or "local"
@@ -133,6 +131,7 @@ class EmpathyLLM:
                 - CAPABLE (Sonnet): code generation, bug fixes, security review
                 - PREMIUM (Opus): coordination, synthesis, architectural decisions
             **kwargs: Provider-specific options
+
         """
         self.target_level = target_level
         self.pattern_library = pattern_library or {}
@@ -164,7 +163,7 @@ class EmpathyLLM:
             self._cached_memory = self.claude_memory_loader.load_all_memory(project_root)
             logger.info(
                 f"EmpathyLLM initialized with Claude memory: "
-                f"{len(self._cached_memory)} chars loaded"
+                f"{len(self._cached_memory)} chars loaded",
             )
 
         # Initialize Phase 3 security controls (v1.8.0+)
@@ -180,7 +179,7 @@ class EmpathyLLM:
         logger.info(
             f"EmpathyLLM initialized: provider={provider}, target_level={target_level}, "
             f"security={'enabled' if enable_security else 'disabled'}, "
-            f"model_routing={'enabled' if enable_model_routing else 'disabled'}"
+            f"model_routing={'enabled' if enable_model_routing else 'disabled'}",
         )
 
     def _initialize_security(self):
@@ -214,26 +213,30 @@ class EmpathyLLM:
         logger.info(f"Audit Logger initialized: {audit_log_dir}")
 
     def _create_provider(
-        self, provider: str, api_key: str | None, model: str | None, **kwargs
+        self,
+        provider: str,
+        api_key: str | None,
+        model: str | None,
+        **kwargs,
     ) -> BaseLLMProvider:
         """Create appropriate provider instance"""
-
         if provider == "anthropic":
             return AnthropicProvider(
-                api_key=api_key, model=model or "claude-sonnet-4-5-20250929", **kwargs
+                api_key=api_key,
+                model=model or "claude-sonnet-4-5-20250929",
+                **kwargs,
             )
-        elif provider == "openai":
+        if provider == "openai":
             return OpenAIProvider(api_key=api_key, model=model or "gpt-4-turbo-preview", **kwargs)
-        elif provider in ("google", "gemini"):
+        if provider in ("google", "gemini"):
             return GeminiProvider(api_key=api_key, model=model or "gemini-1.5-pro", **kwargs)
-        elif provider == "local":
+        if provider == "local":
             return LocalProvider(
                 endpoint=kwargs.get("endpoint", "http://localhost:11434"),
                 model=model or "llama2",
                 **kwargs,
             )
-        else:
-            raise ValueError(f"Unknown provider: {provider}")
+        raise ValueError(f"Unknown provider: {provider}")
 
     def _get_or_create_state(self, user_id: str) -> CollaborationState:
         """Get or create collaboration state for user"""
@@ -242,8 +245,7 @@ class EmpathyLLM:
         return self.states[user_id]
 
     def _determine_level(self, state: CollaborationState) -> int:
-        """
-        Determine which empathy level to use.
+        """Determine which empathy level to use.
 
         Progresses automatically based on state, up to target_level.
         """
@@ -260,8 +262,7 @@ class EmpathyLLM:
         return level
 
     def _build_system_prompt(self, level: int) -> str:
-        """
-        Build system prompt including Claude memory (if enabled).
+        """Build system prompt including Claude memory (if enabled).
 
         Claude memory is prepended to the level-specific prompt,
         so instructions from CLAUDE.md files affect all interactions.
@@ -271,6 +272,7 @@ class EmpathyLLM:
 
         Returns:
             Complete system prompt
+
         """
         level_prompt = EmpathyLevel.get_system_prompt(level)
 
@@ -284,12 +286,10 @@
 
 Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
 """
-        else:
-            return level_prompt
+        return level_prompt
 
     def reload_memory(self):
-        """
-        Reload Claude memory files.
+        """Reload Claude memory files.
 
         Useful if CLAUDE.md files have been updated during runtime.
         Call this to pick up changes without restarting.
@@ -310,8 +310,7 @@ Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
         force_level: int | None = None,
         task_type: str | None = None,
     ) -> dict[str, Any]:
-        """
-        Main interaction method.
+        """Main interaction method.
 
         Automatically selects appropriate empathy level and responds.
 
@@ -345,6 +344,7 @@ Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
 
         Raises:
             SecurityError: If secrets detected and block_on_secrets=True
+
         """
         start_time = time.time()
         state = self._get_or_create_state(user_id)
@@ -367,7 +367,7 @@ Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
                 "routed_tier": tier.value,
             }
             logger.info(
-                f"Model routing: task={effective_task} -> model={routed_model} (tier={tier.value})"
+                f"Model routing: task={effective_task} -> model={routed_model} (tier={tier.value})",
             )
 
         # Initialize security tracking
@@ -383,7 +383,7 @@ Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
             security_metadata["pii_scrubbed"] = len(pii_detections) > 0
             if pii_detections:
                 logger.info(
-                    f"PII detected for user {user_id}: {len(pii_detections)} items scrubbed"
+                    f"PII detected for user {user_id}: {len(pii_detections)} items scrubbed",
                 )
 
         # Phase 3: Security Pipeline (Step 2 - Secrets Detection)
@@ -395,7 +395,7 @@ Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
                 block_on_secrets = self.security_config.get("block_on_secrets", True)
                 logger.warning(
                     f"Secrets detected for user {user_id}: {len(secrets_detections)} secrets, "
-                    f"blocking={block_on_secrets}"
+                    f"blocking={block_on_secrets}",
                 )
 
                 # Log security violation
@@ -415,7 +415,7 @@ Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
                 if block_on_secrets:
                     raise SecurityError(
                         f"Request blocked: {len(secrets_detections)} secret(s) detected in input. "
-                        f"Please remove sensitive credentials before submitting."
+                        f"Please remove sensitive credentials before submitting.",
                     )
 
         # Determine level to use
@@ -495,8 +495,7 @@ Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
         context: dict[str, Any],
         model_override: str | None = None,
     ) -> dict[str, Any]:
-        """
-        Level 1: Reactive - Simple Q&A
+        """Level 1: Reactive - Simple Q&A
 
         No memory, no patterns, just respond to question.
         """
@@ -524,8 +523,7 @@ Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
         context: dict[str, Any],
         model_override: str | None = None,
     ) -> dict[str, Any]:
-        """
-        Level 2: Guided - Ask clarifying questions
+        """Level 2: Guided - Ask clarifying questions
 
         Uses conversation history for context.
         """
@@ -561,8 +559,7 @@ Follow the CLAUDE.md instructions above, then apply the Empathy Framework below.
         context: dict[str, Any],
         model_override: str | None = None,
     ) -> dict[str, Any]:
-        """
-        Level 3: Proactive - Act on detected patterns
+        """Level 3: Proactive - Act on detected patterns
 
         Checks for matching patterns and acts proactively.
         """
@@ -629,8 +626,7 @@ Was this helpful? If not, I can adjust my pattern detection.
         context: dict[str, Any],
         model_override: str | None = None,
     ) -> dict[str, Any]:
-        """
-        Level 4: Anticipatory - Predict future needs
+        """Level 4: Anticipatory - Predict future needs
 
         Analyzes trajectory and alerts to future bottlenecks.
         """
@@ -690,8 +686,7 @@ Use anticipatory format:
         context: dict[str, Any],
         model_override: str | None = None,
     ) -> dict[str, Any]:
-        """
-        Level 5: Systems - Cross-domain pattern learning
+        """Level 5: Systems - Cross-domain pattern learning
 
         Leverages shared pattern library across domains.
         """
@@ -742,8 +737,7 @@ TASK:
         state: CollaborationState,
         current_input: str,
     ) -> None:
-        """
-        Detect user behavior patterns in background.
+        """Detect user behavior patterns in background.
 
         Analyzes conversation history to identify:
         - Sequential patterns: User always does X then Y
@@ -842,7 +836,7 @@ TASK:
                     state.add_pattern(pattern)
 
             logger.debug(
-                f"Pattern detection complete. Detected {len(state.detected_patterns)} patterns."
+                f"Pattern detection complete. Detected {len(state.detected_patterns)} patterns.",
            )
 
        except Exception as e:
@@ -850,13 +844,13 @@ TASK:
            logger.warning(f"Pattern detection error (non-critical): {e}")
 
     def update_trust(self, user_id: str, outcome: str, magnitude: float = 1.0):
-        """
-        Update trust level based on interaction outcome.
+        """Update trust level based on interaction outcome.
 
         Args:
             user_id: User identifier
             outcome: "success" or "failure"
             magnitude: How much to adjust (0.0 to 1.0)
+
         """
         state = self._get_or_create_state(user_id)
         state.update_trust(outcome, magnitude)
@@ -864,12 +858,12 @@ TASK:
         logger.info(f"Trust updated for {user_id}: {outcome} -> {state.trust_level:.2f}")
 
     def add_pattern(self, user_id: str, pattern: UserPattern):
-        """
-        Manually add a detected pattern.
+        """Manually add a detected pattern.
 
         Args:
             user_id: User identifier
             pattern: UserPattern instance
+
         """
         state = self._get_or_create_state(user_id)
         state.add_pattern(pattern)
@@ -877,14 +871,14 @@ TASK:
         logger.info(f"Pattern added for {user_id}: {pattern.pattern_type.value}")
 
     def get_statistics(self, user_id: str) -> dict[str, Any]:
-        """
-        Get collaboration statistics for user.
+        """Get collaboration statistics for user.
 
         Args:
             user_id: User identifier
 
         Returns:
             Dictionary with stats
+
         """
         state = self._get_or_create_state(user_id)
         return state.get_statistics()
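
The hunks above are from empathy_llm_toolkit/core.py (item 33 in the file list). Besides the docstring and trailing-comma cleanup, the one structural change is in _create_provider: the if/elif/else chain becomes a series of early returns with an unconditional raise at the end, the shape favoured by "unnecessary elif/else after return" lint rules. A self-contained sketch of that dispatch style, with stand-in provider objects (the real classes live in empathy_llm_toolkit.providers and take more arguments):

```python
from dataclasses import dataclass


@dataclass
class _Provider:
    """Stand-in for BaseLLMProvider subclasses; illustration only."""

    name: str
    model: str


def create_provider(provider: str, model: str | None = None) -> _Provider:
    """Early-return dispatch in the post-3.7.0 shape: no elif/else chain,
    and the unknown-provider error is the unconditional fall-through."""
    if provider == "anthropic":
        return _Provider("anthropic", model or "claude-sonnet-4-5-20250929")
    if provider == "openai":
        return _Provider("openai", model or "gpt-4-turbo-preview")
    if provider in ("google", "gemini"):
        return _Provider("gemini", model or "gemini-1.5-pro")
    if provider == "local":
        return _Provider("local", model or "llama2")
    raise ValueError(f"Unknown provider: {provider}")


# Example: create_provider("openai") -> _Provider(name="openai", model="gpt-4-turbo-preview")
```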