empathy-framework 4.7.1__py3-none-any.whl → 4.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/METADATA +65 -2
  2. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +73 -52
  3. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
  4. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
  5. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -1
  6. empathy_os/__init__.py +2 -0
  7. empathy_os/cache/hash_only.py +6 -3
  8. empathy_os/cache/hybrid.py +6 -3
  9. empathy_os/cli/__init__.py +128 -238
  10. empathy_os/cli/__main__.py +5 -33
  11. empathy_os/cli/commands/__init__.py +1 -8
  12. empathy_os/cli/commands/help.py +331 -0
  13. empathy_os/cli/commands/info.py +140 -0
  14. empathy_os/cli/commands/inspect.py +437 -0
  15. empathy_os/cli/commands/metrics.py +92 -0
  16. empathy_os/cli/commands/orchestrate.py +184 -0
  17. empathy_os/cli/commands/patterns.py +207 -0
  18. empathy_os/cli/commands/provider.py +93 -81
  19. empathy_os/cli/commands/setup.py +96 -0
  20. empathy_os/cli/commands/status.py +235 -0
  21. empathy_os/cli/commands/sync.py +166 -0
  22. empathy_os/cli/commands/tier.py +121 -0
  23. empathy_os/cli/commands/workflow.py +574 -0
  24. empathy_os/cli/parsers/__init__.py +62 -0
  25. empathy_os/cli/parsers/help.py +41 -0
  26. empathy_os/cli/parsers/info.py +26 -0
  27. empathy_os/cli/parsers/inspect.py +66 -0
  28. empathy_os/cli/parsers/metrics.py +42 -0
  29. empathy_os/cli/parsers/orchestrate.py +61 -0
  30. empathy_os/cli/parsers/patterns.py +54 -0
  31. empathy_os/cli/parsers/provider.py +40 -0
  32. empathy_os/cli/parsers/setup.py +42 -0
  33. empathy_os/cli/parsers/status.py +47 -0
  34. empathy_os/cli/parsers/sync.py +31 -0
  35. empathy_os/cli/parsers/tier.py +33 -0
  36. empathy_os/cli/parsers/workflow.py +77 -0
  37. empathy_os/cli/utils/__init__.py +1 -0
  38. empathy_os/cli/utils/data.py +242 -0
  39. empathy_os/cli/utils/helpers.py +68 -0
  40. empathy_os/{cli.py → cli_legacy.py} +27 -27
  41. empathy_os/cli_minimal.py +662 -0
  42. empathy_os/cli_router.py +384 -0
  43. empathy_os/cli_unified.py +38 -2
  44. empathy_os/memory/__init__.py +19 -5
  45. empathy_os/memory/short_term.py +14 -404
  46. empathy_os/memory/types.py +437 -0
  47. empathy_os/memory/unified.py +61 -48
  48. empathy_os/models/fallback.py +1 -1
  49. empathy_os/models/provider_config.py +59 -344
  50. empathy_os/models/registry.py +31 -180
  51. empathy_os/monitoring/alerts.py +14 -20
  52. empathy_os/monitoring/alerts_cli.py +24 -7
  53. empathy_os/project_index/__init__.py +2 -0
  54. empathy_os/project_index/index.py +210 -5
  55. empathy_os/project_index/scanner.py +45 -14
  56. empathy_os/project_index/scanner_parallel.py +291 -0
  57. empathy_os/socratic/ab_testing.py +1 -1
  58. empathy_os/workflows/__init__.py +31 -2
  59. empathy_os/workflows/base.py +349 -325
  60. empathy_os/workflows/bug_predict.py +8 -0
  61. empathy_os/workflows/builder.py +273 -0
  62. empathy_os/workflows/caching.py +253 -0
  63. empathy_os/workflows/code_review_pipeline.py +1 -0
  64. empathy_os/workflows/history.py +510 -0
  65. empathy_os/workflows/output.py +410 -0
  66. empathy_os/workflows/perf_audit.py +125 -19
  67. empathy_os/workflows/progress.py +324 -22
  68. empathy_os/workflows/routing.py +168 -0
  69. empathy_os/workflows/secure_release.py +1 -0
  70. empathy_os/workflows/security_audit.py +190 -0
  71. empathy_os/workflows/security_audit_phase3.py +328 -0
  72. empathy_os/workflows/telemetry_mixin.py +269 -0
  73. empathy_os/dashboard/__init__.py +0 -15
  74. empathy_os/dashboard/server.py +0 -941
  75. patterns/README.md +0 -119
  76. patterns/__init__.py +0 -95
  77. patterns/behavior.py +0 -298
  78. patterns/code_review_memory.json +0 -441
  79. patterns/core.py +0 -97
  80. patterns/debugging.json +0 -3763
  81. patterns/empathy.py +0 -268
  82. patterns/health_check_memory.json +0 -505
  83. patterns/input.py +0 -161
  84. patterns/memory_graph.json +0 -8
  85. patterns/refactoring_memory.json +0 -1113
  86. patterns/registry.py +0 -663
  87. patterns/security_memory.json +0 -8
  88. patterns/structural.py +0 -415
  89. patterns/validation.py +0 -194
  90. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,437 @@
1
+ """Memory types and data classes for Empathy Framework.
2
+
3
+ This module contains shared data structures used by the memory subsystem:
4
+ - Access control (AccessTier, AgentCredentials)
5
+ - TTL strategies (TTLStrategy)
6
+ - Configuration (RedisConfig)
7
+ - Metrics (RedisMetrics)
8
+ - Query types (PaginatedResult, TimeWindowQuery)
9
+ - Domain objects (StagedPattern, ConflictContext)
10
+ - Exceptions (SecurityError)
11
+
12
+ These types are independent of Redis and can be imported without the redis package.
13
+
14
+ Copyright 2025 Smart AI Memory, LLC
15
+ Licensed under Fair Source 0.9
16
+ """
17
+
18
+ from dataclasses import dataclass, field
19
+ from datetime import datetime
20
+ from enum import Enum
21
+ from typing import Any
22
+
23
+
24
class AccessTier(Enum):
    """Role-based access tiers per EMPATHY_PHILOSOPHY.md.

    The tiers form a strict hierarchy (higher value = more privilege):

    1. Observer    -- read-only access to validated patterns
    2. Contributor -- may stage patterns for validation
    3. Validator   -- may promote staged patterns to active
    4. Steward     -- full access, including deprecation and audit
    """

    OBSERVER = 1
    CONTRIBUTOR = 2
    VALIDATOR = 3
    STEWARD = 4
37
+
38
+
39
class TTLStrategy(Enum):
    """TTL strategies for different memory types.

    Durations follow EMPATHY_PHILOSOPHY.md Section 9.3: working results
    live one hour, staged patterns one day, coordination signals five
    minutes, and conflict context persists until resolution (with a
    seven-day fallback). All values are seconds.
    """

    WORKING_RESULTS = 3600  # 1 hour
    STAGED_PATTERNS = 86400  # 24 hours
    COORDINATION = 300  # 5 minutes
    CONFLICT_CONTEXT = 604800  # 7 days (fallback for unresolved)
    SESSION = 1800  # 30 minutes
    STREAM_ENTRY = 86400 * 7  # 7 days for audit stream entries
    TASK_QUEUE = 3600 * 4  # 4 hours for task queue items
56
+
57
+
58
+ @dataclass
59
+ class RedisConfig:
60
+ """Enhanced Redis configuration with SSL and retry support.
61
+
62
+ Supports:
63
+ - Standard connections (host:port)
64
+ - URL-based connections (redis://...)
65
+ - SSL/TLS for managed services (rediss://...)
66
+ - Sentinel for high availability
67
+ - Connection pooling
68
+ - Retry with exponential backoff
69
+ """
70
+
71
+ host: str = "localhost"
72
+ port: int = 6379
73
+ db: int = 0
74
+ password: str | None = None
75
+ use_mock: bool = False
76
+
77
+ # Security settings
78
+ pii_scrub_enabled: bool = True # Scrub PII before storing (HIPAA/GDPR compliance)
79
+ secrets_detection_enabled: bool = True # Block storage of detected secrets
80
+
81
+ # SSL/TLS settings
82
+ ssl: bool = False
83
+ ssl_cert_reqs: str | None = None # "required", "optional", "none"
84
+ ssl_ca_certs: str | None = None
85
+ ssl_certfile: str | None = None
86
+ ssl_keyfile: str | None = None
87
+
88
+ # Connection pool settings
89
+ max_connections: int = 10
90
+ socket_timeout: float = 5.0
91
+ socket_connect_timeout: float = 5.0
92
+
93
+ # Retry settings
94
+ retry_on_timeout: bool = True
95
+ retry_max_attempts: int = 3
96
+ retry_base_delay: float = 0.1 # seconds
97
+ retry_max_delay: float = 2.0 # seconds
98
+
99
+ # Sentinel settings (for HA)
100
+ sentinel_hosts: list[tuple[str, int]] | None = None
101
+ sentinel_master_name: str | None = None
102
+
103
+ def to_redis_kwargs(self) -> dict:
104
+ """Convert to redis.Redis constructor kwargs."""
105
+ kwargs: dict[str, Any] = {
106
+ "host": self.host,
107
+ "port": self.port,
108
+ "db": self.db,
109
+ "password": self.password,
110
+ "decode_responses": True,
111
+ "socket_timeout": self.socket_timeout,
112
+ "socket_connect_timeout": self.socket_connect_timeout,
113
+ "retry_on_timeout": self.retry_on_timeout,
114
+ }
115
+
116
+ if self.ssl:
117
+ kwargs["ssl"] = True
118
+ if self.ssl_cert_reqs:
119
+ kwargs["ssl_cert_reqs"] = self.ssl_cert_reqs
120
+ if self.ssl_ca_certs:
121
+ kwargs["ssl_ca_certs"] = self.ssl_ca_certs
122
+ if self.ssl_certfile:
123
+ kwargs["ssl_certfile"] = self.ssl_certfile
124
+ if self.ssl_keyfile:
125
+ kwargs["ssl_keyfile"] = self.ssl_keyfile
126
+
127
+ return kwargs
128
+
129
+
130
@dataclass
class RedisMetrics:
    """Counters and latency aggregates for Redis operations."""

    operations_total: int = 0
    operations_success: int = 0
    operations_failed: int = 0
    retries_total: int = 0
    latency_sum_ms: float = 0.0
    latency_max_ms: float = 0.0

    # Per-operation metrics
    stash_count: int = 0
    retrieve_count: int = 0
    publish_count: int = 0
    stream_append_count: int = 0

    # Security metrics
    pii_scrubbed_total: int = 0  # Total PII instances scrubbed
    pii_scrub_operations: int = 0  # Operations that had PII scrubbed
    secrets_blocked_total: int = 0  # Total secrets blocked from storage

    def record_operation(self, operation: str, latency_ms: float, success: bool = True) -> None:
        """Record a single operation's outcome and latency.

        Args:
            operation: Operation name; "stash", "retrieve", "publish" and
                "stream_append" additionally bump their dedicated counters.
            latency_ms: Observed latency in milliseconds.
            success: Whether the operation succeeded.
        """
        self.operations_total += 1
        self.latency_sum_ms += latency_ms
        if latency_ms > self.latency_max_ms:
            self.latency_max_ms = latency_ms

        if success:
            self.operations_success += 1
        else:
            self.operations_failed += 1

        # Dispatch to the per-operation counter, if one exists for this name.
        counter_attr = {
            "stash": "stash_count",
            "retrieve": "retrieve_count",
            "publish": "publish_count",
            "stream_append": "stream_append_count",
        }.get(operation)
        if counter_attr is not None:
            setattr(self, counter_attr, getattr(self, counter_attr) + 1)

    @property
    def latency_avg_ms(self) -> float:
        """Average latency in milliseconds (0.0 when nothing recorded)."""
        total = self.operations_total
        return self.latency_sum_ms / total if total else 0.0

    @property
    def success_rate(self) -> float:
        """Success rate as a percentage (100.0 when nothing recorded)."""
        total = self.operations_total
        return (self.operations_success / total) * 100 if total else 100.0

    def to_dict(self) -> dict:
        """Convert metrics to dictionary for reporting and serialization.

        Returns:
            Dictionary with keys: operations_total, operations_success,
            operations_failed, retries_total, latency_avg_ms, latency_max_ms,
            success_rate, by_operation, security.
        """
        return {
            "operations_total": self.operations_total,
            "operations_success": self.operations_success,
            "operations_failed": self.operations_failed,
            "retries_total": self.retries_total,
            "latency_avg_ms": round(self.latency_avg_ms, 2),
            "latency_max_ms": round(self.latency_max_ms, 2),
            "success_rate": round(self.success_rate, 2),
            "by_operation": {
                "stash": self.stash_count,
                "retrieve": self.retrieve_count,
                "publish": self.publish_count,
                "stream_append": self.stream_append_count,
            },
            "security": {
                "pii_scrubbed_total": self.pii_scrubbed_total,
                "pii_scrub_operations": self.pii_scrub_operations,
                "secrets_blocked_total": self.secrets_blocked_total,
            },
        }
215
+
216
+
217
@dataclass
class PaginatedResult:
    """Result of a paginated query.

    Carries one page of items plus the cursor state a caller needs to
    fetch the next page.
    """

    items: list[Any]  # the items on this page
    cursor: str  # opaque cursor to pass to the next query
    has_more: bool  # True when additional pages are available
    total_scanned: int = 0  # entries examined while producing this page
225
+
226
+
227
+ @dataclass
228
+ class TimeWindowQuery:
229
+ """Query parameters for time-window operations."""
230
+
231
+ start_time: datetime | None = None
232
+ end_time: datetime | None = None
233
+ limit: int = 100
234
+ offset: int = 0
235
+
236
+ @property
237
+ def start_score(self) -> float:
238
+ """Start timestamp as Redis score."""
239
+ if self.start_time is None:
240
+ return float("-inf")
241
+ return self.start_time.timestamp()
242
+
243
+ @property
244
+ def end_score(self) -> float:
245
+ """End timestamp as Redis score."""
246
+ if self.end_time is None:
247
+ return float("+inf")
248
+ return self.end_time.timestamp()
249
+
250
+
251
@dataclass
class AgentCredentials:
    """Agent identity and access permissions."""

    agent_id: str
    tier: AccessTier
    roles: list[str] = field(default_factory=list)
    created_at: datetime = field(default_factory=datetime.now)

    def _meets(self, minimum: AccessTier) -> bool:
        """True when this agent's tier is at least *minimum*."""
        return self.tier.value >= minimum.value

    def can_read(self) -> bool:
        """Reading is permitted for every tier."""
        return True

    def can_stage(self) -> bool:
        """Staging patterns requires Contributor or above."""
        return self._meets(AccessTier.CONTRIBUTOR)

    def can_validate(self) -> bool:
        """Promoting patterns requires Validator or above."""
        return self._meets(AccessTier.VALIDATOR)

    def can_administer(self) -> bool:
        """Full administrative access is reserved for Stewards."""
        return self._meets(AccessTier.STEWARD)
275
+
276
+
277
+ @dataclass
278
+ class StagedPattern:
279
+ """Pattern awaiting validation"""
280
+
281
+ pattern_id: str
282
+ agent_id: str
283
+ pattern_type: str
284
+ name: str
285
+ description: str
286
+ code: str | None = None
287
+ context: dict = field(default_factory=dict)
288
+ confidence: float = 0.5
289
+ staged_at: datetime = field(default_factory=datetime.now)
290
+ interests: list[str] = field(default_factory=list) # For negotiation
291
+
292
+ def __post_init__(self):
293
+ """Validate fields after initialization"""
294
+ # Pattern 1: String ID validation
295
+ if not self.pattern_id or not self.pattern_id.strip():
296
+ raise ValueError("pattern_id cannot be empty")
297
+ if not self.agent_id or not self.agent_id.strip():
298
+ raise ValueError("agent_id cannot be empty")
299
+ if not self.pattern_type or not self.pattern_type.strip():
300
+ raise ValueError("pattern_type cannot be empty")
301
+
302
+ # Pattern 4: Range validation for confidence
303
+ if not 0.0 <= self.confidence <= 1.0:
304
+ raise ValueError(f"confidence must be between 0.0 and 1.0, got {self.confidence}")
305
+
306
+ # Pattern 5: Type validation
307
+ if not isinstance(self.context, dict):
308
+ raise TypeError(f"context must be dict, got {type(self.context).__name__}")
309
+ if not isinstance(self.interests, list):
310
+ raise TypeError(f"interests must be list, got {type(self.interests).__name__}")
311
+
312
+ def to_dict(self) -> dict:
313
+ """Convert staged pattern to dictionary for serialization.
314
+
315
+ Returns:
316
+ Dictionary with keys: pattern_id, agent_id, pattern_type, name,
317
+ description, code, context, confidence, staged_at, interests.
318
+ """
319
+ return {
320
+ "pattern_id": self.pattern_id,
321
+ "agent_id": self.agent_id,
322
+ "pattern_type": self.pattern_type,
323
+ "name": self.name,
324
+ "description": self.description,
325
+ "code": self.code,
326
+ "context": self.context,
327
+ "confidence": self.confidence,
328
+ "staged_at": self.staged_at.isoformat(),
329
+ "interests": self.interests,
330
+ }
331
+
332
+ @classmethod
333
+ def from_dict(cls, data: dict) -> "StagedPattern":
334
+ """Reconstruct StagedPattern from dictionary.
335
+
336
+ Args:
337
+ data: Dictionary with required keys: pattern_id, agent_id,
338
+ pattern_type, name, description, staged_at.
339
+
340
+ Returns:
341
+ Reconstructed StagedPattern instance.
342
+
343
+ Raises:
344
+ KeyError: If required keys are missing.
345
+ ValueError: If data format is invalid.
346
+ """
347
+ return cls(
348
+ pattern_id=data["pattern_id"],
349
+ agent_id=data["agent_id"],
350
+ pattern_type=data["pattern_type"],
351
+ name=data["name"],
352
+ description=data["description"],
353
+ code=data.get("code"),
354
+ context=data.get("context", {}),
355
+ confidence=data.get("confidence", 0.5),
356
+ staged_at=datetime.fromisoformat(data["staged_at"]),
357
+ interests=data.get("interests", []),
358
+ )
359
+
360
+
361
+ @dataclass
362
+ class ConflictContext:
363
+ """Context for principled negotiation
364
+
365
+ Per Getting to Yes framework:
366
+ - Positions: What each party says they want
367
+ - Interests: Why they want it (underlying needs)
368
+ - BATNA: Best Alternative to Negotiated Agreement
369
+ """
370
+
371
+ conflict_id: str
372
+ positions: dict[str, Any] # agent_id -> stated position
373
+ interests: dict[str, list[str]] # agent_id -> underlying interests
374
+ batna: str | None = None # Fallback strategy
375
+ created_at: datetime = field(default_factory=datetime.now)
376
+ resolved: bool = False
377
+ resolution: str | None = None
378
+
379
+ def to_dict(self) -> dict:
380
+ """Convert conflict context to dictionary for serialization.
381
+
382
+ Returns:
383
+ Dictionary with keys: conflict_id, positions, interests,
384
+ batna, created_at, resolved, resolution.
385
+ """
386
+ return {
387
+ "conflict_id": self.conflict_id,
388
+ "positions": self.positions,
389
+ "interests": self.interests,
390
+ "batna": self.batna,
391
+ "created_at": self.created_at.isoformat(),
392
+ "resolved": self.resolved,
393
+ "resolution": self.resolution,
394
+ }
395
+
396
+ @classmethod
397
+ def from_dict(cls, data: dict) -> "ConflictContext":
398
+ """Reconstruct ConflictContext from dictionary.
399
+
400
+ Args:
401
+ data: Dictionary with required keys: conflict_id, positions,
402
+ interests, created_at.
403
+
404
+ Returns:
405
+ Reconstructed ConflictContext instance.
406
+
407
+ Raises:
408
+ KeyError: If required keys are missing.
409
+ ValueError: If data format is invalid.
410
+ """
411
+ return cls(
412
+ conflict_id=data["conflict_id"],
413
+ positions=data["positions"],
414
+ interests=data["interests"],
415
+ batna=data.get("batna"),
416
+ created_at=datetime.fromisoformat(data["created_at"]),
417
+ resolved=data.get("resolved", False),
418
+ resolution=data.get("resolution"),
419
+ )
420
+
421
+
422
class SecurityError(Exception):
    """Raised when a security policy is violated (e.g., secrets detected in data).

    NOTE(review): presumably raised instead of storing data that trips the
    secrets-detection checks (see ``RedisConfig.secrets_detection_enabled``)
    — confirm against callers.
    """
424
+
425
+
426
# Explicit public API for ``from ... import *`` consumers of this module.
__all__ = [
    "AccessTier",
    "AgentCredentials",
    "ConflictContext",
    "PaginatedResult",
    "RedisConfig",
    "RedisMetrics",
    "SecurityError",
    "StagedPattern",
    "TTLStrategy",
    "TimeWindowQuery",
]
@@ -139,9 +139,8 @@ class MemoryConfig:
139
139
  encryption_enabled=os.getenv("EMPATHY_ENCRYPTION", "true").lower() == "true",
140
140
  claude_memory_enabled=os.getenv("EMPATHY_CLAUDE_MEMORY", "true").lower() == "true",
141
141
  # Compact state
142
- auto_generate_compact_state=os.getenv(
143
- "EMPATHY_AUTO_COMPACT_STATE", "true"
144
- ).lower() == "true",
142
+ auto_generate_compact_state=os.getenv("EMPATHY_AUTO_COMPACT_STATE", "true").lower()
143
+ == "true",
145
144
  compact_state_path=os.getenv("EMPATHY_COMPACT_STATE_PATH", ".claude/compact-state.md"),
146
145
  )
147
146
 
@@ -1100,17 +1099,21 @@ class UnifiedMemory:
1100
1099
  # Add session info
1101
1100
  if self._file_session:
1102
1101
  session = self._file_session._state
1103
- lines.extend([
1104
- f"**Session ID:** {session.session_id}",
1105
- f"**User ID:** {session.user_id}",
1106
- "",
1107
- ])
1102
+ lines.extend(
1103
+ [
1104
+ f"**Session ID:** {session.session_id}",
1105
+ f"**User ID:** {session.user_id}",
1106
+ "",
1107
+ ]
1108
+ )
1108
1109
 
1109
- lines.extend([
1110
- "## SBAR Handoff",
1111
- "",
1112
- "### Situation",
1113
- ])
1110
+ lines.extend(
1111
+ [
1112
+ "## SBAR Handoff",
1113
+ "",
1114
+ "### Situation",
1115
+ ]
1116
+ )
1114
1117
 
1115
1118
  # Get context from file session
1116
1119
  context = {}
@@ -1122,30 +1125,34 @@ class UnifiedMemory:
1122
1125
  assessment = context.get("assessment", "No assessment recorded.")
1123
1126
  recommendation = context.get("recommendation", "Continue with current task.")
1124
1127
 
1125
- lines.extend([
1126
- situation,
1127
- "",
1128
- "### Background",
1129
- background,
1130
- "",
1131
- "### Assessment",
1132
- assessment,
1133
- "",
1134
- "### Recommendation",
1135
- recommendation,
1136
- "",
1137
- ])
1128
+ lines.extend(
1129
+ [
1130
+ situation,
1131
+ "",
1132
+ "### Background",
1133
+ background,
1134
+ "",
1135
+ "### Assessment",
1136
+ assessment,
1137
+ "",
1138
+ "### Recommendation",
1139
+ recommendation,
1140
+ "",
1141
+ ]
1142
+ )
1138
1143
 
1139
1144
  # Add working memory summary
1140
1145
  if self._file_session:
1141
1146
  working_keys = list(self._file_session._state.working_memory.keys())
1142
1147
  if working_keys:
1143
- lines.extend([
1144
- "## Working Memory",
1145
- "",
1146
- f"**Active keys:** {len(working_keys)}",
1147
- "",
1148
- ])
1148
+ lines.extend(
1149
+ [
1150
+ "## Working Memory",
1151
+ "",
1152
+ f"**Active keys:** {len(working_keys)}",
1153
+ "",
1154
+ ]
1155
+ )
1149
1156
  for key in working_keys[:10]: # Show max 10
1150
1157
  lines.append(f"- `{key}`")
1151
1158
  if len(working_keys) > 10:
@@ -1156,29 +1163,35 @@ class UnifiedMemory:
1156
1163
  if self._file_session:
1157
1164
  staged = list(self._file_session._state.staged_patterns.values())
1158
1165
  if staged:
1159
- lines.extend([
1160
- "## Staged Patterns",
1161
- "",
1162
- f"**Pending validation:** {len(staged)}",
1163
- "",
1164
- ])
1166
+ lines.extend(
1167
+ [
1168
+ "## Staged Patterns",
1169
+ "",
1170
+ f"**Pending validation:** {len(staged)}",
1171
+ "",
1172
+ ]
1173
+ )
1165
1174
  for pattern in staged[:5]: # Show max 5
1166
- lines.append(f"- {pattern.name} ({pattern.pattern_type}, conf: {pattern.confidence:.2f})")
1175
+ lines.append(
1176
+ f"- {pattern.name} ({pattern.pattern_type}, conf: {pattern.confidence:.2f})"
1177
+ )
1167
1178
  if len(staged) > 5:
1168
1179
  lines.append(f"- ... and {len(staged) - 5} more")
1169
1180
  lines.append("")
1170
1181
 
1171
1182
  # Add capabilities
1172
1183
  caps = self.get_capabilities()
1173
- lines.extend([
1174
- "## Capabilities",
1175
- "",
1176
- f"- File session: {'Yes' if caps['file_session'] else 'No'}",
1177
- f"- Redis: {'Yes' if caps['redis'] else 'No'}",
1178
- f"- Long-term memory: {'Yes' if caps['long_term'] else 'No'}",
1179
- f"- Real-time sync: {'Yes' if caps['realtime'] else 'No'}",
1180
- "",
1181
- ])
1184
+ lines.extend(
1185
+ [
1186
+ "## Capabilities",
1187
+ "",
1188
+ f"- File session: {'Yes' if caps['file_session'] else 'No'}",
1189
+ f"- Redis: {'Yes' if caps['redis'] else 'No'}",
1190
+ f"- Long-term memory: {'Yes' if caps['long_term'] else 'No'}",
1191
+ f"- Real-time sync: {'Yes' if caps['realtime'] else 'No'}",
1192
+ "",
1193
+ ]
1194
+ )
1182
1195
 
1183
1196
  return "\n".join(lines)
1184
1197
 
@@ -92,7 +92,7 @@ class FallbackPolicy:
92
92
  return self.custom_chain
93
93
 
94
94
  chain: list[FallbackStep] = []
95
- all_providers = ["anthropic", "openai", "ollama"]
95
+ all_providers = ["anthropic"] # Anthropic-only as of v5.0.0
96
96
  all_tiers = ["premium", "capable", "cheap"]
97
97
  # Optimization: Cache tier index for O(1) lookup (vs O(n) .index() call)
98
98
  tier_index_map = {tier: i for i, tier in enumerate(all_tiers)}