empathy-framework 4.7.0-py3-none-any.whl → 4.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. empathy_framework-4.8.0.dist-info/METADATA +753 -0
  2. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +83 -37
  3. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
  4. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
  5. empathy_os/__init__.py +2 -0
  6. empathy_os/cache/hash_only.py +6 -3
  7. empathy_os/cache/hybrid.py +6 -3
  8. empathy_os/cli/__init__.py +128 -238
  9. empathy_os/cli/__main__.py +5 -33
  10. empathy_os/cli/commands/__init__.py +1 -8
  11. empathy_os/cli/commands/help.py +331 -0
  12. empathy_os/cli/commands/info.py +140 -0
  13. empathy_os/cli/commands/inspect.py +437 -0
  14. empathy_os/cli/commands/metrics.py +92 -0
  15. empathy_os/cli/commands/orchestrate.py +184 -0
  16. empathy_os/cli/commands/patterns.py +207 -0
  17. empathy_os/cli/commands/provider.py +93 -81
  18. empathy_os/cli/commands/setup.py +96 -0
  19. empathy_os/cli/commands/status.py +235 -0
  20. empathy_os/cli/commands/sync.py +166 -0
  21. empathy_os/cli/commands/tier.py +121 -0
  22. empathy_os/cli/commands/workflow.py +574 -0
  23. empathy_os/cli/parsers/__init__.py +62 -0
  24. empathy_os/cli/parsers/help.py +41 -0
  25. empathy_os/cli/parsers/info.py +26 -0
  26. empathy_os/cli/parsers/inspect.py +66 -0
  27. empathy_os/cli/parsers/metrics.py +42 -0
  28. empathy_os/cli/parsers/orchestrate.py +61 -0
  29. empathy_os/cli/parsers/patterns.py +54 -0
  30. empathy_os/cli/parsers/provider.py +40 -0
  31. empathy_os/cli/parsers/setup.py +42 -0
  32. empathy_os/cli/parsers/status.py +47 -0
  33. empathy_os/cli/parsers/sync.py +31 -0
  34. empathy_os/cli/parsers/tier.py +33 -0
  35. empathy_os/cli/parsers/workflow.py +77 -0
  36. empathy_os/cli/utils/__init__.py +1 -0
  37. empathy_os/cli/utils/data.py +242 -0
  38. empathy_os/cli/utils/helpers.py +68 -0
  39. empathy_os/{cli.py → cli_legacy.py} +27 -27
  40. empathy_os/cli_minimal.py +662 -0
  41. empathy_os/cli_router.py +384 -0
  42. empathy_os/cli_unified.py +38 -2
  43. empathy_os/memory/__init__.py +19 -5
  44. empathy_os/memory/short_term.py +14 -404
  45. empathy_os/memory/types.py +437 -0
  46. empathy_os/memory/unified.py +61 -48
  47. empathy_os/models/fallback.py +1 -1
  48. empathy_os/models/provider_config.py +59 -344
  49. empathy_os/models/registry.py +31 -180
  50. empathy_os/monitoring/alerts.py +14 -20
  51. empathy_os/monitoring/alerts_cli.py +24 -7
  52. empathy_os/project_index/__init__.py +2 -0
  53. empathy_os/project_index/index.py +210 -5
  54. empathy_os/project_index/scanner.py +45 -14
  55. empathy_os/project_index/scanner_parallel.py +291 -0
  56. empathy_os/socratic/ab_testing.py +1 -1
  57. empathy_os/vscode_bridge 2.py +173 -0
  58. empathy_os/workflows/__init__.py +31 -2
  59. empathy_os/workflows/base.py +349 -325
  60. empathy_os/workflows/bug_predict.py +8 -0
  61. empathy_os/workflows/builder.py +273 -0
  62. empathy_os/workflows/caching.py +253 -0
  63. empathy_os/workflows/code_review_pipeline.py +1 -0
  64. empathy_os/workflows/history.py +510 -0
  65. empathy_os/workflows/output.py +410 -0
  66. empathy_os/workflows/perf_audit.py +125 -19
  67. empathy_os/workflows/progress.py +324 -22
  68. empathy_os/workflows/progressive/README 2.md +454 -0
  69. empathy_os/workflows/progressive/__init__ 2.py +92 -0
  70. empathy_os/workflows/progressive/cli 2.py +242 -0
  71. empathy_os/workflows/progressive/core 2.py +488 -0
  72. empathy_os/workflows/progressive/orchestrator 2.py +701 -0
  73. empathy_os/workflows/progressive/reports 2.py +528 -0
  74. empathy_os/workflows/progressive/telemetry 2.py +280 -0
  75. empathy_os/workflows/progressive/test_gen 2.py +514 -0
  76. empathy_os/workflows/progressive/workflow 2.py +628 -0
  77. empathy_os/workflows/routing.py +168 -0
  78. empathy_os/workflows/secure_release.py +1 -0
  79. empathy_os/workflows/security_audit.py +190 -0
  80. empathy_os/workflows/security_audit_phase3.py +328 -0
  81. empathy_os/workflows/telemetry_mixin.py +269 -0
  82. empathy_framework-4.7.0.dist-info/METADATA +0 -1598
  83. empathy_os/dashboard/__init__.py +0 -15
  84. empathy_os/dashboard/server.py +0 -941
  85. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
  86. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -0
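
The largest structural change in the memory package is visible in entries 44 and 45: the dataclasses and enums that previously lived inline in empathy_os/memory/short_term.py (AccessTier, TTLStrategy, RedisConfig, RedisMetrics, PaginatedResult, TimeWindowQuery, AgentCredentials, StagedPattern, ConflictContext, SecurityError) move to the new empathy_os/memory/types.py, and short_term.py re-imports them, as the diff below shows. A minimal sketch of what that likely means for downstream imports follows; only the `from .types import (...)` line in short_term.py is confirmed by the diff, so the continued re-export of the old names is an assumption:

    # Illustrative sketch only -- module paths follow the diff below; the
    # re-export from empathy_os.memory.short_term is inferred, not confirmed.

    # 4.7.0: types were defined directly in short_term.py
    # from empathy_os.memory.short_term import RedisConfig, AccessTier

    # 4.8.0: types live in the dedicated module...
    from empathy_os.memory.types import AccessTier, RedisConfig

    # ...and short_term.py re-imports them, so the old import path should keep
    # working as long as the names stay re-exported there.
    from empathy_os.memory.short_term import RedisConfig as LegacyRedisConfig

    config = RedisConfig(host="localhost", port=6379, ssl=False)
    print(config.to_redis_kwargs()["host"])  # "localhost"
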
@@ -26,9 +26,7 @@ import json
 import threading
 import time
 from collections.abc import Callable
-from dataclasses import dataclass, field
 from datetime import datetime
-from enum import Enum
 from typing import Any
 
 import structlog
@@ -37,6 +35,20 @@ from .security.pii_scrubber import PIIScrubber
 from .security.secrets_detector import SecretsDetector
 from .security.secrets_detector import Severity as SecretSeverity
 
+# Import types from dedicated module
+from .types import (
+    AccessTier,
+    AgentCredentials,
+    ConflictContext,
+    PaginatedResult,
+    RedisConfig,
+    RedisMetrics,
+    SecurityError,
+    StagedPattern,
+    TimeWindowQuery,
+    TTLStrategy,
+)
+
 logger = structlog.get_logger(__name__)
 
 try:
@@ -51,408 +63,6 @@ except ImportError:
     RedisTimeoutError = Exception  # type: ignore
 
 
-class AccessTier(Enum):
-    """Role-based access tiers per EMPATHY_PHILOSOPHY.md
-
-    Tier 1 - Observer: Read-only access to validated patterns
-    Tier 2 - Contributor: Can stage patterns for validation
-    Tier 3 - Validator: Can promote staged patterns to active
-    Tier 4 - Steward: Full access including deprecation and audit
-    """
-
-    OBSERVER = 1
-    CONTRIBUTOR = 2
-    VALIDATOR = 3
-    STEWARD = 4
-
-
-class TTLStrategy(Enum):
-    """TTL strategies for different memory types
-
-    Per EMPATHY_PHILOSOPHY.md Section 9.3:
-    - Working results: 1 hour
-    - Staged patterns: 24 hours
-    - Coordination signals: 5 minutes
-    - Conflict context: Until resolution
-    """
-
-    WORKING_RESULTS = 3600  # 1 hour
-    STAGED_PATTERNS = 86400  # 24 hours
-    COORDINATION = 300  # 5 minutes
-    CONFLICT_CONTEXT = 604800  # 7 days (fallback for unresolved)
-    SESSION = 1800  # 30 minutes
-    STREAM_ENTRY = 86400 * 7  # 7 days for audit stream entries
-    TASK_QUEUE = 3600 * 4  # 4 hours for task queue items
-
-
-@dataclass
-class RedisConfig:
-    """Enhanced Redis configuration with SSL and retry support.
-
-    Supports:
-    - Standard connections (host:port)
-    - URL-based connections (redis://...)
-    - SSL/TLS for managed services (rediss://...)
-    - Sentinel for high availability
-    - Connection pooling
-    - Retry with exponential backoff
-    """
-
-    host: str = "localhost"
-    port: int = 6379
-    db: int = 0
-    password: str | None = None
-    use_mock: bool = False
-
-    # Security settings
-    pii_scrub_enabled: bool = True  # Scrub PII before storing (HIPAA/GDPR compliance)
-    secrets_detection_enabled: bool = True  # Block storage of detected secrets
-
-    # SSL/TLS settings
-    ssl: bool = False
-    ssl_cert_reqs: str | None = None  # "required", "optional", "none"
-    ssl_ca_certs: str | None = None
-    ssl_certfile: str | None = None
-    ssl_keyfile: str | None = None
-
-    # Connection pool settings
-    max_connections: int = 10
-    socket_timeout: float = 5.0
-    socket_connect_timeout: float = 5.0
-
-    # Retry settings
-    retry_on_timeout: bool = True
-    retry_max_attempts: int = 3
-    retry_base_delay: float = 0.1  # seconds
-    retry_max_delay: float = 2.0  # seconds
-
-    # Sentinel settings (for HA)
-    sentinel_hosts: list[tuple[str, int]] | None = None
-    sentinel_master_name: str | None = None
-
-    def to_redis_kwargs(self) -> dict:
-        """Convert to redis.Redis constructor kwargs."""
-        kwargs: dict[str, Any] = {
-            "host": self.host,
-            "port": self.port,
-            "db": self.db,
-            "password": self.password,
-            "decode_responses": True,
-            "socket_timeout": self.socket_timeout,
-            "socket_connect_timeout": self.socket_connect_timeout,
-            "retry_on_timeout": self.retry_on_timeout,
-        }
-
-        if self.ssl:
-            kwargs["ssl"] = True
-            if self.ssl_cert_reqs:
-                kwargs["ssl_cert_reqs"] = self.ssl_cert_reqs
-            if self.ssl_ca_certs:
-                kwargs["ssl_ca_certs"] = self.ssl_ca_certs
-            if self.ssl_certfile:
-                kwargs["ssl_certfile"] = self.ssl_certfile
-            if self.ssl_keyfile:
-                kwargs["ssl_keyfile"] = self.ssl_keyfile
-
-        return kwargs
-
-
-@dataclass
-class RedisMetrics:
-    """Metrics for Redis operations."""
-
-    operations_total: int = 0
-    operations_success: int = 0
-    operations_failed: int = 0
-    retries_total: int = 0
-    latency_sum_ms: float = 0.0
-    latency_max_ms: float = 0.0
-
-    # Per-operation metrics
-    stash_count: int = 0
-    retrieve_count: int = 0
-    publish_count: int = 0
-    stream_append_count: int = 0
-
-    # Security metrics
-    pii_scrubbed_total: int = 0  # Total PII instances scrubbed
-    pii_scrub_operations: int = 0  # Operations that had PII scrubbed
-    secrets_blocked_total: int = 0  # Total secrets blocked from storage
-
-    def record_operation(self, operation: str, latency_ms: float, success: bool = True) -> None:
-        """Record an operation metric."""
-        self.operations_total += 1
-        self.latency_sum_ms += latency_ms
-        self.latency_max_ms = max(self.latency_max_ms, latency_ms)
-
-        if success:
-            self.operations_success += 1
-        else:
-            self.operations_failed += 1
-
-        # Track by operation type
-        if operation == "stash":
-            self.stash_count += 1
-        elif operation == "retrieve":
-            self.retrieve_count += 1
-        elif operation == "publish":
-            self.publish_count += 1
-        elif operation == "stream_append":
-            self.stream_append_count += 1
-
-    @property
-    def latency_avg_ms(self) -> float:
-        """Average latency in milliseconds."""
-        if self.operations_total == 0:
-            return 0.0
-        return self.latency_sum_ms / self.operations_total
-
-    @property
-    def success_rate(self) -> float:
-        """Success rate as percentage."""
-        if self.operations_total == 0:
-            return 100.0
-        return (self.operations_success / self.operations_total) * 100
-
-    def to_dict(self) -> dict:
-        """Convert metrics to dictionary for reporting and serialization.
-
-        Returns:
-            Dictionary with keys: operations_total, operations_success,
-            operations_failed, retries_total, latency_avg_ms, latency_max_ms,
-            success_rate, by_operation, security.
-        """
-        return {
-            "operations_total": self.operations_total,
-            "operations_success": self.operations_success,
-            "operations_failed": self.operations_failed,
-            "retries_total": self.retries_total,
-            "latency_avg_ms": round(self.latency_avg_ms, 2),
-            "latency_max_ms": round(self.latency_max_ms, 2),
-            "success_rate": round(self.success_rate, 2),
-            "by_operation": {
-                "stash": self.stash_count,
-                "retrieve": self.retrieve_count,
-                "publish": self.publish_count,
-                "stream_append": self.stream_append_count,
-            },
-            "security": {
-                "pii_scrubbed_total": self.pii_scrubbed_total,
-                "pii_scrub_operations": self.pii_scrub_operations,
-                "secrets_blocked_total": self.secrets_blocked_total,
-            },
-        }
-
-
-@dataclass
-class PaginatedResult:
-    """Result of a paginated query."""
-
-    items: list[Any]
-    cursor: str
-    has_more: bool
-    total_scanned: int = 0
-
-
-@dataclass
-class TimeWindowQuery:
-    """Query parameters for time-window operations."""
-
-    start_time: datetime | None = None
-    end_time: datetime | None = None
-    limit: int = 100
-    offset: int = 0
-
-    @property
-    def start_score(self) -> float:
-        """Start timestamp as Redis score."""
-        if self.start_time is None:
-            return float("-inf")
-        return self.start_time.timestamp()
-
-    @property
-    def end_score(self) -> float:
-        """End timestamp as Redis score."""
-        if self.end_time is None:
-            return float("+inf")
-        return self.end_time.timestamp()
-
-
-@dataclass
-class AgentCredentials:
-    """Agent identity and access permissions"""
-
-    agent_id: str
-    tier: AccessTier
-    roles: list[str] = field(default_factory=list)
-    created_at: datetime = field(default_factory=datetime.now)
-
-    def can_read(self) -> bool:
-        """All tiers can read"""
-        return True
-
-    def can_stage(self) -> bool:
-        """Contributor+ can stage patterns"""
-        return self.tier.value >= AccessTier.CONTRIBUTOR.value
-
-    def can_validate(self) -> bool:
-        """Validator+ can promote patterns"""
-        return self.tier.value >= AccessTier.VALIDATOR.value
-
-    def can_administer(self) -> bool:
-        """Only Stewards have full admin access"""
-        return self.tier.value >= AccessTier.STEWARD.value
-
-
-@dataclass
-class StagedPattern:
-    """Pattern awaiting validation"""
-
-    pattern_id: str
-    agent_id: str
-    pattern_type: str
-    name: str
-    description: str
-    code: str | None = None
-    context: dict = field(default_factory=dict)
-    confidence: float = 0.5
-    staged_at: datetime = field(default_factory=datetime.now)
-    interests: list[str] = field(default_factory=list)  # For negotiation
-
-    def __post_init__(self):
-        """Validate fields after initialization"""
-        # Pattern 1: String ID validation
-        if not self.pattern_id or not self.pattern_id.strip():
-            raise ValueError("pattern_id cannot be empty")
-        if not self.agent_id or not self.agent_id.strip():
-            raise ValueError("agent_id cannot be empty")
-        if not self.pattern_type or not self.pattern_type.strip():
-            raise ValueError("pattern_type cannot be empty")
-
-        # Pattern 4: Range validation for confidence
-        if not 0.0 <= self.confidence <= 1.0:
-            raise ValueError(f"confidence must be between 0.0 and 1.0, got {self.confidence}")
-
-        # Pattern 5: Type validation
-        if not isinstance(self.context, dict):
-            raise TypeError(f"context must be dict, got {type(self.context).__name__}")
-        if not isinstance(self.interests, list):
-            raise TypeError(f"interests must be list, got {type(self.interests).__name__}")
-
-    def to_dict(self) -> dict:
-        """Convert staged pattern to dictionary for serialization.
-
-        Returns:
-            Dictionary with keys: pattern_id, agent_id, pattern_type, name,
-            description, code, context, confidence, staged_at, interests.
-        """
-        return {
-            "pattern_id": self.pattern_id,
-            "agent_id": self.agent_id,
-            "pattern_type": self.pattern_type,
-            "name": self.name,
-            "description": self.description,
-            "code": self.code,
-            "context": self.context,
-            "confidence": self.confidence,
-            "staged_at": self.staged_at.isoformat(),
-            "interests": self.interests,
-        }
-
-    @classmethod
-    def from_dict(cls, data: dict) -> "StagedPattern":
-        """Reconstruct StagedPattern from dictionary.
-
-        Args:
-            data: Dictionary with required keys: pattern_id, agent_id,
-                pattern_type, name, description, staged_at.
-
-        Returns:
-            Reconstructed StagedPattern instance.
-
-        Raises:
-            KeyError: If required keys are missing.
-            ValueError: If data format is invalid.
-        """
-        return cls(
-            pattern_id=data["pattern_id"],
-            agent_id=data["agent_id"],
-            pattern_type=data["pattern_type"],
-            name=data["name"],
-            description=data["description"],
-            code=data.get("code"),
-            context=data.get("context", {}),
-            confidence=data.get("confidence", 0.5),
-            staged_at=datetime.fromisoformat(data["staged_at"]),
-            interests=data.get("interests", []),
-        )
-
-
-@dataclass
-class ConflictContext:
-    """Context for principled negotiation
-
-    Per Getting to Yes framework:
-    - Positions: What each party says they want
-    - Interests: Why they want it (underlying needs)
-    - BATNA: Best Alternative to Negotiated Agreement
-    """
-
-    conflict_id: str
-    positions: dict[str, Any]  # agent_id -> stated position
-    interests: dict[str, list[str]]  # agent_id -> underlying interests
-    batna: str | None = None  # Fallback strategy
-    created_at: datetime = field(default_factory=datetime.now)
-    resolved: bool = False
-    resolution: str | None = None
-
-    def to_dict(self) -> dict:
-        """Convert conflict context to dictionary for serialization.
-
-        Returns:
-            Dictionary with keys: conflict_id, positions, interests,
-            batna, created_at, resolved, resolution.
-        """
-        return {
-            "conflict_id": self.conflict_id,
-            "positions": self.positions,
-            "interests": self.interests,
-            "batna": self.batna,
-            "created_at": self.created_at.isoformat(),
-            "resolved": self.resolved,
-            "resolution": self.resolution,
-        }
-
-    @classmethod
-    def from_dict(cls, data: dict) -> "ConflictContext":
-        """Reconstruct ConflictContext from dictionary.
-
-        Args:
-            data: Dictionary with required keys: conflict_id, positions,
-                interests, created_at.
-
-        Returns:
-            Reconstructed ConflictContext instance.
-
-        Raises:
-            KeyError: If required keys are missing.
-            ValueError: If data format is invalid.
-        """
-        return cls(
-            conflict_id=data["conflict_id"],
-            positions=data["positions"],
-            interests=data["interests"],
-            batna=data.get("batna"),
-            created_at=datetime.fromisoformat(data["created_at"]),
-            resolved=data.get("resolved", False),
-            resolution=data.get("resolution"),
-        )
-
-
-class SecurityError(Exception):
-    """Raised when a security policy is violated (e.g., secrets detected in data)."""
-
-
 class RedisShortTermMemory:
     """Redis-backed short-term memory for agent coordination
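
The removed block above is relocated rather than deleted behavior: per entry 45 in the file list, these definitions reappear in empathy_os/memory/types.py. A small usage sketch of the access-tier and serialization API exactly as it appears in the removed code; the import path assumes the new types module exports the same names, which the diff implies but does not show directly:

    # Assumed import path per the 4.8.0 layout (empathy_os/memory/types.py).
    from empathy_os.memory.types import AccessTier, AgentCredentials, StagedPattern

    # Tier 2 (Contributor) can read and stage, but cannot validate or administer.
    agent = AgentCredentials(agent_id="agent-7", tier=AccessTier.CONTRIBUTOR)
    assert agent.can_read() and agent.can_stage()
    assert not agent.can_validate() and not agent.can_administer()

    # StagedPattern validates its fields in __post_init__ and round-trips through
    # to_dict()/from_dict(); staged_at is serialized as an ISO-8601 string.
    pattern = StagedPattern(
        pattern_id="p-001",
        agent_id=agent.agent_id,
        pattern_type="refactor",
        name="extract-types-module",
        description="Move memory dataclasses into a dedicated types module",
        confidence=0.8,
    )
    restored = StagedPattern.from_dict(pattern.to_dict())
    assert restored.staged_at == pattern.staged_at
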