empathy-framework 5.1.1-py3-none-any.whl → 5.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/METADATA +79 -6
  2. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/RECORD +83 -64
  3. empathy_os/__init__.py +1 -1
  4. empathy_os/cache/hybrid.py +5 -1
  5. empathy_os/cli/commands/batch.py +8 -0
  6. empathy_os/cli/commands/profiling.py +4 -0
  7. empathy_os/cli/commands/workflow.py +8 -4
  8. empathy_os/cli_router.py +9 -0
  9. empathy_os/config.py +15 -2
  10. empathy_os/core_modules/__init__.py +15 -0
  11. empathy_os/dashboard/simple_server.py +62 -30
  12. empathy_os/mcp/__init__.py +10 -0
  13. empathy_os/mcp/server.py +506 -0
  14. empathy_os/memory/control_panel.py +1 -131
  15. empathy_os/memory/control_panel_support.py +145 -0
  16. empathy_os/memory/encryption.py +159 -0
  17. empathy_os/memory/long_term.py +46 -631
  18. empathy_os/memory/long_term_types.py +99 -0
  19. empathy_os/memory/mixins/__init__.py +25 -0
  20. empathy_os/memory/mixins/backend_init_mixin.py +249 -0
  21. empathy_os/memory/mixins/capabilities_mixin.py +208 -0
  22. empathy_os/memory/mixins/handoff_mixin.py +208 -0
  23. empathy_os/memory/mixins/lifecycle_mixin.py +49 -0
  24. empathy_os/memory/mixins/long_term_mixin.py +352 -0
  25. empathy_os/memory/mixins/promotion_mixin.py +109 -0
  26. empathy_os/memory/mixins/short_term_mixin.py +182 -0
  27. empathy_os/memory/short_term.py +61 -12
  28. empathy_os/memory/simple_storage.py +302 -0
  29. empathy_os/memory/storage_backend.py +167 -0
  30. empathy_os/memory/types.py +8 -3
  31. empathy_os/memory/unified.py +21 -1120
  32. empathy_os/meta_workflows/cli_commands/__init__.py +56 -0
  33. empathy_os/meta_workflows/cli_commands/agent_commands.py +321 -0
  34. empathy_os/meta_workflows/cli_commands/analytics_commands.py +442 -0
  35. empathy_os/meta_workflows/cli_commands/config_commands.py +232 -0
  36. empathy_os/meta_workflows/cli_commands/memory_commands.py +182 -0
  37. empathy_os/meta_workflows/cli_commands/template_commands.py +354 -0
  38. empathy_os/meta_workflows/cli_commands/workflow_commands.py +382 -0
  39. empathy_os/meta_workflows/cli_meta_workflows.py +52 -1802
  40. empathy_os/models/telemetry/__init__.py +71 -0
  41. empathy_os/models/telemetry/analytics.py +594 -0
  42. empathy_os/models/telemetry/backend.py +196 -0
  43. empathy_os/models/telemetry/data_models.py +431 -0
  44. empathy_os/models/telemetry/storage.py +489 -0
  45. empathy_os/orchestration/__init__.py +35 -0
  46. empathy_os/orchestration/execution_strategies.py +481 -0
  47. empathy_os/orchestration/meta_orchestrator.py +488 -1
  48. empathy_os/routing/workflow_registry.py +36 -0
  49. empathy_os/telemetry/agent_coordination.py +2 -3
  50. empathy_os/telemetry/agent_tracking.py +26 -7
  51. empathy_os/telemetry/approval_gates.py +18 -24
  52. empathy_os/telemetry/cli.py +19 -724
  53. empathy_os/telemetry/commands/__init__.py +14 -0
  54. empathy_os/telemetry/commands/dashboard_commands.py +696 -0
  55. empathy_os/telemetry/event_streaming.py +7 -3
  56. empathy_os/telemetry/feedback_loop.py +28 -15
  57. empathy_os/tools.py +183 -0
  58. empathy_os/workflows/__init__.py +5 -0
  59. empathy_os/workflows/autonomous_test_gen.py +860 -161
  60. empathy_os/workflows/base.py +6 -2
  61. empathy_os/workflows/code_review.py +4 -1
  62. empathy_os/workflows/document_gen/__init__.py +25 -0
  63. empathy_os/workflows/document_gen/config.py +30 -0
  64. empathy_os/workflows/document_gen/report_formatter.py +162 -0
  65. empathy_os/workflows/{document_gen.py → document_gen/workflow.py} +5 -184
  66. empathy_os/workflows/output.py +4 -1
  67. empathy_os/workflows/progress.py +8 -2
  68. empathy_os/workflows/security_audit.py +2 -2
  69. empathy_os/workflows/security_audit_phase3.py +7 -4
  70. empathy_os/workflows/seo_optimization.py +633 -0
  71. empathy_os/workflows/test_gen/__init__.py +52 -0
  72. empathy_os/workflows/test_gen/ast_analyzer.py +249 -0
  73. empathy_os/workflows/test_gen/config.py +88 -0
  74. empathy_os/workflows/test_gen/data_models.py +38 -0
  75. empathy_os/workflows/test_gen/report_formatter.py +289 -0
  76. empathy_os/workflows/test_gen/test_templates.py +381 -0
  77. empathy_os/workflows/test_gen/workflow.py +655 -0
  78. empathy_os/workflows/test_gen.py +42 -1905
  79. empathy_os/cli/parsers/cache 2.py +0 -65
  80. empathy_os/cli_router 2.py +0 -416
  81. empathy_os/dashboard/app 2.py +0 -512
  82. empathy_os/dashboard/simple_server 2.py +0 -403
  83. empathy_os/dashboard/standalone_server 2.py +0 -536
  84. empathy_os/memory/types 2.py +0 -441
  85. empathy_os/models/adaptive_routing 2.py +0 -437
  86. empathy_os/models/telemetry.py +0 -1660
  87. empathy_os/project_index/scanner_parallel 2.py +0 -291
  88. empathy_os/telemetry/agent_coordination 2.py +0 -478
  89. empathy_os/telemetry/agent_tracking 2.py +0 -350
  90. empathy_os/telemetry/approval_gates 2.py +0 -563
  91. empathy_os/telemetry/event_streaming 2.py +0 -405
  92. empathy_os/telemetry/feedback_loop 2.py +0 -557
  93. empathy_os/vscode_bridge 2.py +0 -173
  94. empathy_os/workflows/progressive/__init__ 2.py +0 -92
  95. empathy_os/workflows/progressive/cli 2.py +0 -242
  96. empathy_os/workflows/progressive/core 2.py +0 -488
  97. empathy_os/workflows/progressive/orchestrator 2.py +0 -701
  98. empathy_os/workflows/progressive/reports 2.py +0 -528
  99. empathy_os/workflows/progressive/telemetry 2.py +0 -280
  100. empathy_os/workflows/progressive/test_gen 2.py +0 -514
  101. empathy_os/workflows/progressive/workflow 2.py +0 -628
  102. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/WHEEL +0 -0
  103. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/entry_points.txt +0 -0
  104. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE +0 -0
  105. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  106. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/top_level.txt +0 -0
empathy_os/memory/types 2.py (deleted duplicate, see file 84 above)
@@ -1,441 +0,0 @@
- """Memory types and data classes for Empathy Framework.
-
- This module contains shared data structures used by the memory subsystem:
- - Access control (AccessTier, AgentCredentials)
- - TTL strategies (TTLStrategy)
- - Configuration (RedisConfig)
- - Metrics (RedisMetrics)
- - Query types (PaginatedResult, TimeWindowQuery)
- - Domain objects (StagedPattern, ConflictContext)
- - Exceptions (SecurityError)
-
- These types are independent of Redis and can be imported without the redis package.
-
- Copyright 2025 Smart AI Memory, LLC
- Licensed under Fair Source 0.9
- """
-
- from dataclasses import dataclass, field
- from datetime import datetime
- from enum import Enum
- from typing import Any
-
-
- class AccessTier(Enum):
-     """Role-based access tiers per EMPATHY_PHILOSOPHY.md
-
-     Tier 1 - Observer: Read-only access to validated patterns
-     Tier 2 - Contributor: Can stage patterns for validation
-     Tier 3 - Validator: Can promote staged patterns to active
-     Tier 4 - Steward: Full access including deprecation and audit
-     """
-
-     OBSERVER = 1
-     CONTRIBUTOR = 2
-     VALIDATOR = 3
-     STEWARD = 4
-
-
- class TTLStrategy(Enum):
-     """TTL strategies for different memory types
-
-     Per EMPATHY_PHILOSOPHY.md Section 9.3:
-     - Working results: 1 hour
-     - Staged patterns: 24 hours
-     - Coordination signals: 5 minutes (REMOVED in v5.0 - see CoordinationSignals)
-     - Conflict context: Until resolution
-     """
-
-     WORKING_RESULTS = 3600  # 1 hour
-     STAGED_PATTERNS = 86400  # 24 hours
-     # COORDINATION removed in v5.0 - use CoordinationSignals with custom TTLs
-     CONFLICT_CONTEXT = 604800  # 7 days (fallback for unresolved)
-     SESSION = 1800  # 30 minutes
-     STREAM_ENTRY = 86400 * 7  # 7 days for audit stream entries
-     TASK_QUEUE = 3600 * 4  # 4 hours for task queue items
-
-
- @dataclass
- class RedisConfig:
-     """Enhanced Redis configuration with SSL and retry support.
-
-     Supports:
-     - Standard connections (host:port)
-     - URL-based connections (redis://...)
-     - SSL/TLS for managed services (rediss://...)
-     - Sentinel for high availability
-     - Connection pooling
-     - Retry with exponential backoff
-     """
-
-     host: str = "localhost"
-     port: int = 6379
-     db: int = 0
-     password: str | None = None
-     use_mock: bool = False
-
-     # Security settings
-     pii_scrub_enabled: bool = True  # Scrub PII before storing (HIPAA/GDPR compliance)
-     secrets_detection_enabled: bool = True  # Block storage of detected secrets
-
-     # SSL/TLS settings
-     ssl: bool = False
-     ssl_cert_reqs: str | None = None  # "required", "optional", "none"
-     ssl_ca_certs: str | None = None
-     ssl_certfile: str | None = None
-     ssl_keyfile: str | None = None
-
-     # Connection pool settings
-     max_connections: int = 10
-     socket_timeout: float = 5.0
-     socket_connect_timeout: float = 5.0
-
-     # Retry settings
-     retry_on_timeout: bool = True
-     retry_max_attempts: int = 3
-     retry_base_delay: float = 0.1  # seconds
-     retry_max_delay: float = 2.0  # seconds
-
-     # Local LRU cache settings (two-tier caching)
-     local_cache_enabled: bool = True  # Enable local memory cache (reduces Redis network I/O)
-     local_cache_size: int = 500  # Maximum number of cached keys (~50KB memory)
-
-     # Sentinel settings (for HA)
-     sentinel_hosts: list[tuple[str, int]] | None = None
-     sentinel_master_name: str | None = None
-
-     def to_redis_kwargs(self) -> dict:
-         """Convert to redis.Redis constructor kwargs."""
-         kwargs: dict[str, Any] = {
-             "host": self.host,
-             "port": self.port,
-             "db": self.db,
-             "password": self.password,
-             "decode_responses": True,
-             "socket_timeout": self.socket_timeout,
-             "socket_connect_timeout": self.socket_connect_timeout,
-             "retry_on_timeout": self.retry_on_timeout,
-         }
-
-         if self.ssl:
-             kwargs["ssl"] = True
-             if self.ssl_cert_reqs:
-                 kwargs["ssl_cert_reqs"] = self.ssl_cert_reqs
-             if self.ssl_ca_certs:
-                 kwargs["ssl_ca_certs"] = self.ssl_ca_certs
-             if self.ssl_certfile:
-                 kwargs["ssl_certfile"] = self.ssl_certfile
-             if self.ssl_keyfile:
-                 kwargs["ssl_keyfile"] = self.ssl_keyfile
-
-         return kwargs
-
-
- @dataclass
- class RedisMetrics:
-     """Metrics for Redis operations."""
-
-     operations_total: int = 0
-     operations_success: int = 0
-     operations_failed: int = 0
-     retries_total: int = 0
-     latency_sum_ms: float = 0.0
-     latency_max_ms: float = 0.0
-
-     # Per-operation metrics
-     stash_count: int = 0
-     retrieve_count: int = 0
-     publish_count: int = 0
-     stream_append_count: int = 0
-
-     # Security metrics
-     pii_scrubbed_total: int = 0  # Total PII instances scrubbed
-     pii_scrub_operations: int = 0  # Operations that had PII scrubbed
-     secrets_blocked_total: int = 0  # Total secrets blocked from storage
-
-     def record_operation(self, operation: str, latency_ms: float, success: bool = True) -> None:
-         """Record an operation metric."""
-         self.operations_total += 1
-         self.latency_sum_ms += latency_ms
-         self.latency_max_ms = max(self.latency_max_ms, latency_ms)
-
-         if success:
-             self.operations_success += 1
-         else:
-             self.operations_failed += 1
-
-         # Track by operation type
-         if operation == "stash":
-             self.stash_count += 1
-         elif operation == "retrieve":
-             self.retrieve_count += 1
-         elif operation == "publish":
-             self.publish_count += 1
-         elif operation == "stream_append":
-             self.stream_append_count += 1
-
-     @property
-     def latency_avg_ms(self) -> float:
-         """Average latency in milliseconds."""
-         if self.operations_total == 0:
-             return 0.0
-         return self.latency_sum_ms / self.operations_total
-
-     @property
-     def success_rate(self) -> float:
-         """Success rate as percentage."""
-         if self.operations_total == 0:
-             return 100.0
-         return (self.operations_success / self.operations_total) * 100
-
-     def to_dict(self) -> dict:
-         """Convert metrics to dictionary for reporting and serialization.
-
-         Returns:
-             Dictionary with keys: operations_total, operations_success,
-             operations_failed, retries_total, latency_avg_ms, latency_max_ms,
-             success_rate, by_operation, security.
-         """
-         return {
-             "operations_total": self.operations_total,
-             "operations_success": self.operations_success,
-             "operations_failed": self.operations_failed,
-             "retries_total": self.retries_total,
-             "latency_avg_ms": round(self.latency_avg_ms, 2),
-             "latency_max_ms": round(self.latency_max_ms, 2),
-             "success_rate": round(self.success_rate, 2),
-             "by_operation": {
-                 "stash": self.stash_count,
-                 "retrieve": self.retrieve_count,
-                 "publish": self.publish_count,
-                 "stream_append": self.stream_append_count,
-             },
-             "security": {
-                 "pii_scrubbed_total": self.pii_scrubbed_total,
-                 "pii_scrub_operations": self.pii_scrub_operations,
-                 "secrets_blocked_total": self.secrets_blocked_total,
-             },
-         }
-
-
- @dataclass
- class PaginatedResult:
-     """Result of a paginated query."""
-
-     items: list[Any]
-     cursor: str
-     has_more: bool
-     total_scanned: int = 0
-
-
- @dataclass
- class TimeWindowQuery:
-     """Query parameters for time-window operations."""
-
-     start_time: datetime | None = None
-     end_time: datetime | None = None
-     limit: int = 100
-     offset: int = 0
-
-     @property
-     def start_score(self) -> float:
-         """Start timestamp as Redis score."""
-         if self.start_time is None:
-             return float("-inf")
-         return self.start_time.timestamp()
-
-     @property
-     def end_score(self) -> float:
-         """End timestamp as Redis score."""
-         if self.end_time is None:
-             return float("+inf")
-         return self.end_time.timestamp()
-
-
- @dataclass
- class AgentCredentials:
-     """Agent identity and access permissions"""
-
-     agent_id: str
-     tier: AccessTier
-     roles: list[str] = field(default_factory=list)
-     created_at: datetime = field(default_factory=datetime.now)
-
-     def can_read(self) -> bool:
-         """All tiers can read"""
-         return True
-
-     def can_stage(self) -> bool:
-         """Contributor+ can stage patterns"""
-         return self.tier.value >= AccessTier.CONTRIBUTOR.value
-
-     def can_validate(self) -> bool:
-         """Validator+ can promote patterns"""
-         return self.tier.value >= AccessTier.VALIDATOR.value
-
-     def can_administer(self) -> bool:
-         """Only Stewards have full admin access"""
-         return self.tier.value >= AccessTier.STEWARD.value
-
-
- @dataclass
- class StagedPattern:
-     """Pattern awaiting validation"""
-
-     pattern_id: str
-     agent_id: str
-     pattern_type: str
-     name: str
-     description: str
-     code: str | None = None
-     context: dict = field(default_factory=dict)
-     confidence: float = 0.5
-     staged_at: datetime = field(default_factory=datetime.now)
-     interests: list[str] = field(default_factory=list)  # For negotiation
-
-     def __post_init__(self):
-         """Validate fields after initialization"""
-         # Pattern 1: String ID validation
-         if not self.pattern_id or not self.pattern_id.strip():
-             raise ValueError("pattern_id cannot be empty")
-         if not self.agent_id or not self.agent_id.strip():
-             raise ValueError("agent_id cannot be empty")
-         if not self.pattern_type or not self.pattern_type.strip():
-             raise ValueError("pattern_type cannot be empty")
-
-         # Pattern 4: Range validation for confidence
-         if not 0.0 <= self.confidence <= 1.0:
-             raise ValueError(f"confidence must be between 0.0 and 1.0, got {self.confidence}")
-
-         # Pattern 5: Type validation
-         if not isinstance(self.context, dict):
-             raise TypeError(f"context must be dict, got {type(self.context).__name__}")
-         if not isinstance(self.interests, list):
-             raise TypeError(f"interests must be list, got {type(self.interests).__name__}")
-
-     def to_dict(self) -> dict:
-         """Convert staged pattern to dictionary for serialization.
-
-         Returns:
-             Dictionary with keys: pattern_id, agent_id, pattern_type, name,
-             description, code, context, confidence, staged_at, interests.
-         """
-         return {
-             "pattern_id": self.pattern_id,
-             "agent_id": self.agent_id,
-             "pattern_type": self.pattern_type,
-             "name": self.name,
-             "description": self.description,
-             "code": self.code,
-             "context": self.context,
-             "confidence": self.confidence,
-             "staged_at": self.staged_at.isoformat(),
-             "interests": self.interests,
-         }
-
-     @classmethod
-     def from_dict(cls, data: dict) -> "StagedPattern":
-         """Reconstruct StagedPattern from dictionary.
-
-         Args:
-             data: Dictionary with required keys: pattern_id, agent_id,
-                 pattern_type, name, description, staged_at.
-
-         Returns:
-             Reconstructed StagedPattern instance.
-
-         Raises:
-             KeyError: If required keys are missing.
-             ValueError: If data format is invalid.
-         """
-         return cls(
-             pattern_id=data["pattern_id"],
-             agent_id=data["agent_id"],
-             pattern_type=data["pattern_type"],
-             name=data["name"],
-             description=data["description"],
-             code=data.get("code"),
-             context=data.get("context", {}),
-             confidence=data.get("confidence", 0.5),
-             staged_at=datetime.fromisoformat(data["staged_at"]),
-             interests=data.get("interests", []),
-         )
-
-
- @dataclass
- class ConflictContext:
-     """Context for principled negotiation
-
-     Per Getting to Yes framework:
-     - Positions: What each party says they want
-     - Interests: Why they want it (underlying needs)
-     - BATNA: Best Alternative to Negotiated Agreement
-     """
-
-     conflict_id: str
-     positions: dict[str, Any]  # agent_id -> stated position
-     interests: dict[str, list[str]]  # agent_id -> underlying interests
-     batna: str | None = None  # Fallback strategy
-     created_at: datetime = field(default_factory=datetime.now)
-     resolved: bool = False
-     resolution: str | None = None
-
-     def to_dict(self) -> dict:
-         """Convert conflict context to dictionary for serialization.
-
-         Returns:
-             Dictionary with keys: conflict_id, positions, interests,
-             batna, created_at, resolved, resolution.
-         """
-         return {
-             "conflict_id": self.conflict_id,
-             "positions": self.positions,
-             "interests": self.interests,
-             "batna": self.batna,
-             "created_at": self.created_at.isoformat(),
-             "resolved": self.resolved,
-             "resolution": self.resolution,
-         }
-
-     @classmethod
-     def from_dict(cls, data: dict) -> "ConflictContext":
-         """Reconstruct ConflictContext from dictionary.
-
-         Args:
-             data: Dictionary with required keys: conflict_id, positions,
-                 interests, created_at.
-
-         Returns:
-             Reconstructed ConflictContext instance.
-
-         Raises:
-             KeyError: If required keys are missing.
-             ValueError: If data format is invalid.
-         """
-         return cls(
-             conflict_id=data["conflict_id"],
-             positions=data["positions"],
-             interests=data["interests"],
-             batna=data.get("batna"),
-             created_at=datetime.fromisoformat(data["created_at"]),
-             resolved=data.get("resolved", False),
-             resolution=data.get("resolution"),
-         )
-
-
- class SecurityError(Exception):
-     """Raised when a security policy is violated (e.g., secrets detected in data)."""
-
-
- __all__ = [
-     "AccessTier",
-     "AgentCredentials",
-     "ConflictContext",
-     "PaginatedResult",
-     "RedisConfig",
-     "RedisMetrics",
-     "SecurityError",
-     "StagedPattern",
-     "TTLStrategy",
-     "TimeWindowQuery",
- ]
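
For orientation, the duplicate module deleted above defines the shared memory types that the new mixin-based memory package builds on. The following is a minimal usage sketch, not taken from this diff; it assumes the canonical empathy_os/memory/types.py (changed only +8 -3 in this release) keeps the same public API and import path.

# Illustrative only; the import path, identifiers like "agent-7"/"p-001", and the
# unchanged API are assumptions for this sketch, not confirmed by the diff.
from empathy_os.memory.types import AccessTier, AgentCredentials, RedisConfig, StagedPattern

creds = AgentCredentials(agent_id="agent-7", tier=AccessTier.CONTRIBUTOR)
assert creds.can_stage() and not creds.can_validate()  # tier 2 may stage, not promote

pattern = StagedPattern(
    pattern_id="p-001",
    agent_id=creds.agent_id,
    pattern_type="refactor",
    name="extract-mixin",
    description="Split backend init into a mixin",
    confidence=0.8,                              # outside 0.0-1.0, __post_init__ raises
)
payload = pattern.to_dict()                      # serializable dict (datetime -> ISO string)
restored = StagedPattern.from_dict(payload)      # round-trips via fromisoformat

cfg = RedisConfig(host="redis.internal", ssl=True, ssl_cert_reqs="required")
kwargs = cfg.to_redis_kwargs()                   # suitable for redis.Redis(**kwargs)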