empathy-framework 3.7.0__py3-none-any.whl → 3.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coach_wizards/code_reviewer_README.md +60 -0
- coach_wizards/code_reviewer_wizard.py +180 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/METADATA +148 -11
- empathy_framework-3.8.0.dist-info/RECORD +333 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/top_level.txt +5 -1
- empathy_healthcare_plugin/monitors/__init__.py +9 -0
- empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +315 -0
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +44 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +300 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +214 -0
- empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +306 -0
- empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +389 -0
- empathy_llm_toolkit/agent_factory/__init__.py +53 -0
- empathy_llm_toolkit/agent_factory/adapters/__init__.py +85 -0
- empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +312 -0
- empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +454 -0
- empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +298 -0
- empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +362 -0
- empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +333 -0
- empathy_llm_toolkit/agent_factory/adapters/native.py +228 -0
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +426 -0
- empathy_llm_toolkit/agent_factory/base.py +305 -0
- empathy_llm_toolkit/agent_factory/crews/__init__.py +67 -0
- empathy_llm_toolkit/agent_factory/crews/code_review.py +1113 -0
- empathy_llm_toolkit/agent_factory/crews/health_check.py +1246 -0
- empathy_llm_toolkit/agent_factory/crews/refactoring.py +1128 -0
- empathy_llm_toolkit/agent_factory/crews/security_audit.py +1018 -0
- empathy_llm_toolkit/agent_factory/decorators.py +286 -0
- empathy_llm_toolkit/agent_factory/factory.py +558 -0
- empathy_llm_toolkit/agent_factory/framework.py +192 -0
- empathy_llm_toolkit/agent_factory/memory_integration.py +324 -0
- empathy_llm_toolkit/agent_factory/resilient.py +320 -0
- empathy_llm_toolkit/cli/__init__.py +8 -0
- empathy_llm_toolkit/cli/sync_claude.py +487 -0
- empathy_llm_toolkit/code_health.py +150 -3
- empathy_llm_toolkit/config/__init__.py +29 -0
- empathy_llm_toolkit/config/unified.py +295 -0
- empathy_llm_toolkit/routing/__init__.py +32 -0
- empathy_llm_toolkit/routing/model_router.py +362 -0
- empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +413 -0
- empathy_llm_toolkit/security/PHASE2_COMPLETE.md +384 -0
- empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- empathy_llm_toolkit/security/QUICK_REFERENCE.md +316 -0
- empathy_llm_toolkit/security/README.md +262 -0
- empathy_llm_toolkit/security/__init__.py +62 -0
- empathy_llm_toolkit/security/audit_logger.py +929 -0
- empathy_llm_toolkit/security/audit_logger_example.py +152 -0
- empathy_llm_toolkit/security/pii_scrubber.py +640 -0
- empathy_llm_toolkit/security/secrets_detector.py +678 -0
- empathy_llm_toolkit/security/secrets_detector_example.py +304 -0
- empathy_llm_toolkit/security/secure_memdocs.py +1192 -0
- empathy_llm_toolkit/security/secure_memdocs_example.py +278 -0
- empathy_llm_toolkit/wizards/__init__.py +38 -0
- empathy_llm_toolkit/wizards/base_wizard.py +364 -0
- empathy_llm_toolkit/wizards/customer_support_wizard.py +190 -0
- empathy_llm_toolkit/wizards/healthcare_wizard.py +362 -0
- empathy_llm_toolkit/wizards/patient_assessment_README.md +64 -0
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +193 -0
- empathy_llm_toolkit/wizards/technology_wizard.py +194 -0
- empathy_os/__init__.py +52 -52
- empathy_os/adaptive/__init__.py +13 -0
- empathy_os/adaptive/task_complexity.py +127 -0
- empathy_os/cache/__init__.py +117 -0
- empathy_os/cache/base.py +166 -0
- empathy_os/cache/dependency_manager.py +253 -0
- empathy_os/cache/hash_only.py +248 -0
- empathy_os/cache/hybrid.py +390 -0
- empathy_os/cache/storage.py +282 -0
- empathy_os/cli.py +118 -8
- empathy_os/cli_unified.py +121 -1
- empathy_os/config/__init__.py +63 -0
- empathy_os/config/xml_config.py +239 -0
- empathy_os/config.py +2 -1
- empathy_os/dashboard/__init__.py +15 -0
- empathy_os/dashboard/server.py +743 -0
- empathy_os/memory/__init__.py +195 -0
- empathy_os/memory/claude_memory.py +466 -0
- empathy_os/memory/config.py +224 -0
- empathy_os/memory/control_panel.py +1298 -0
- empathy_os/memory/edges.py +179 -0
- empathy_os/memory/graph.py +567 -0
- empathy_os/memory/long_term.py +1194 -0
- empathy_os/memory/nodes.py +179 -0
- empathy_os/memory/redis_bootstrap.py +540 -0
- empathy_os/memory/security/__init__.py +31 -0
- empathy_os/memory/security/audit_logger.py +930 -0
- empathy_os/memory/security/pii_scrubber.py +640 -0
- empathy_os/memory/security/secrets_detector.py +678 -0
- empathy_os/memory/short_term.py +2119 -0
- empathy_os/memory/storage/__init__.py +15 -0
- empathy_os/memory/summary_index.py +583 -0
- empathy_os/memory/unified.py +619 -0
- empathy_os/metrics/__init__.py +12 -0
- empathy_os/metrics/prompt_metrics.py +190 -0
- empathy_os/models/__init__.py +136 -0
- empathy_os/models/__main__.py +13 -0
- empathy_os/models/cli.py +655 -0
- empathy_os/models/empathy_executor.py +354 -0
- empathy_os/models/executor.py +252 -0
- empathy_os/models/fallback.py +671 -0
- empathy_os/models/provider_config.py +563 -0
- empathy_os/models/registry.py +382 -0
- empathy_os/models/tasks.py +302 -0
- empathy_os/models/telemetry.py +548 -0
- empathy_os/models/token_estimator.py +378 -0
- empathy_os/models/validation.py +274 -0
- empathy_os/monitoring/__init__.py +52 -0
- empathy_os/monitoring/alerts.py +23 -0
- empathy_os/monitoring/alerts_cli.py +268 -0
- empathy_os/monitoring/multi_backend.py +271 -0
- empathy_os/monitoring/otel_backend.py +363 -0
- empathy_os/optimization/__init__.py +19 -0
- empathy_os/optimization/context_optimizer.py +272 -0
- empathy_os/plugins/__init__.py +28 -0
- empathy_os/plugins/base.py +361 -0
- empathy_os/plugins/registry.py +268 -0
- empathy_os/project_index/__init__.py +30 -0
- empathy_os/project_index/cli.py +335 -0
- empathy_os/project_index/crew_integration.py +430 -0
- empathy_os/project_index/index.py +425 -0
- empathy_os/project_index/models.py +501 -0
- empathy_os/project_index/reports.py +473 -0
- empathy_os/project_index/scanner.py +538 -0
- empathy_os/prompts/__init__.py +61 -0
- empathy_os/prompts/config.py +77 -0
- empathy_os/prompts/context.py +177 -0
- empathy_os/prompts/parser.py +285 -0
- empathy_os/prompts/registry.py +313 -0
- empathy_os/prompts/templates.py +208 -0
- empathy_os/resilience/__init__.py +56 -0
- empathy_os/resilience/circuit_breaker.py +256 -0
- empathy_os/resilience/fallback.py +179 -0
- empathy_os/resilience/health.py +300 -0
- empathy_os/resilience/retry.py +209 -0
- empathy_os/resilience/timeout.py +135 -0
- empathy_os/routing/__init__.py +43 -0
- empathy_os/routing/chain_executor.py +433 -0
- empathy_os/routing/classifier.py +217 -0
- empathy_os/routing/smart_router.py +234 -0
- empathy_os/routing/wizard_registry.py +307 -0
- empathy_os/trust/__init__.py +28 -0
- empathy_os/trust/circuit_breaker.py +579 -0
- empathy_os/validation/__init__.py +19 -0
- empathy_os/validation/xml_validator.py +281 -0
- empathy_os/wizard_factory_cli.py +170 -0
- empathy_os/workflows/__init__.py +360 -0
- empathy_os/workflows/base.py +1660 -0
- empathy_os/workflows/bug_predict.py +962 -0
- empathy_os/workflows/code_review.py +960 -0
- empathy_os/workflows/code_review_adapters.py +310 -0
- empathy_os/workflows/code_review_pipeline.py +720 -0
- empathy_os/workflows/config.py +600 -0
- empathy_os/workflows/dependency_check.py +648 -0
- empathy_os/workflows/document_gen.py +1069 -0
- empathy_os/workflows/documentation_orchestrator.py +1205 -0
- empathy_os/workflows/health_check.py +679 -0
- empathy_os/workflows/keyboard_shortcuts/__init__.py +39 -0
- empathy_os/workflows/keyboard_shortcuts/generators.py +386 -0
- empathy_os/workflows/keyboard_shortcuts/parsers.py +414 -0
- empathy_os/workflows/keyboard_shortcuts/prompts.py +295 -0
- empathy_os/workflows/keyboard_shortcuts/schema.py +193 -0
- empathy_os/workflows/keyboard_shortcuts/workflow.py +505 -0
- empathy_os/workflows/manage_documentation.py +804 -0
- empathy_os/workflows/new_sample_workflow1.py +146 -0
- empathy_os/workflows/new_sample_workflow1_README.md +150 -0
- empathy_os/workflows/perf_audit.py +687 -0
- empathy_os/workflows/pr_review.py +748 -0
- empathy_os/workflows/progress.py +445 -0
- empathy_os/workflows/progress_server.py +322 -0
- empathy_os/workflows/refactor_plan.py +693 -0
- empathy_os/workflows/release_prep.py +808 -0
- empathy_os/workflows/research_synthesis.py +404 -0
- empathy_os/workflows/secure_release.py +585 -0
- empathy_os/workflows/security_adapters.py +297 -0
- empathy_os/workflows/security_audit.py +1046 -0
- empathy_os/workflows/step_config.py +234 -0
- empathy_os/workflows/test5.py +125 -0
- empathy_os/workflows/test5_README.md +158 -0
- empathy_os/workflows/test_gen.py +1855 -0
- empathy_os/workflows/test_lifecycle.py +526 -0
- empathy_os/workflows/test_maintenance.py +626 -0
- empathy_os/workflows/test_maintenance_cli.py +590 -0
- empathy_os/workflows/test_maintenance_crew.py +821 -0
- empathy_os/workflows/xml_enhanced_crew.py +285 -0
- empathy_software_plugin/cli/__init__.py +120 -0
- empathy_software_plugin/cli/inspect.py +362 -0
- empathy_software_plugin/cli.py +3 -1
- empathy_software_plugin/wizards/__init__.py +42 -0
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +392 -0
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +511 -0
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +503 -0
- empathy_software_plugin/wizards/ai_context_wizard.py +441 -0
- empathy_software_plugin/wizards/ai_documentation_wizard.py +503 -0
- empathy_software_plugin/wizards/base_wizard.py +288 -0
- empathy_software_plugin/wizards/book_chapter_wizard.py +519 -0
- empathy_software_plugin/wizards/code_review_wizard.py +606 -0
- empathy_software_plugin/wizards/debugging/__init__.py +50 -0
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +414 -0
- empathy_software_plugin/wizards/debugging/config_loaders.py +442 -0
- empathy_software_plugin/wizards/debugging/fix_applier.py +469 -0
- empathy_software_plugin/wizards/debugging/language_patterns.py +383 -0
- empathy_software_plugin/wizards/debugging/linter_parsers.py +470 -0
- empathy_software_plugin/wizards/debugging/verification.py +369 -0
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +537 -0
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +816 -0
- empathy_software_plugin/wizards/multi_model_wizard.py +501 -0
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +422 -0
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +400 -0
- empathy_software_plugin/wizards/performance/__init__.py +9 -0
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +221 -0
- empathy_software_plugin/wizards/performance/profiler_parsers.py +278 -0
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +429 -0
- empathy_software_plugin/wizards/performance_profiling_wizard.py +305 -0
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +425 -0
- empathy_software_plugin/wizards/rag_pattern_wizard.py +461 -0
- empathy_software_plugin/wizards/security/__init__.py +32 -0
- empathy_software_plugin/wizards/security/exploit_analyzer.py +290 -0
- empathy_software_plugin/wizards/security/owasp_patterns.py +241 -0
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +604 -0
- empathy_software_plugin/wizards/security_analysis_wizard.py +322 -0
- empathy_software_plugin/wizards/security_learning_wizard.py +740 -0
- empathy_software_plugin/wizards/tech_debt_wizard.py +726 -0
- empathy_software_plugin/wizards/testing/__init__.py +27 -0
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +459 -0
- empathy_software_plugin/wizards/testing/quality_analyzer.py +531 -0
- empathy_software_plugin/wizards/testing/test_suggester.py +533 -0
- empathy_software_plugin/wizards/testing_wizard.py +274 -0
- hot_reload/README.md +473 -0
- hot_reload/__init__.py +62 -0
- hot_reload/config.py +84 -0
- hot_reload/integration.py +228 -0
- hot_reload/reloader.py +298 -0
- hot_reload/watcher.py +179 -0
- hot_reload/websocket.py +176 -0
- scaffolding/README.md +589 -0
- scaffolding/__init__.py +35 -0
- scaffolding/__main__.py +14 -0
- scaffolding/cli.py +240 -0
- test_generator/__init__.py +38 -0
- test_generator/__main__.py +14 -0
- test_generator/cli.py +226 -0
- test_generator/generator.py +325 -0
- test_generator/risk_analyzer.py +216 -0
- workflow_patterns/__init__.py +33 -0
- workflow_patterns/behavior.py +249 -0
- workflow_patterns/core.py +76 -0
- workflow_patterns/output.py +99 -0
- workflow_patterns/registry.py +255 -0
- workflow_patterns/structural.py +288 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
- agents/code_inspection/patterns/inspection/recurring_B112.json +0 -18
- agents/code_inspection/patterns/inspection/recurring_F541.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_FORMAT.json +0 -25
- agents/code_inspection/patterns/inspection/recurring_bug_20250822_def456.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20250915_abc123.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_3c5b9951.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_97c0f72f.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a0871d53.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a9b6ec41.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_null_001.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_builtin.json +0 -16
- agents/compliance_anticipation_agent.py +0 -1422
- agents/compliance_db.py +0 -339
- agents/epic_integration_wizard.py +0 -530
- agents/notifications.py +0 -291
- agents/trust_building_behaviors.py +0 -872
- empathy_framework-3.7.0.dist-info/RECORD +0 -105
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/WHEEL +0 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/licenses/LICENSE +0 -0
- /empathy_os/{monitoring.py → agent_monitoring.py} +0 -0
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
"""Response caching for Empathy Framework workflows.
|
|
2
|
+
|
|
3
|
+
Provides hybrid hash + semantic similarity caching to reduce API costs by 70%.
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
from empathy_os.cache import create_cache
|
|
7
|
+
|
|
8
|
+
# Auto-detect best cache (hybrid if deps available, hash-only otherwise)
|
|
9
|
+
cache = create_cache()
|
|
10
|
+
|
|
11
|
+
# Manual cache selection
|
|
12
|
+
from empathy_os.cache import HashOnlyCache, HybridCache
|
|
13
|
+
|
|
14
|
+
cache = HashOnlyCache() # Always available
|
|
15
|
+
cache = HybridCache() # Requires sentence-transformers
|
|
16
|
+
|
|
17
|
+
Copyright 2025 Smart-AI-Memory
|
|
18
|
+
Licensed under Fair Source License 0.9
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
import logging
|
|
22
|
+
from typing import Optional
|
|
23
|
+
|
|
24
|
+
from .base import BaseCache, CacheEntry, CacheStats
|
|
25
|
+
from .hash_only import HashOnlyCache
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
# Try to import HybridCache (requires optional dependencies)
|
|
30
|
+
try:
|
|
31
|
+
from .hybrid import HybridCache
|
|
32
|
+
|
|
33
|
+
HYBRID_AVAILABLE = True
|
|
34
|
+
except ImportError:
|
|
35
|
+
HYBRID_AVAILABLE = False
|
|
36
|
+
HybridCache = None # type: ignore
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def create_cache(
    cache_type: str | None = None,
    **kwargs,
) -> BaseCache:
    """Create the best available response cache.

    Auto-detects whether sentence-transformers is installed and creates a
    HybridCache if possible, otherwise falls back to HashOnlyCache.

    Args:
        cache_type: Force specific cache type ("hash" | "hybrid" | None for auto).
        **kwargs: Additional arguments passed to the cache constructor.

    Returns:
        BaseCache instance (HybridCache or HashOnlyCache).

    Raises:
        ImportError: If ``cache_type="hybrid"`` but sentence-transformers
            is not installed.
        ValueError: If ``cache_type`` is not "hash", "hybrid", or None.

    Example:
        # Auto-detect (recommended)
        cache = create_cache()

        # Force hash-only
        cache = create_cache(cache_type="hash")

        # Force hybrid (raises ImportError if deps missing)
        cache = create_cache(cache_type="hybrid")

    """
    # Force hash-only
    if cache_type == "hash":
        logger.info("Using hash-only cache (explicit)")
        return HashOnlyCache(**kwargs)

    # Force hybrid
    if cache_type == "hybrid":
        if not HYBRID_AVAILABLE:
            raise ImportError(
                "HybridCache requires sentence-transformers. "
                "Install with: pip install empathy-framework[cache]"
            )
        logger.info("Using hybrid cache (explicit)")
        return HybridCache(**kwargs)

    # Fail loudly on typos ("hybird", "semantic", ...) instead of silently
    # falling through to auto-detect with a cache the caller did not ask for.
    if cache_type is not None:
        raise ValueError(
            f"Unknown cache_type {cache_type!r}; expected 'hash', 'hybrid', or None"
        )

    # Auto-detect (default)
    if HYBRID_AVAILABLE:
        logger.info("Using hybrid cache (auto-detected)")
        return HybridCache(**kwargs)

    logger.info(
        "Using hash-only cache (sentence-transformers not available). "
        "For 70% cost savings, install with: pip install empathy-framework[cache]"
    )
    return HashOnlyCache(**kwargs)
def auto_setup_cache() -> None:
    """Run the one-time cache-dependency setup prompt when needed.

    BaseWorkflow calls this automatically on first run. The user is only
    prompted to install the optional cache dependencies when the
    DependencyManager decides a prompt is warranted (deps missing, prompt
    not previously shown or declined).

    """
    from .dependency_manager import DependencyManager

    dep_manager = DependencyManager()
    if dep_manager.should_prompt_cache_install():
        dep_manager.prompt_cache_install()
# Public API of the cache package. Note that HybridCache is re-exported even
# when sentence-transformers is missing (it is then None); callers should
# check HYBRID_AVAILABLE before relying on it.
__all__ = [
    "BaseCache",
    "CacheEntry",
    "CacheStats",
    "HashOnlyCache",
    "HybridCache",
    "create_cache",
    "auto_setup_cache",
    "HYBRID_AVAILABLE",
]
|
empathy_os/cache/base.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
"""Base cache interface for Empathy Framework response caching.
|
|
2
|
+
|
|
3
|
+
Copyright 2025 Smart-AI-Memory
|
|
4
|
+
Licensed under Fair Source License 0.9
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from abc import ABC, abstractmethod
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
|
|
13
|
+
class CacheEntry:
|
|
14
|
+
"""Cached LLM response with metadata."""
|
|
15
|
+
|
|
16
|
+
key: str
|
|
17
|
+
response: Any
|
|
18
|
+
workflow: str
|
|
19
|
+
stage: str
|
|
20
|
+
model: str
|
|
21
|
+
prompt_hash: str
|
|
22
|
+
timestamp: float
|
|
23
|
+
ttl: int | None = None # Time-to-live in seconds
|
|
24
|
+
|
|
25
|
+
def is_expired(self, current_time: float) -> bool:
|
|
26
|
+
"""Check if entry has expired."""
|
|
27
|
+
if self.ttl is None:
|
|
28
|
+
return False
|
|
29
|
+
return (current_time - self.timestamp) > self.ttl
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
class CacheStats:
    """Running hit/miss/eviction counters for a cache instance."""

    hits: int = 0
    misses: int = 0
    evictions: int = 0

    @property
    def total(self) -> int:
        """Number of lookups performed (hits plus misses)."""
        return self.hits + self.misses

    @property
    def hit_rate(self) -> float:
        """Percentage of lookups that were hits (0.0 before any lookup)."""
        lookups = self.total
        return (self.hits / lookups) * 100.0 if lookups else 0.0

    def to_dict(self) -> dict[str, Any]:
        """Serialize counters plus derived values for structured logging."""
        return {
            "hits": self.hits,
            "misses": self.misses,
            "evictions": self.evictions,
            "total": self.total,
            "hit_rate": round(self.hit_rate, 1),
        }
class BaseCache(ABC):
    """Abstract base class for LLM response caching.

    Concrete subclasses provide get/put/clear/get_stats; this base holds the
    shared size/TTL configuration, the stats counters, and the cache-key
    derivation helper.
    """

    def __init__(self, max_size_mb: int = 500, default_ttl: int = 86400):
        """Initialize cache.

        Args:
            max_size_mb: Maximum cache size in megabytes.
            default_ttl: Default time-to-live in seconds (default: 24 hours).

        """
        self.max_size_mb = max_size_mb
        self.default_ttl = default_ttl
        self.stats = CacheStats()

    @abstractmethod
    def get(self, workflow: str, stage: str, prompt: str, model: str) -> Any | None:
        """Look up a cached response.

        Args:
            workflow: Workflow name (e.g., "code-review").
            stage: Stage name (e.g., "scan").
            prompt: Prompt text.
            model: Model identifier (e.g., "claude-3-5-sonnet-20241022").

        Returns:
            The cached response, or None on a miss.

        """

    @abstractmethod
    def put(
        self,
        workflow: str,
        stage: str,
        prompt: str,
        model: str,
        response: Any,
        ttl: int | None = None,
    ) -> None:
        """Store a response in the cache.

        Args:
            workflow: Workflow name.
            stage: Stage name.
            prompt: Prompt text.
            model: Model identifier.
            response: LLM response to cache.
            ttl: Optional custom TTL in seconds (uses default if None).

        """

    @abstractmethod
    def clear(self) -> None:
        """Remove every cached entry."""

    @abstractmethod
    def get_stats(self) -> CacheStats:
        """Return the current hit/miss statistics."""

    def _create_cache_key(self, workflow: str, stage: str, prompt: str, model: str) -> str:
        """Derive the cache key for a (workflow, stage, prompt, model) tuple.

        The key is the SHA256 hex digest of the four values joined with "|",
        giving a constant-length key regardless of prompt size.

        Args:
            workflow: Workflow name.
            stage: Stage name.
            prompt: Prompt text.
            model: Model identifier.

        Returns:
            Cache key (SHA256 hash).

        """
        import hashlib

        return hashlib.sha256(f"{workflow}|{stage}|{prompt}|{model}".encode()).hexdigest()
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
"""Dependency manager for cache optional dependencies.
|
|
2
|
+
|
|
3
|
+
Handles auto-detection, user prompts, and installation of sentence-transformers.
|
|
4
|
+
|
|
5
|
+
Copyright 2025 Smart-AI-Memory
|
|
6
|
+
Licensed under Fair Source License 0.9
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import logging
|
|
10
|
+
import subprocess
|
|
11
|
+
import sys
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
import yaml
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class DependencyManager:
|
|
21
|
+
"""Manage optional cache dependencies with user prompts.
|
|
22
|
+
|
|
23
|
+
Handles:
|
|
24
|
+
- Auto-detection of installed dependencies
|
|
25
|
+
- One-time user prompt to install cache deps
|
|
26
|
+
- Configuration persistence (user preferences)
|
|
27
|
+
- Pip-based installation
|
|
28
|
+
|
|
29
|
+
Example:
|
|
30
|
+
manager = DependencyManager()
|
|
31
|
+
|
|
32
|
+
if manager.should_prompt_cache_install():
|
|
33
|
+
manager.prompt_cache_install()
|
|
34
|
+
|
|
35
|
+
"""
|
|
36
|
+
|
|
37
|
+
def __init__(self, config_path: Path | None = None):
|
|
38
|
+
"""Initialize dependency manager.
|
|
39
|
+
|
|
40
|
+
Args:
|
|
41
|
+
config_path: Path to config file (default: ~/.empathy/config.yml).
|
|
42
|
+
|
|
43
|
+
"""
|
|
44
|
+
self.config_path = config_path or Path.home() / ".empathy" / "config.yml"
|
|
45
|
+
self.config = self._load_config()
|
|
46
|
+
|
|
47
|
+
def _load_config(self) -> dict:
|
|
48
|
+
"""Load user configuration.
|
|
49
|
+
|
|
50
|
+
Returns:
|
|
51
|
+
Configuration dictionary.
|
|
52
|
+
|
|
53
|
+
"""
|
|
54
|
+
if not self.config_path.exists():
|
|
55
|
+
return {}
|
|
56
|
+
|
|
57
|
+
try:
|
|
58
|
+
with open(self.config_path) as f:
|
|
59
|
+
return yaml.safe_load(f) or {}
|
|
60
|
+
except (yaml.YAMLError, OSError) as e:
|
|
61
|
+
logger.warning(f"Failed to load config: {e}")
|
|
62
|
+
return {}
|
|
63
|
+
|
|
64
|
+
def _save_config(self) -> None:
|
|
65
|
+
"""Save configuration to disk."""
|
|
66
|
+
try:
|
|
67
|
+
self.config_path.parent.mkdir(parents=True, exist_ok=True)
|
|
68
|
+
with open(self.config_path, "w") as f:
|
|
69
|
+
yaml.safe_dump(self.config, f, default_flow_style=False)
|
|
70
|
+
except (yaml.YAMLError, OSError) as e:
|
|
71
|
+
logger.error(f"Failed to save config: {e}")
|
|
72
|
+
|
|
73
|
+
def is_cache_installed(self) -> bool:
|
|
74
|
+
"""Check if cache dependencies are installed.
|
|
75
|
+
|
|
76
|
+
Returns:
|
|
77
|
+
True if sentence-transformers is available, False otherwise.
|
|
78
|
+
|
|
79
|
+
"""
|
|
80
|
+
try:
|
|
81
|
+
import sentence_transformers # noqa: F401
|
|
82
|
+
import torch # noqa: F401
|
|
83
|
+
|
|
84
|
+
return True
|
|
85
|
+
except ImportError:
|
|
86
|
+
return False
|
|
87
|
+
|
|
88
|
+
def should_prompt_cache_install(self) -> bool:
|
|
89
|
+
"""Check if we should prompt user to install cache.
|
|
90
|
+
|
|
91
|
+
Returns:
|
|
92
|
+
True if we should prompt, False otherwise.
|
|
93
|
+
|
|
94
|
+
"""
|
|
95
|
+
# Never prompt if already installed
|
|
96
|
+
if self.is_cache_installed():
|
|
97
|
+
return False
|
|
98
|
+
|
|
99
|
+
# Never prompt if user already declined
|
|
100
|
+
cache_config = self.config.get("cache", {})
|
|
101
|
+
if cache_config.get("install_declined", False):
|
|
102
|
+
return False
|
|
103
|
+
|
|
104
|
+
# Never prompt if prompt already shown
|
|
105
|
+
if cache_config.get("prompt_shown", False):
|
|
106
|
+
return False
|
|
107
|
+
|
|
108
|
+
# Never prompt if user disabled prompts
|
|
109
|
+
if not cache_config.get("prompt_enabled", True):
|
|
110
|
+
return False
|
|
111
|
+
|
|
112
|
+
# Prompt on first run
|
|
113
|
+
return True
|
|
114
|
+
|
|
115
|
+
def prompt_cache_install(self) -> bool:
|
|
116
|
+
"""Prompt user to install cache dependencies.
|
|
117
|
+
|
|
118
|
+
Returns:
|
|
119
|
+
True if user accepted and install succeeded, False otherwise.
|
|
120
|
+
|
|
121
|
+
"""
|
|
122
|
+
print("\n" + "=" * 60)
|
|
123
|
+
print("⚡ Smart Caching Available")
|
|
124
|
+
print("=" * 60)
|
|
125
|
+
print()
|
|
126
|
+
print(" Empathy Framework can reduce your API costs by 70% with hybrid caching.")
|
|
127
|
+
print(" This requires installing sentence-transformers (~150MB).")
|
|
128
|
+
print()
|
|
129
|
+
|
|
130
|
+
try:
|
|
131
|
+
response = (
|
|
132
|
+
input(" Would you like to enable smart caching now? [Y/n]: ").strip().lower()
|
|
133
|
+
)
|
|
134
|
+
except (EOFError, KeyboardInterrupt):
|
|
135
|
+
print("\n Skipping cache installation.")
|
|
136
|
+
response = "n"
|
|
137
|
+
|
|
138
|
+
if response in ["y", "yes", ""]:
|
|
139
|
+
return self.install_cache_dependencies()
|
|
140
|
+
else:
|
|
141
|
+
print()
|
|
142
|
+
print(" ℹ Using hash-only cache (30% savings)")
|
|
143
|
+
print(" ℹ To enable later: empathy install cache")
|
|
144
|
+
print()
|
|
145
|
+
print("=" * 60)
|
|
146
|
+
print()
|
|
147
|
+
|
|
148
|
+
# Save that user declined
|
|
149
|
+
if "cache" not in self.config:
|
|
150
|
+
self.config["cache"] = {}
|
|
151
|
+
self.config["cache"]["install_declined"] = True
|
|
152
|
+
self.config["cache"]["prompt_shown"] = True
|
|
153
|
+
self._save_config()
|
|
154
|
+
|
|
155
|
+
return False
|
|
156
|
+
|
|
157
|
+
def install_cache_dependencies(self) -> bool:
|
|
158
|
+
"""Install cache dependencies using pip.
|
|
159
|
+
|
|
160
|
+
Returns:
|
|
161
|
+
True if installation succeeded, False otherwise.
|
|
162
|
+
|
|
163
|
+
"""
|
|
164
|
+
print()
|
|
165
|
+
print(" ↓ Installing cache dependencies...")
|
|
166
|
+
print()
|
|
167
|
+
|
|
168
|
+
packages = [
|
|
169
|
+
"sentence-transformers>=2.0.0",
|
|
170
|
+
"torch>=2.0.0",
|
|
171
|
+
"numpy>=1.24.0",
|
|
172
|
+
]
|
|
173
|
+
|
|
174
|
+
try:
|
|
175
|
+
# Run pip install
|
|
176
|
+
subprocess.check_call(
|
|
177
|
+
[sys.executable, "-m", "pip", "install", "--quiet"] + packages,
|
|
178
|
+
stdout=subprocess.PIPE,
|
|
179
|
+
stderr=subprocess.PIPE,
|
|
180
|
+
)
|
|
181
|
+
|
|
182
|
+
print(" ✓ sentence-transformers installed")
|
|
183
|
+
print(" ✓ torch installed")
|
|
184
|
+
print(" ✓ numpy installed")
|
|
185
|
+
print()
|
|
186
|
+
print(" ✓ Smart caching enabled! Future runs will save 70% on costs.")
|
|
187
|
+
print()
|
|
188
|
+
print("=" * 60)
|
|
189
|
+
print()
|
|
190
|
+
|
|
191
|
+
# Mark as installed in config
|
|
192
|
+
if "cache" not in self.config:
|
|
193
|
+
self.config["cache"] = {}
|
|
194
|
+
self.config["cache"]["enabled"] = True
|
|
195
|
+
self.config["cache"]["install_declined"] = False
|
|
196
|
+
self.config["cache"]["prompt_shown"] = True
|
|
197
|
+
self._save_config()
|
|
198
|
+
|
|
199
|
+
return True
|
|
200
|
+
|
|
201
|
+
except subprocess.CalledProcessError as e:
|
|
202
|
+
print()
|
|
203
|
+
print(f" ✗ Installation failed: {e}")
|
|
204
|
+
print(" ℹ You can try manually: pip install empathy-framework[cache]")
|
|
205
|
+
print()
|
|
206
|
+
print("=" * 60)
|
|
207
|
+
print()
|
|
208
|
+
|
|
209
|
+
logger.error(f"Failed to install cache dependencies: {e}")
|
|
210
|
+
return False
|
|
211
|
+
|
|
212
|
+
def disable_prompts(self) -> None:
|
|
213
|
+
"""Disable cache installation prompts."""
|
|
214
|
+
if "cache" not in self.config:
|
|
215
|
+
self.config["cache"] = {}
|
|
216
|
+
self.config["cache"]["prompt_enabled"] = False
|
|
217
|
+
self._save_config()
|
|
218
|
+
logger.info("Cache installation prompts disabled")
|
|
219
|
+
|
|
220
|
+
def enable_prompts(self) -> None:
|
|
221
|
+
"""Re-enable cache installation prompts."""
|
|
222
|
+
if "cache" not in self.config:
|
|
223
|
+
self.config["cache"] = {}
|
|
224
|
+
self.config["cache"]["prompt_enabled"] = True
|
|
225
|
+
self.config["cache"]["prompt_shown"] = False
|
|
226
|
+
self.config["cache"]["install_declined"] = False
|
|
227
|
+
self._save_config()
|
|
228
|
+
logger.info("Cache installation prompts re-enabled")
|
|
229
|
+
|
|
230
|
+
def get_config(self) -> dict[str, Any]:
|
|
231
|
+
"""Get cache configuration.
|
|
232
|
+
|
|
233
|
+
Returns:
|
|
234
|
+
Cache configuration dictionary.
|
|
235
|
+
|
|
236
|
+
"""
|
|
237
|
+
result = self.config.get("cache", {})
|
|
238
|
+
if not isinstance(result, dict):
|
|
239
|
+
return {}
|
|
240
|
+
return result
|
|
241
|
+
|
|
242
|
+
def set_config(self, key: str, value: Any) -> None:
|
|
243
|
+
"""Set cache configuration value.
|
|
244
|
+
|
|
245
|
+
Args:
|
|
246
|
+
key: Configuration key.
|
|
247
|
+
value: Configuration value.
|
|
248
|
+
|
|
249
|
+
"""
|
|
250
|
+
if "cache" not in self.config:
|
|
251
|
+
self.config["cache"] = {}
|
|
252
|
+
self.config["cache"][key] = value
|
|
253
|
+
self._save_config()
|