empathy-framework 3.7.0__py3-none-any.whl → 3.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coach_wizards/code_reviewer_README.md +60 -0
- coach_wizards/code_reviewer_wizard.py +180 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/METADATA +148 -11
- empathy_framework-3.8.0.dist-info/RECORD +333 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/top_level.txt +5 -1
- empathy_healthcare_plugin/monitors/__init__.py +9 -0
- empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +315 -0
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +44 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +300 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +214 -0
- empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +306 -0
- empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +389 -0
- empathy_llm_toolkit/agent_factory/__init__.py +53 -0
- empathy_llm_toolkit/agent_factory/adapters/__init__.py +85 -0
- empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +312 -0
- empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +454 -0
- empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +298 -0
- empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +362 -0
- empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +333 -0
- empathy_llm_toolkit/agent_factory/adapters/native.py +228 -0
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +426 -0
- empathy_llm_toolkit/agent_factory/base.py +305 -0
- empathy_llm_toolkit/agent_factory/crews/__init__.py +67 -0
- empathy_llm_toolkit/agent_factory/crews/code_review.py +1113 -0
- empathy_llm_toolkit/agent_factory/crews/health_check.py +1246 -0
- empathy_llm_toolkit/agent_factory/crews/refactoring.py +1128 -0
- empathy_llm_toolkit/agent_factory/crews/security_audit.py +1018 -0
- empathy_llm_toolkit/agent_factory/decorators.py +286 -0
- empathy_llm_toolkit/agent_factory/factory.py +558 -0
- empathy_llm_toolkit/agent_factory/framework.py +192 -0
- empathy_llm_toolkit/agent_factory/memory_integration.py +324 -0
- empathy_llm_toolkit/agent_factory/resilient.py +320 -0
- empathy_llm_toolkit/cli/__init__.py +8 -0
- empathy_llm_toolkit/cli/sync_claude.py +487 -0
- empathy_llm_toolkit/code_health.py +150 -3
- empathy_llm_toolkit/config/__init__.py +29 -0
- empathy_llm_toolkit/config/unified.py +295 -0
- empathy_llm_toolkit/routing/__init__.py +32 -0
- empathy_llm_toolkit/routing/model_router.py +362 -0
- empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +413 -0
- empathy_llm_toolkit/security/PHASE2_COMPLETE.md +384 -0
- empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- empathy_llm_toolkit/security/QUICK_REFERENCE.md +316 -0
- empathy_llm_toolkit/security/README.md +262 -0
- empathy_llm_toolkit/security/__init__.py +62 -0
- empathy_llm_toolkit/security/audit_logger.py +929 -0
- empathy_llm_toolkit/security/audit_logger_example.py +152 -0
- empathy_llm_toolkit/security/pii_scrubber.py +640 -0
- empathy_llm_toolkit/security/secrets_detector.py +678 -0
- empathy_llm_toolkit/security/secrets_detector_example.py +304 -0
- empathy_llm_toolkit/security/secure_memdocs.py +1192 -0
- empathy_llm_toolkit/security/secure_memdocs_example.py +278 -0
- empathy_llm_toolkit/wizards/__init__.py +38 -0
- empathy_llm_toolkit/wizards/base_wizard.py +364 -0
- empathy_llm_toolkit/wizards/customer_support_wizard.py +190 -0
- empathy_llm_toolkit/wizards/healthcare_wizard.py +362 -0
- empathy_llm_toolkit/wizards/patient_assessment_README.md +64 -0
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +193 -0
- empathy_llm_toolkit/wizards/technology_wizard.py +194 -0
- empathy_os/__init__.py +52 -52
- empathy_os/adaptive/__init__.py +13 -0
- empathy_os/adaptive/task_complexity.py +127 -0
- empathy_os/cache/__init__.py +117 -0
- empathy_os/cache/base.py +166 -0
- empathy_os/cache/dependency_manager.py +253 -0
- empathy_os/cache/hash_only.py +248 -0
- empathy_os/cache/hybrid.py +390 -0
- empathy_os/cache/storage.py +282 -0
- empathy_os/cli.py +118 -8
- empathy_os/cli_unified.py +121 -1
- empathy_os/config/__init__.py +63 -0
- empathy_os/config/xml_config.py +239 -0
- empathy_os/config.py +2 -1
- empathy_os/dashboard/__init__.py +15 -0
- empathy_os/dashboard/server.py +743 -0
- empathy_os/memory/__init__.py +195 -0
- empathy_os/memory/claude_memory.py +466 -0
- empathy_os/memory/config.py +224 -0
- empathy_os/memory/control_panel.py +1298 -0
- empathy_os/memory/edges.py +179 -0
- empathy_os/memory/graph.py +567 -0
- empathy_os/memory/long_term.py +1194 -0
- empathy_os/memory/nodes.py +179 -0
- empathy_os/memory/redis_bootstrap.py +540 -0
- empathy_os/memory/security/__init__.py +31 -0
- empathy_os/memory/security/audit_logger.py +930 -0
- empathy_os/memory/security/pii_scrubber.py +640 -0
- empathy_os/memory/security/secrets_detector.py +678 -0
- empathy_os/memory/short_term.py +2119 -0
- empathy_os/memory/storage/__init__.py +15 -0
- empathy_os/memory/summary_index.py +583 -0
- empathy_os/memory/unified.py +619 -0
- empathy_os/metrics/__init__.py +12 -0
- empathy_os/metrics/prompt_metrics.py +190 -0
- empathy_os/models/__init__.py +136 -0
- empathy_os/models/__main__.py +13 -0
- empathy_os/models/cli.py +655 -0
- empathy_os/models/empathy_executor.py +354 -0
- empathy_os/models/executor.py +252 -0
- empathy_os/models/fallback.py +671 -0
- empathy_os/models/provider_config.py +563 -0
- empathy_os/models/registry.py +382 -0
- empathy_os/models/tasks.py +302 -0
- empathy_os/models/telemetry.py +548 -0
- empathy_os/models/token_estimator.py +378 -0
- empathy_os/models/validation.py +274 -0
- empathy_os/monitoring/__init__.py +52 -0
- empathy_os/monitoring/alerts.py +23 -0
- empathy_os/monitoring/alerts_cli.py +268 -0
- empathy_os/monitoring/multi_backend.py +271 -0
- empathy_os/monitoring/otel_backend.py +363 -0
- empathy_os/optimization/__init__.py +19 -0
- empathy_os/optimization/context_optimizer.py +272 -0
- empathy_os/plugins/__init__.py +28 -0
- empathy_os/plugins/base.py +361 -0
- empathy_os/plugins/registry.py +268 -0
- empathy_os/project_index/__init__.py +30 -0
- empathy_os/project_index/cli.py +335 -0
- empathy_os/project_index/crew_integration.py +430 -0
- empathy_os/project_index/index.py +425 -0
- empathy_os/project_index/models.py +501 -0
- empathy_os/project_index/reports.py +473 -0
- empathy_os/project_index/scanner.py +538 -0
- empathy_os/prompts/__init__.py +61 -0
- empathy_os/prompts/config.py +77 -0
- empathy_os/prompts/context.py +177 -0
- empathy_os/prompts/parser.py +285 -0
- empathy_os/prompts/registry.py +313 -0
- empathy_os/prompts/templates.py +208 -0
- empathy_os/resilience/__init__.py +56 -0
- empathy_os/resilience/circuit_breaker.py +256 -0
- empathy_os/resilience/fallback.py +179 -0
- empathy_os/resilience/health.py +300 -0
- empathy_os/resilience/retry.py +209 -0
- empathy_os/resilience/timeout.py +135 -0
- empathy_os/routing/__init__.py +43 -0
- empathy_os/routing/chain_executor.py +433 -0
- empathy_os/routing/classifier.py +217 -0
- empathy_os/routing/smart_router.py +234 -0
- empathy_os/routing/wizard_registry.py +307 -0
- empathy_os/trust/__init__.py +28 -0
- empathy_os/trust/circuit_breaker.py +579 -0
- empathy_os/validation/__init__.py +19 -0
- empathy_os/validation/xml_validator.py +281 -0
- empathy_os/wizard_factory_cli.py +170 -0
- empathy_os/workflows/__init__.py +360 -0
- empathy_os/workflows/base.py +1660 -0
- empathy_os/workflows/bug_predict.py +962 -0
- empathy_os/workflows/code_review.py +960 -0
- empathy_os/workflows/code_review_adapters.py +310 -0
- empathy_os/workflows/code_review_pipeline.py +720 -0
- empathy_os/workflows/config.py +600 -0
- empathy_os/workflows/dependency_check.py +648 -0
- empathy_os/workflows/document_gen.py +1069 -0
- empathy_os/workflows/documentation_orchestrator.py +1205 -0
- empathy_os/workflows/health_check.py +679 -0
- empathy_os/workflows/keyboard_shortcuts/__init__.py +39 -0
- empathy_os/workflows/keyboard_shortcuts/generators.py +386 -0
- empathy_os/workflows/keyboard_shortcuts/parsers.py +414 -0
- empathy_os/workflows/keyboard_shortcuts/prompts.py +295 -0
- empathy_os/workflows/keyboard_shortcuts/schema.py +193 -0
- empathy_os/workflows/keyboard_shortcuts/workflow.py +505 -0
- empathy_os/workflows/manage_documentation.py +804 -0
- empathy_os/workflows/new_sample_workflow1.py +146 -0
- empathy_os/workflows/new_sample_workflow1_README.md +150 -0
- empathy_os/workflows/perf_audit.py +687 -0
- empathy_os/workflows/pr_review.py +748 -0
- empathy_os/workflows/progress.py +445 -0
- empathy_os/workflows/progress_server.py +322 -0
- empathy_os/workflows/refactor_plan.py +693 -0
- empathy_os/workflows/release_prep.py +808 -0
- empathy_os/workflows/research_synthesis.py +404 -0
- empathy_os/workflows/secure_release.py +585 -0
- empathy_os/workflows/security_adapters.py +297 -0
- empathy_os/workflows/security_audit.py +1046 -0
- empathy_os/workflows/step_config.py +234 -0
- empathy_os/workflows/test5.py +125 -0
- empathy_os/workflows/test5_README.md +158 -0
- empathy_os/workflows/test_gen.py +1855 -0
- empathy_os/workflows/test_lifecycle.py +526 -0
- empathy_os/workflows/test_maintenance.py +626 -0
- empathy_os/workflows/test_maintenance_cli.py +590 -0
- empathy_os/workflows/test_maintenance_crew.py +821 -0
- empathy_os/workflows/xml_enhanced_crew.py +285 -0
- empathy_software_plugin/cli/__init__.py +120 -0
- empathy_software_plugin/cli/inspect.py +362 -0
- empathy_software_plugin/cli.py +3 -1
- empathy_software_plugin/wizards/__init__.py +42 -0
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +392 -0
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +511 -0
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +503 -0
- empathy_software_plugin/wizards/ai_context_wizard.py +441 -0
- empathy_software_plugin/wizards/ai_documentation_wizard.py +503 -0
- empathy_software_plugin/wizards/base_wizard.py +288 -0
- empathy_software_plugin/wizards/book_chapter_wizard.py +519 -0
- empathy_software_plugin/wizards/code_review_wizard.py +606 -0
- empathy_software_plugin/wizards/debugging/__init__.py +50 -0
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +414 -0
- empathy_software_plugin/wizards/debugging/config_loaders.py +442 -0
- empathy_software_plugin/wizards/debugging/fix_applier.py +469 -0
- empathy_software_plugin/wizards/debugging/language_patterns.py +383 -0
- empathy_software_plugin/wizards/debugging/linter_parsers.py +470 -0
- empathy_software_plugin/wizards/debugging/verification.py +369 -0
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +537 -0
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +816 -0
- empathy_software_plugin/wizards/multi_model_wizard.py +501 -0
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +422 -0
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +400 -0
- empathy_software_plugin/wizards/performance/__init__.py +9 -0
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +221 -0
- empathy_software_plugin/wizards/performance/profiler_parsers.py +278 -0
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +429 -0
- empathy_software_plugin/wizards/performance_profiling_wizard.py +305 -0
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +425 -0
- empathy_software_plugin/wizards/rag_pattern_wizard.py +461 -0
- empathy_software_plugin/wizards/security/__init__.py +32 -0
- empathy_software_plugin/wizards/security/exploit_analyzer.py +290 -0
- empathy_software_plugin/wizards/security/owasp_patterns.py +241 -0
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +604 -0
- empathy_software_plugin/wizards/security_analysis_wizard.py +322 -0
- empathy_software_plugin/wizards/security_learning_wizard.py +740 -0
- empathy_software_plugin/wizards/tech_debt_wizard.py +726 -0
- empathy_software_plugin/wizards/testing/__init__.py +27 -0
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +459 -0
- empathy_software_plugin/wizards/testing/quality_analyzer.py +531 -0
- empathy_software_plugin/wizards/testing/test_suggester.py +533 -0
- empathy_software_plugin/wizards/testing_wizard.py +274 -0
- hot_reload/README.md +473 -0
- hot_reload/__init__.py +62 -0
- hot_reload/config.py +84 -0
- hot_reload/integration.py +228 -0
- hot_reload/reloader.py +298 -0
- hot_reload/watcher.py +179 -0
- hot_reload/websocket.py +176 -0
- scaffolding/README.md +589 -0
- scaffolding/__init__.py +35 -0
- scaffolding/__main__.py +14 -0
- scaffolding/cli.py +240 -0
- test_generator/__init__.py +38 -0
- test_generator/__main__.py +14 -0
- test_generator/cli.py +226 -0
- test_generator/generator.py +325 -0
- test_generator/risk_analyzer.py +216 -0
- workflow_patterns/__init__.py +33 -0
- workflow_patterns/behavior.py +249 -0
- workflow_patterns/core.py +76 -0
- workflow_patterns/output.py +99 -0
- workflow_patterns/registry.py +255 -0
- workflow_patterns/structural.py +288 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
- agents/code_inspection/patterns/inspection/recurring_B112.json +0 -18
- agents/code_inspection/patterns/inspection/recurring_F541.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_FORMAT.json +0 -25
- agents/code_inspection/patterns/inspection/recurring_bug_20250822_def456.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20250915_abc123.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_3c5b9951.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_97c0f72f.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a0871d53.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a9b6ec41.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_null_001.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_builtin.json +0 -16
- agents/compliance_anticipation_agent.py +0 -1422
- agents/compliance_db.py +0 -339
- agents/epic_integration_wizard.py +0 -530
- agents/notifications.py +0 -291
- agents/trust_building_behaviors.py +0 -872
- empathy_framework-3.7.0.dist-info/RECORD +0 -105
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/WHEEL +0 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/licenses/LICENSE +0 -0
- /empathy_os/{monitoring.py → agent_monitoring.py} +0 -0
|
@@ -0,0 +1,363 @@
|
|
|
1
|
+
"""OpenTelemetry Backend for LLM Telemetry
|
|
2
|
+
|
|
3
|
+
Exports telemetry data to OTEL-compatible collectors (SigNoz, Datadog, New Relic).
|
|
4
|
+
|
|
5
|
+
**Features:**
|
|
6
|
+
- Auto-detection of OTEL collector (localhost:4317)
|
|
7
|
+
- Environment variable configuration (EMPATHY_OTEL_ENDPOINT)
|
|
8
|
+
- Semantic conventions for LLM traces
|
|
9
|
+
- Batch export with retry logic
|
|
10
|
+
- Graceful fallback if collector unavailable
|
|
11
|
+
|
|
12
|
+
**Setup:**
|
|
13
|
+
```bash
|
|
14
|
+
export EMPATHY_OTEL_ENDPOINT=http://localhost:4317
|
|
15
|
+
pip install empathy-framework[otel]
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
Copyright 2025 Smart-AI-Memory
|
|
19
|
+
Licensed under Fair Source License 0.9
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
import os
|
|
23
|
+
import socket
|
|
24
|
+
|
|
25
|
+
from empathy_os.models.telemetry import LLMCallRecord, WorkflowRunRecord
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class OTELBackend:
|
|
29
|
+
"""OpenTelemetry backend for exporting telemetry to OTEL collectors.
|
|
30
|
+
|
|
31
|
+
Implements the TelemetryBackend protocol for OTEL export.
|
|
32
|
+
|
|
33
|
+
**Auto-detection:**
|
|
34
|
+
- Checks for OTEL collector on localhost:4317
|
|
35
|
+
- Falls back to EMPATHY_OTEL_ENDPOINT environment variable
|
|
36
|
+
|
|
37
|
+
**Semantic Conventions:**
|
|
38
|
+
- LLM calls → OTEL spans with llm.* attributes
|
|
39
|
+
- Workflows → OTEL traces with workflow.* attributes
|
|
40
|
+
|
|
41
|
+
**Batch Export:**
|
|
42
|
+
- Buffers records and exports in batches
|
|
43
|
+
- Retries on transient failures
|
|
44
|
+
- Logs errors but doesn't crash
|
|
45
|
+
|
|
46
|
+
Example:
|
|
47
|
+
>>> backend = OTELBackend()
|
|
48
|
+
>>> if backend.is_available():
|
|
49
|
+
... backend.log_call(call_record)
|
|
50
|
+
... backend.log_workflow(workflow_record)
|
|
51
|
+
"""
|
|
52
|
+
|
|
53
|
+
def __init__(
|
|
54
|
+
self,
|
|
55
|
+
endpoint: str | None = None,
|
|
56
|
+
batch_size: int = 10,
|
|
57
|
+
retry_count: int = 3,
|
|
58
|
+
):
|
|
59
|
+
"""Initialize OTEL backend.
|
|
60
|
+
|
|
61
|
+
Args:
|
|
62
|
+
endpoint: OTEL collector endpoint (default: auto-detect)
|
|
63
|
+
batch_size: Number of records to buffer before export
|
|
64
|
+
retry_count: Number of retries on transient failures
|
|
65
|
+
"""
|
|
66
|
+
self.endpoint = endpoint or self._detect_endpoint()
|
|
67
|
+
self.batch_size = batch_size
|
|
68
|
+
self.retry_count = retry_count
|
|
69
|
+
self.call_buffer: list[LLMCallRecord] = []
|
|
70
|
+
self.workflow_buffer: list[WorkflowRunRecord] = []
|
|
71
|
+
self._available = self._check_availability()
|
|
72
|
+
|
|
73
|
+
# Try importing OTEL dependencies
|
|
74
|
+
self._otel_available = self._check_otel_installed()
|
|
75
|
+
|
|
76
|
+
if self._otel_available and self._available:
|
|
77
|
+
self._init_otel()
|
|
78
|
+
|
|
79
|
+
def _detect_endpoint(self) -> str:
|
|
80
|
+
"""Detect OTEL collector endpoint.
|
|
81
|
+
|
|
82
|
+
Checks (in order):
|
|
83
|
+
1. EMPATHY_OTEL_ENDPOINT environment variable
|
|
84
|
+
2. localhost:4317 (default OTEL gRPC port)
|
|
85
|
+
|
|
86
|
+
Returns:
|
|
87
|
+
OTEL collector endpoint URL
|
|
88
|
+
"""
|
|
89
|
+
# Check environment variable
|
|
90
|
+
endpoint = os.getenv("EMPATHY_OTEL_ENDPOINT")
|
|
91
|
+
if endpoint:
|
|
92
|
+
return endpoint
|
|
93
|
+
|
|
94
|
+
# Check localhost:4317
|
|
95
|
+
if self._is_port_open("localhost", 4317):
|
|
96
|
+
return "http://localhost:4317"
|
|
97
|
+
|
|
98
|
+
# Default (will fail availability check)
|
|
99
|
+
return "http://localhost:4317"
|
|
100
|
+
|
|
101
|
+
def _is_port_open(self, host: str, port: int, timeout: float = 1.0) -> bool:
|
|
102
|
+
"""Check if a port is open on a host.
|
|
103
|
+
|
|
104
|
+
Args:
|
|
105
|
+
host: Hostname or IP address
|
|
106
|
+
port: Port number
|
|
107
|
+
timeout: Connection timeout in seconds
|
|
108
|
+
|
|
109
|
+
Returns:
|
|
110
|
+
True if port is open, False otherwise
|
|
111
|
+
"""
|
|
112
|
+
try:
|
|
113
|
+
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
|
114
|
+
sock.settimeout(timeout)
|
|
115
|
+
sock.connect((host, port))
|
|
116
|
+
sock.close()
|
|
117
|
+
return True
|
|
118
|
+
except (TimeoutError, OSError):
|
|
119
|
+
return False
|
|
120
|
+
|
|
121
|
+
def _check_availability(self) -> bool:
|
|
122
|
+
"""Check if OTEL collector is available.
|
|
123
|
+
|
|
124
|
+
Returns:
|
|
125
|
+
True if collector is reachable, False otherwise
|
|
126
|
+
"""
|
|
127
|
+
if not self.endpoint:
|
|
128
|
+
return False
|
|
129
|
+
|
|
130
|
+
# Parse endpoint to extract host and port
|
|
131
|
+
try:
|
|
132
|
+
# Remove http:// or https://
|
|
133
|
+
endpoint = self.endpoint.replace("http://", "").replace("https://", "")
|
|
134
|
+
if ":" in endpoint:
|
|
135
|
+
host, port_str = endpoint.split(":")
|
|
136
|
+
port = int(port_str.split("/")[0]) # Remove any path
|
|
137
|
+
else:
|
|
138
|
+
host = endpoint
|
|
139
|
+
port = 4317 # Default OTEL gRPC port
|
|
140
|
+
|
|
141
|
+
return self._is_port_open(host, port)
|
|
142
|
+
except (ValueError, IndexError):
|
|
143
|
+
return False
|
|
144
|
+
|
|
145
|
+
def _check_otel_installed(self) -> bool:
|
|
146
|
+
"""Check if OTEL dependencies are installed.
|
|
147
|
+
|
|
148
|
+
Returns:
|
|
149
|
+
True if opentelemetry-api and opentelemetry-sdk are installed
|
|
150
|
+
"""
|
|
151
|
+
try:
|
|
152
|
+
import opentelemetry.trace # noqa: F401
|
|
153
|
+
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
|
|
154
|
+
OTLPSpanExporter, # noqa: F401
|
|
155
|
+
)
|
|
156
|
+
from opentelemetry.sdk.trace import TracerProvider # noqa: F401
|
|
157
|
+
from opentelemetry.sdk.trace.export import BatchSpanProcessor # noqa: F401
|
|
158
|
+
|
|
159
|
+
return True
|
|
160
|
+
except ImportError:
|
|
161
|
+
return False
|
|
162
|
+
|
|
163
|
+
def _init_otel(self) -> None:
|
|
164
|
+
"""Initialize OTEL tracer and exporter."""
|
|
165
|
+
try:
|
|
166
|
+
from opentelemetry import trace
|
|
167
|
+
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
|
|
168
|
+
from opentelemetry.sdk.resources import Resource
|
|
169
|
+
from opentelemetry.sdk.trace import TracerProvider
|
|
170
|
+
from opentelemetry.sdk.trace.export import BatchSpanProcessor
|
|
171
|
+
|
|
172
|
+
# Create resource with service name
|
|
173
|
+
resource = Resource.create(
|
|
174
|
+
{
|
|
175
|
+
"service.name": "empathy-framework",
|
|
176
|
+
"service.version": "3.8.0-alpha",
|
|
177
|
+
}
|
|
178
|
+
)
|
|
179
|
+
|
|
180
|
+
# Create tracer provider
|
|
181
|
+
provider = TracerProvider(resource=resource)
|
|
182
|
+
|
|
183
|
+
# Create OTLP exporter
|
|
184
|
+
exporter = OTLPSpanExporter(endpoint=self.endpoint, insecure=True)
|
|
185
|
+
|
|
186
|
+
# Add batch processor
|
|
187
|
+
processor = BatchSpanProcessor(exporter)
|
|
188
|
+
provider.add_span_processor(processor)
|
|
189
|
+
|
|
190
|
+
# Set global tracer provider
|
|
191
|
+
trace.set_tracer_provider(provider)
|
|
192
|
+
|
|
193
|
+
# Get tracer
|
|
194
|
+
self.tracer = trace.get_tracer("empathy.llm.telemetry", "3.8.0-alpha")
|
|
195
|
+
|
|
196
|
+
except Exception as e:
|
|
197
|
+
print(f"⚠️ Failed to initialize OTEL: {e}")
|
|
198
|
+
self._available = False
|
|
199
|
+
|
|
200
|
+
def is_available(self) -> bool:
|
|
201
|
+
"""Check if OTEL backend is available.
|
|
202
|
+
|
|
203
|
+
Returns:
|
|
204
|
+
True if OTEL collector is reachable and dependencies installed
|
|
205
|
+
"""
|
|
206
|
+
return self._available and self._otel_available
|
|
207
|
+
|
|
208
|
+
def log_call(self, record: LLMCallRecord) -> None:
|
|
209
|
+
"""Log an LLM call record to OTEL.
|
|
210
|
+
|
|
211
|
+
Creates an OTEL span with LLM semantic conventions.
|
|
212
|
+
|
|
213
|
+
Args:
|
|
214
|
+
record: LLM call record to log
|
|
215
|
+
"""
|
|
216
|
+
if not self.is_available():
|
|
217
|
+
return
|
|
218
|
+
|
|
219
|
+
try:
|
|
220
|
+
# Create span with LLM semantic conventions
|
|
221
|
+
with self.tracer.start_as_current_span(
|
|
222
|
+
f"llm.{record.provider}.{record.model_id}"
|
|
223
|
+
) as span:
|
|
224
|
+
# Set standard LLM attributes
|
|
225
|
+
span.set_attribute("llm.provider", record.provider)
|
|
226
|
+
span.set_attribute("llm.model", record.model_id)
|
|
227
|
+
span.set_attribute("llm.tier", record.tier)
|
|
228
|
+
span.set_attribute("llm.task_type", record.task_type)
|
|
229
|
+
|
|
230
|
+
# Set token usage
|
|
231
|
+
span.set_attribute("llm.usage.input_tokens", record.input_tokens)
|
|
232
|
+
span.set_attribute("llm.usage.output_tokens", record.output_tokens)
|
|
233
|
+
span.set_attribute(
|
|
234
|
+
"llm.usage.total_tokens", record.input_tokens + record.output_tokens
|
|
235
|
+
)
|
|
236
|
+
|
|
237
|
+
# Set cost and latency
|
|
238
|
+
span.set_attribute("llm.cost.estimated", record.estimated_cost)
|
|
239
|
+
if record.actual_cost:
|
|
240
|
+
span.set_attribute("llm.cost.actual", record.actual_cost)
|
|
241
|
+
span.set_attribute("llm.latency_ms", record.latency_ms)
|
|
242
|
+
|
|
243
|
+
# Set workflow context
|
|
244
|
+
if record.workflow_name:
|
|
245
|
+
span.set_attribute("workflow.name", record.workflow_name)
|
|
246
|
+
if record.step_name:
|
|
247
|
+
span.set_attribute("workflow.step", record.step_name)
|
|
248
|
+
if record.session_id:
|
|
249
|
+
span.set_attribute("session.id", record.session_id)
|
|
250
|
+
|
|
251
|
+
# Set fallback info
|
|
252
|
+
if record.fallback_used:
|
|
253
|
+
span.set_attribute("llm.fallback.used", True)
|
|
254
|
+
if record.original_provider:
|
|
255
|
+
span.set_attribute(
|
|
256
|
+
"llm.fallback.original_provider", record.original_provider
|
|
257
|
+
)
|
|
258
|
+
if record.original_model:
|
|
259
|
+
span.set_attribute("llm.fallback.original_model", record.original_model)
|
|
260
|
+
|
|
261
|
+
# Set error info
|
|
262
|
+
if not record.success:
|
|
263
|
+
span.set_attribute("llm.error", True)
|
|
264
|
+
if record.error_type:
|
|
265
|
+
span.set_attribute("llm.error.type", record.error_type)
|
|
266
|
+
if record.error_message:
|
|
267
|
+
span.set_attribute("llm.error.message", record.error_message)
|
|
268
|
+
|
|
269
|
+
except Exception as e:
|
|
270
|
+
print(f"⚠️ Failed to export LLM call to OTEL: {e}")
|
|
271
|
+
|
|
272
|
+
def log_workflow(self, record: WorkflowRunRecord) -> None:
|
|
273
|
+
"""Log a workflow run record to OTEL.
|
|
274
|
+
|
|
275
|
+
Creates an OTEL trace with workflow semantic conventions.
|
|
276
|
+
|
|
277
|
+
Args:
|
|
278
|
+
record: Workflow run record to log
|
|
279
|
+
"""
|
|
280
|
+
if not self.is_available():
|
|
281
|
+
return
|
|
282
|
+
|
|
283
|
+
try:
|
|
284
|
+
# Create trace for workflow
|
|
285
|
+
with self.tracer.start_as_current_span(f"workflow.{record.workflow_name}") as span:
|
|
286
|
+
# Set workflow attributes
|
|
287
|
+
span.set_attribute("workflow.name", record.workflow_name)
|
|
288
|
+
span.set_attribute("workflow.run_id", record.run_id)
|
|
289
|
+
if record.session_id:
|
|
290
|
+
span.set_attribute("session.id", record.session_id)
|
|
291
|
+
|
|
292
|
+
# Set token usage
|
|
293
|
+
span.set_attribute("workflow.usage.input_tokens", record.total_input_tokens)
|
|
294
|
+
span.set_attribute("workflow.usage.output_tokens", record.total_output_tokens)
|
|
295
|
+
span.set_attribute(
|
|
296
|
+
"workflow.usage.total_tokens",
|
|
297
|
+
record.total_input_tokens + record.total_output_tokens,
|
|
298
|
+
)
|
|
299
|
+
|
|
300
|
+
# Set cost and savings
|
|
301
|
+
span.set_attribute("workflow.cost.total", record.total_cost)
|
|
302
|
+
span.set_attribute("workflow.cost.baseline", record.baseline_cost)
|
|
303
|
+
span.set_attribute("workflow.cost.savings", record.savings)
|
|
304
|
+
span.set_attribute("workflow.cost.savings_percent", record.savings_percent)
|
|
305
|
+
|
|
306
|
+
# Set duration
|
|
307
|
+
span.set_attribute("workflow.duration_ms", record.total_duration_ms)
|
|
308
|
+
|
|
309
|
+
# Set providers and tiers used
|
|
310
|
+
span.set_attribute("workflow.providers_used", ",".join(record.providers_used))
|
|
311
|
+
span.set_attribute("workflow.tiers_used", ",".join(record.tiers_used))
|
|
312
|
+
|
|
313
|
+
# Set success status
|
|
314
|
+
span.set_attribute("workflow.success", record.success)
|
|
315
|
+
if not record.success and record.error:
|
|
316
|
+
span.set_attribute("workflow.error", record.error)
|
|
317
|
+
|
|
318
|
+
# Create child spans for each stage
|
|
319
|
+
for stage in record.stages:
|
|
320
|
+
with self.tracer.start_as_current_span(
|
|
321
|
+
f"stage.{stage.stage_name}"
|
|
322
|
+
) as stage_span:
|
|
323
|
+
stage_span.set_attribute("stage.name", stage.stage_name)
|
|
324
|
+
stage_span.set_attribute("llm.tier", stage.tier)
|
|
325
|
+
stage_span.set_attribute("llm.model", stage.model_id)
|
|
326
|
+
stage_span.set_attribute("llm.usage.input_tokens", stage.input_tokens)
|
|
327
|
+
stage_span.set_attribute("llm.usage.output_tokens", stage.output_tokens)
|
|
328
|
+
stage_span.set_attribute("llm.cost", stage.cost)
|
|
329
|
+
stage_span.set_attribute("llm.latency_ms", stage.latency_ms)
|
|
330
|
+
stage_span.set_attribute("stage.success", stage.success)
|
|
331
|
+
|
|
332
|
+
if stage.skipped:
|
|
333
|
+
stage_span.set_attribute("stage.skipped", True)
|
|
334
|
+
if stage.skip_reason:
|
|
335
|
+
stage_span.set_attribute("stage.skip_reason", stage.skip_reason)
|
|
336
|
+
|
|
337
|
+
if stage.error:
|
|
338
|
+
stage_span.set_attribute("stage.error", stage.error)
|
|
339
|
+
|
|
340
|
+
except Exception as e:
|
|
341
|
+
print(f"⚠️ Failed to export workflow to OTEL: {e}")
|
|
342
|
+
|
|
343
|
+
def flush(self) -> None:
|
|
344
|
+
"""Flush any buffered records to OTEL collector.
|
|
345
|
+
|
|
346
|
+
Called automatically on shutdown or can be called manually.
|
|
347
|
+
"""
|
|
348
|
+
if not self.is_available():
|
|
349
|
+
return
|
|
350
|
+
|
|
351
|
+
try:
|
|
352
|
+
from opentelemetry import trace
|
|
353
|
+
|
|
354
|
+
# Get tracer provider and force flush
|
|
355
|
+
provider = trace.get_tracer_provider()
|
|
356
|
+
if hasattr(provider, "force_flush"):
|
|
357
|
+
provider.force_flush()
|
|
358
|
+
except Exception as e:
|
|
359
|
+
print(f"⚠️ Failed to flush OTEL data: {e}")
|
|
360
|
+
|
|
361
|
+
def __del__(self) -> None:
|
|
362
|
+
"""Cleanup on deletion."""
|
|
363
|
+
self.flush()
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""Context window optimization for XML-enhanced prompts.
|
|
2
|
+
|
|
3
|
+
Provides compression and optimization to reduce token usage by 20-30%.
|
|
4
|
+
|
|
5
|
+
Copyright 2026 Smart-AI-Memory
|
|
6
|
+
Licensed under Fair Source License 0.9
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from empathy_os.optimization.context_optimizer import (
|
|
10
|
+
CompressionLevel,
|
|
11
|
+
ContextOptimizer,
|
|
12
|
+
optimize_xml_prompt,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
"CompressionLevel",
|
|
17
|
+
"ContextOptimizer",
|
|
18
|
+
"optimize_xml_prompt",
|
|
19
|
+
]
|
|
@@ -0,0 +1,272 @@
|
|
|
1
|
+
"""Context window optimization for reducing token usage.
|
|
2
|
+
|
|
3
|
+
Implements compression strategies to reduce prompt tokens by 20-30%
|
|
4
|
+
while maintaining semantic content and XML structure.
|
|
5
|
+
|
|
6
|
+
Copyright 2026 Smart-AI-Memory
|
|
7
|
+
Licensed under Fair Source License 0.9
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import re
|
|
11
|
+
from enum import Enum
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class CompressionLevel(Enum):
    """How aggressively a prompt's context window is compressed.

    Each step up trades a little fidelity for a larger token reduction.
    """

    NONE = "none"              # Pass-through, no changes
    LIGHT = "light"            # Whitespace/comments only (5-10% reduction)
    MODERATE = "moderate"      # Adds tag compression (15-25% reduction)
    AGGRESSIVE = "aggressive"  # Everything enabled (25-35% reduction)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ContextOptimizer:
|
|
24
|
+
"""Optimizes XML prompts to reduce token usage.
|
|
25
|
+
|
|
26
|
+
Strategies:
|
|
27
|
+
1. Tag compression: <thinking> → <t>, <answer> → <a>
|
|
28
|
+
2. Whitespace optimization: Remove excess whitespace
|
|
29
|
+
3. Comment removal: Strip XML comments
|
|
30
|
+
4. Redundancy elimination: Remove duplicate text
|
|
31
|
+
|
|
32
|
+
Usage:
|
|
33
|
+
optimizer = ContextOptimizer(CompressionLevel.MODERATE)
|
|
34
|
+
optimized = optimizer.optimize(xml_prompt)
|
|
35
|
+
"""
|
|
36
|
+
|
|
37
|
+
def __init__(self, level: CompressionLevel = CompressionLevel.MODERATE):
    """Create an optimizer configured for the given compression level.

    Args:
        level: Compression level to apply
    """
    self.level = level

    # Full tag name -> short alias applied during compression.
    # Insertion order matters: replacements run in this order.
    self._tag_mappings = {
        # Core structure tags
        "thinking": "t",
        "answer": "a",
        "agent_role": "role",
        "agent_goal": "goal",
        "agent_backstory": "back",
        # Common output tags
        "description": "desc",
        "recommendation": "rec",
        "assessment": "assess",
        "analysis": "analyze",
        "explanation": "explain",
        "example": "ex",
        "code_review": "review",
        "security_issue": "sec",
        "performance_issue": "perf",
        "architecture": "arch",
        "implementation": "impl",
    }

    # Short alias -> full tag name, consumed by decompress().
    self._reverse_mappings = {short: full for full, short in self._tag_mappings.items()}
|
|
69
|
+
|
|
70
|
+
def optimize(self, xml_prompt: str) -> str:
|
|
71
|
+
"""Optimize XML prompt to reduce token usage.
|
|
72
|
+
|
|
73
|
+
Args:
|
|
74
|
+
xml_prompt: Original XML-structured prompt
|
|
75
|
+
|
|
76
|
+
Returns:
|
|
77
|
+
Optimized prompt with reduced token count
|
|
78
|
+
"""
|
|
79
|
+
if self.level == CompressionLevel.NONE:
|
|
80
|
+
return xml_prompt
|
|
81
|
+
|
|
82
|
+
optimized = xml_prompt
|
|
83
|
+
|
|
84
|
+
# Apply optimizations based on level
|
|
85
|
+
if self.level in (
|
|
86
|
+
CompressionLevel.LIGHT,
|
|
87
|
+
CompressionLevel.MODERATE,
|
|
88
|
+
CompressionLevel.AGGRESSIVE,
|
|
89
|
+
):
|
|
90
|
+
optimized = self._strip_whitespace(optimized)
|
|
91
|
+
optimized = self._remove_comments(optimized)
|
|
92
|
+
|
|
93
|
+
if self.level in (CompressionLevel.MODERATE, CompressionLevel.AGGRESSIVE):
|
|
94
|
+
optimized = self._compress_tags(optimized)
|
|
95
|
+
optimized = self._remove_redundancy(optimized)
|
|
96
|
+
|
|
97
|
+
if self.level == CompressionLevel.AGGRESSIVE:
|
|
98
|
+
optimized = self._aggressive_compression(optimized)
|
|
99
|
+
|
|
100
|
+
return optimized
|
|
101
|
+
|
|
102
|
+
def decompress(self, compressed_response: str) -> str:
|
|
103
|
+
"""Decompress response to restore original tag names.
|
|
104
|
+
|
|
105
|
+
Args:
|
|
106
|
+
compressed_response: Response with compressed tags
|
|
107
|
+
|
|
108
|
+
Returns:
|
|
109
|
+
Response with full tag names restored
|
|
110
|
+
"""
|
|
111
|
+
decompressed = compressed_response
|
|
112
|
+
|
|
113
|
+
# Restore full tag names
|
|
114
|
+
for short, full in self._reverse_mappings.items():
|
|
115
|
+
# Opening tags
|
|
116
|
+
decompressed = decompressed.replace(f"<{short}>", f"<{full}>")
|
|
117
|
+
decompressed = decompressed.replace(f"<{short} ", f"<{full} ")
|
|
118
|
+
# Closing tags
|
|
119
|
+
decompressed = decompressed.replace(f"</{short}>", f"</{full}>")
|
|
120
|
+
|
|
121
|
+
return decompressed
|
|
122
|
+
|
|
123
|
+
def _strip_whitespace(self, text: str) -> str:
|
|
124
|
+
"""Remove excess whitespace while preserving structure.
|
|
125
|
+
|
|
126
|
+
Args:
|
|
127
|
+
text: Input text
|
|
128
|
+
|
|
129
|
+
Returns:
|
|
130
|
+
Text with optimized whitespace
|
|
131
|
+
"""
|
|
132
|
+
# Replace multiple spaces with single space
|
|
133
|
+
text = re.sub(r" {2,}", " ", text)
|
|
134
|
+
|
|
135
|
+
# Remove spaces around XML tags
|
|
136
|
+
text = re.sub(r">\s+<", "><", text)
|
|
137
|
+
|
|
138
|
+
# Remove leading/trailing whitespace from lines
|
|
139
|
+
lines = [line.strip() for line in text.split("\n")]
|
|
140
|
+
|
|
141
|
+
# Remove empty lines
|
|
142
|
+
lines = [line for line in lines if line]
|
|
143
|
+
|
|
144
|
+
return "\n".join(lines)
|
|
145
|
+
|
|
146
|
+
def _remove_comments(self, text: str) -> str:
|
|
147
|
+
"""Remove XML comments.
|
|
148
|
+
|
|
149
|
+
Args:
|
|
150
|
+
text: Input text
|
|
151
|
+
|
|
152
|
+
Returns:
|
|
153
|
+
Text without comments
|
|
154
|
+
"""
|
|
155
|
+
# Remove XML comments
|
|
156
|
+
text = re.sub(r"<!--.*?-->", "", text, flags=re.DOTALL)
|
|
157
|
+
|
|
158
|
+
return text
|
|
159
|
+
|
|
160
|
+
def _compress_tags(self, text: str) -> str:
|
|
161
|
+
"""Compress XML tag names to shorter versions.
|
|
162
|
+
|
|
163
|
+
Args:
|
|
164
|
+
text: Input text
|
|
165
|
+
|
|
166
|
+
Returns:
|
|
167
|
+
Text with compressed tags
|
|
168
|
+
"""
|
|
169
|
+
for full, short in self._tag_mappings.items():
|
|
170
|
+
# Opening tags
|
|
171
|
+
text = text.replace(f"<{full}>", f"<{short}>")
|
|
172
|
+
text = text.replace(f"<{full} ", f"<{short} ")
|
|
173
|
+
# Closing tags
|
|
174
|
+
text = text.replace(f"</{full}>", f"</{short}>")
|
|
175
|
+
|
|
176
|
+
return text
|
|
177
|
+
|
|
178
|
+
def _remove_redundancy(self, text: str) -> str:
|
|
179
|
+
"""Remove redundant phrases and repetition.
|
|
180
|
+
|
|
181
|
+
Args:
|
|
182
|
+
text: Input text
|
|
183
|
+
|
|
184
|
+
Returns:
|
|
185
|
+
Text with redundancy removed
|
|
186
|
+
"""
|
|
187
|
+
# Common redundant phrases in prompts
|
|
188
|
+
redundant_phrases = [
|
|
189
|
+
"Please note that ",
|
|
190
|
+
"It is important to ",
|
|
191
|
+
"Make sure to ",
|
|
192
|
+
"Be sure to ",
|
|
193
|
+
"Remember to ",
|
|
194
|
+
"Don't forget to ",
|
|
195
|
+
]
|
|
196
|
+
|
|
197
|
+
for phrase in redundant_phrases:
|
|
198
|
+
text = text.replace(phrase, "")
|
|
199
|
+
|
|
200
|
+
return text
|
|
201
|
+
|
|
202
|
+
def _aggressive_compression(self, text: str) -> str:
|
|
203
|
+
"""Apply aggressive compression techniques.
|
|
204
|
+
|
|
205
|
+
Args:
|
|
206
|
+
text: Input text
|
|
207
|
+
|
|
208
|
+
Returns:
|
|
209
|
+
Aggressively compressed text
|
|
210
|
+
"""
|
|
211
|
+
# Remove articles (a, an, the) where they don't affect meaning
|
|
212
|
+
# Only in instruction text, not in code or structured output
|
|
213
|
+
lines = []
|
|
214
|
+
for line in text.split("\n"):
|
|
215
|
+
# Skip lines that look like code or XML content
|
|
216
|
+
if "<" in line and ">" in line:
|
|
217
|
+
lines.append(line)
|
|
218
|
+
else:
|
|
219
|
+
# Remove articles from instruction text
|
|
220
|
+
line = re.sub(r"\b(a|an|the)\b\s+", "", line, flags=re.IGNORECASE)
|
|
221
|
+
lines.append(line)
|
|
222
|
+
|
|
223
|
+
text = "\n".join(lines)
|
|
224
|
+
|
|
225
|
+
# Abbreviate common instruction words
|
|
226
|
+
abbreviations = {
|
|
227
|
+
"Generate": "Gen",
|
|
228
|
+
"Analyze": "Analyze", # Keep as is (short)
|
|
229
|
+
"Provide": "Give",
|
|
230
|
+
"Identify": "ID",
|
|
231
|
+
"Determine": "Find",
|
|
232
|
+
"Evaluate": "Eval",
|
|
233
|
+
"Recommend": "Rec",
|
|
234
|
+
"Implement": "Impl",
|
|
235
|
+
"following": "below",
|
|
236
|
+
"should be": "is",
|
|
237
|
+
"you should": "you",
|
|
238
|
+
"must be": "is",
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
for full, abbrev in abbreviations.items():
|
|
242
|
+
text = text.replace(full, abbrev)
|
|
243
|
+
|
|
244
|
+
return text
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def optimize_xml_prompt(
    prompt: str,
    level: CompressionLevel = CompressionLevel.MODERATE,
) -> str:
    """Convenience function to optimize XML prompt.

    Args:
        prompt: XML-structured prompt to optimize
        level: Compression level to apply

    Returns:
        Optimized prompt

    Note:
        Article removal ("the", "a", "an") only happens at
        CompressionLevel.AGGRESSIVE; the MODERATE default preserves the
        wording and only compresses tags, whitespace, comments, and
        boilerplate phrases.

    Example:
        >>> prompt = '''<thinking>
        ... Analyze the code carefully
        ... </thinking>
        ... <answer>
        ... The code is good
        ... </answer>'''
        >>> print(optimize_xml_prompt(prompt))
        <t>
        Analyze the code carefully
        </t><a>
        The code is good
        </a>
    """
    optimizer = ContextOptimizer(level)
    return optimizer.optimize(prompt)
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"""Empathy Framework - Plugin System
|
|
2
|
+
|
|
3
|
+
Enables modular extension of the Empathy Framework with domain-specific plugins.
|
|
4
|
+
|
|
5
|
+
Copyright 2025 Smart AI Memory, LLC
|
|
6
|
+
Licensed under Fair Source 0.9
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from .base import (
|
|
10
|
+
BasePlugin,
|
|
11
|
+
BaseWizard,
|
|
12
|
+
PluginError,
|
|
13
|
+
PluginLoadError,
|
|
14
|
+
PluginMetadata,
|
|
15
|
+
PluginValidationError,
|
|
16
|
+
)
|
|
17
|
+
from .registry import PluginRegistry, get_global_registry
|
|
18
|
+
|
|
19
|
+
__all__ = [
|
|
20
|
+
"BasePlugin",
|
|
21
|
+
"BaseWizard",
|
|
22
|
+
"PluginError",
|
|
23
|
+
"PluginLoadError",
|
|
24
|
+
"PluginMetadata",
|
|
25
|
+
"PluginRegistry",
|
|
26
|
+
"PluginValidationError",
|
|
27
|
+
"get_global_registry",
|
|
28
|
+
]
|