empathy-framework 3.7.0-py3-none-any.whl → 3.7.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coach_wizards/code_reviewer_README.md +60 -0
- coach_wizards/code_reviewer_wizard.py +180 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/METADATA +20 -2
- empathy_framework-3.7.1.dist-info/RECORD +327 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/top_level.txt +5 -1
- empathy_healthcare_plugin/monitors/__init__.py +9 -0
- empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +315 -0
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +44 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +300 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +214 -0
- empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +306 -0
- empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +389 -0
- empathy_llm_toolkit/agent_factory/__init__.py +53 -0
- empathy_llm_toolkit/agent_factory/adapters/__init__.py +85 -0
- empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +312 -0
- empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +454 -0
- empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +298 -0
- empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +362 -0
- empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +333 -0
- empathy_llm_toolkit/agent_factory/adapters/native.py +228 -0
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +426 -0
- empathy_llm_toolkit/agent_factory/base.py +305 -0
- empathy_llm_toolkit/agent_factory/crews/__init__.py +67 -0
- empathy_llm_toolkit/agent_factory/crews/code_review.py +1113 -0
- empathy_llm_toolkit/agent_factory/crews/health_check.py +1246 -0
- empathy_llm_toolkit/agent_factory/crews/refactoring.py +1128 -0
- empathy_llm_toolkit/agent_factory/crews/security_audit.py +1018 -0
- empathy_llm_toolkit/agent_factory/decorators.py +286 -0
- empathy_llm_toolkit/agent_factory/factory.py +558 -0
- empathy_llm_toolkit/agent_factory/framework.py +192 -0
- empathy_llm_toolkit/agent_factory/memory_integration.py +324 -0
- empathy_llm_toolkit/agent_factory/resilient.py +320 -0
- empathy_llm_toolkit/cli/__init__.py +8 -0
- empathy_llm_toolkit/cli/sync_claude.py +487 -0
- empathy_llm_toolkit/code_health.py +150 -3
- empathy_llm_toolkit/config/__init__.py +29 -0
- empathy_llm_toolkit/config/unified.py +295 -0
- empathy_llm_toolkit/routing/__init__.py +32 -0
- empathy_llm_toolkit/routing/model_router.py +362 -0
- empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +413 -0
- empathy_llm_toolkit/security/PHASE2_COMPLETE.md +384 -0
- empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- empathy_llm_toolkit/security/QUICK_REFERENCE.md +316 -0
- empathy_llm_toolkit/security/README.md +262 -0
- empathy_llm_toolkit/security/__init__.py +62 -0
- empathy_llm_toolkit/security/audit_logger.py +929 -0
- empathy_llm_toolkit/security/audit_logger_example.py +152 -0
- empathy_llm_toolkit/security/pii_scrubber.py +640 -0
- empathy_llm_toolkit/security/secrets_detector.py +678 -0
- empathy_llm_toolkit/security/secrets_detector_example.py +304 -0
- empathy_llm_toolkit/security/secure_memdocs.py +1192 -0
- empathy_llm_toolkit/security/secure_memdocs_example.py +278 -0
- empathy_llm_toolkit/wizards/__init__.py +38 -0
- empathy_llm_toolkit/wizards/base_wizard.py +364 -0
- empathy_llm_toolkit/wizards/customer_support_wizard.py +190 -0
- empathy_llm_toolkit/wizards/healthcare_wizard.py +362 -0
- empathy_llm_toolkit/wizards/patient_assessment_README.md +64 -0
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +193 -0
- empathy_llm_toolkit/wizards/technology_wizard.py +194 -0
- empathy_os/__init__.py +52 -52
- empathy_os/adaptive/__init__.py +13 -0
- empathy_os/adaptive/task_complexity.py +127 -0
- empathy_os/cli.py +118 -8
- empathy_os/cli_unified.py +121 -1
- empathy_os/config/__init__.py +63 -0
- empathy_os/config/xml_config.py +239 -0
- empathy_os/dashboard/__init__.py +15 -0
- empathy_os/dashboard/server.py +743 -0
- empathy_os/memory/__init__.py +195 -0
- empathy_os/memory/claude_memory.py +466 -0
- empathy_os/memory/config.py +224 -0
- empathy_os/memory/control_panel.py +1298 -0
- empathy_os/memory/edges.py +179 -0
- empathy_os/memory/graph.py +567 -0
- empathy_os/memory/long_term.py +1193 -0
- empathy_os/memory/nodes.py +179 -0
- empathy_os/memory/redis_bootstrap.py +540 -0
- empathy_os/memory/security/__init__.py +31 -0
- empathy_os/memory/security/audit_logger.py +930 -0
- empathy_os/memory/security/pii_scrubber.py +640 -0
- empathy_os/memory/security/secrets_detector.py +678 -0
- empathy_os/memory/short_term.py +2119 -0
- empathy_os/memory/storage/__init__.py +15 -0
- empathy_os/memory/summary_index.py +583 -0
- empathy_os/memory/unified.py +619 -0
- empathy_os/metrics/__init__.py +12 -0
- empathy_os/metrics/prompt_metrics.py +190 -0
- empathy_os/models/__init__.py +136 -0
- empathy_os/models/__main__.py +13 -0
- empathy_os/models/cli.py +655 -0
- empathy_os/models/empathy_executor.py +354 -0
- empathy_os/models/executor.py +252 -0
- empathy_os/models/fallback.py +671 -0
- empathy_os/models/provider_config.py +563 -0
- empathy_os/models/registry.py +382 -0
- empathy_os/models/tasks.py +302 -0
- empathy_os/models/telemetry.py +548 -0
- empathy_os/models/token_estimator.py +378 -0
- empathy_os/models/validation.py +274 -0
- empathy_os/monitoring/__init__.py +52 -0
- empathy_os/monitoring/alerts.py +23 -0
- empathy_os/monitoring/alerts_cli.py +268 -0
- empathy_os/monitoring/multi_backend.py +271 -0
- empathy_os/monitoring/otel_backend.py +363 -0
- empathy_os/optimization/__init__.py +19 -0
- empathy_os/optimization/context_optimizer.py +272 -0
- empathy_os/plugins/__init__.py +28 -0
- empathy_os/plugins/base.py +361 -0
- empathy_os/plugins/registry.py +268 -0
- empathy_os/project_index/__init__.py +30 -0
- empathy_os/project_index/cli.py +335 -0
- empathy_os/project_index/crew_integration.py +430 -0
- empathy_os/project_index/index.py +425 -0
- empathy_os/project_index/models.py +501 -0
- empathy_os/project_index/reports.py +473 -0
- empathy_os/project_index/scanner.py +538 -0
- empathy_os/prompts/__init__.py +61 -0
- empathy_os/prompts/config.py +77 -0
- empathy_os/prompts/context.py +177 -0
- empathy_os/prompts/parser.py +285 -0
- empathy_os/prompts/registry.py +313 -0
- empathy_os/prompts/templates.py +208 -0
- empathy_os/resilience/__init__.py +56 -0
- empathy_os/resilience/circuit_breaker.py +256 -0
- empathy_os/resilience/fallback.py +179 -0
- empathy_os/resilience/health.py +300 -0
- empathy_os/resilience/retry.py +209 -0
- empathy_os/resilience/timeout.py +135 -0
- empathy_os/routing/__init__.py +43 -0
- empathy_os/routing/chain_executor.py +433 -0
- empathy_os/routing/classifier.py +217 -0
- empathy_os/routing/smart_router.py +234 -0
- empathy_os/routing/wizard_registry.py +307 -0
- empathy_os/trust/__init__.py +28 -0
- empathy_os/trust/circuit_breaker.py +579 -0
- empathy_os/validation/__init__.py +19 -0
- empathy_os/validation/xml_validator.py +281 -0
- empathy_os/wizard_factory_cli.py +170 -0
- empathy_os/workflows/__init__.py +360 -0
- empathy_os/workflows/base.py +1530 -0
- empathy_os/workflows/bug_predict.py +962 -0
- empathy_os/workflows/code_review.py +960 -0
- empathy_os/workflows/code_review_adapters.py +310 -0
- empathy_os/workflows/code_review_pipeline.py +720 -0
- empathy_os/workflows/config.py +600 -0
- empathy_os/workflows/dependency_check.py +648 -0
- empathy_os/workflows/document_gen.py +1069 -0
- empathy_os/workflows/documentation_orchestrator.py +1205 -0
- empathy_os/workflows/health_check.py +679 -0
- empathy_os/workflows/keyboard_shortcuts/__init__.py +39 -0
- empathy_os/workflows/keyboard_shortcuts/generators.py +386 -0
- empathy_os/workflows/keyboard_shortcuts/parsers.py +414 -0
- empathy_os/workflows/keyboard_shortcuts/prompts.py +295 -0
- empathy_os/workflows/keyboard_shortcuts/schema.py +193 -0
- empathy_os/workflows/keyboard_shortcuts/workflow.py +505 -0
- empathy_os/workflows/manage_documentation.py +804 -0
- empathy_os/workflows/new_sample_workflow1.py +146 -0
- empathy_os/workflows/new_sample_workflow1_README.md +150 -0
- empathy_os/workflows/perf_audit.py +687 -0
- empathy_os/workflows/pr_review.py +748 -0
- empathy_os/workflows/progress.py +445 -0
- empathy_os/workflows/progress_server.py +322 -0
- empathy_os/workflows/refactor_plan.py +691 -0
- empathy_os/workflows/release_prep.py +808 -0
- empathy_os/workflows/research_synthesis.py +404 -0
- empathy_os/workflows/secure_release.py +585 -0
- empathy_os/workflows/security_adapters.py +297 -0
- empathy_os/workflows/security_audit.py +1050 -0
- empathy_os/workflows/step_config.py +234 -0
- empathy_os/workflows/test5.py +125 -0
- empathy_os/workflows/test5_README.md +158 -0
- empathy_os/workflows/test_gen.py +1855 -0
- empathy_os/workflows/test_lifecycle.py +526 -0
- empathy_os/workflows/test_maintenance.py +626 -0
- empathy_os/workflows/test_maintenance_cli.py +590 -0
- empathy_os/workflows/test_maintenance_crew.py +821 -0
- empathy_os/workflows/xml_enhanced_crew.py +285 -0
- empathy_software_plugin/cli/__init__.py +120 -0
- empathy_software_plugin/cli/inspect.py +362 -0
- empathy_software_plugin/cli.py +3 -1
- empathy_software_plugin/wizards/__init__.py +42 -0
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +392 -0
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +511 -0
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +503 -0
- empathy_software_plugin/wizards/ai_context_wizard.py +441 -0
- empathy_software_plugin/wizards/ai_documentation_wizard.py +503 -0
- empathy_software_plugin/wizards/base_wizard.py +288 -0
- empathy_software_plugin/wizards/book_chapter_wizard.py +519 -0
- empathy_software_plugin/wizards/code_review_wizard.py +606 -0
- empathy_software_plugin/wizards/debugging/__init__.py +50 -0
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +414 -0
- empathy_software_plugin/wizards/debugging/config_loaders.py +442 -0
- empathy_software_plugin/wizards/debugging/fix_applier.py +469 -0
- empathy_software_plugin/wizards/debugging/language_patterns.py +383 -0
- empathy_software_plugin/wizards/debugging/linter_parsers.py +470 -0
- empathy_software_plugin/wizards/debugging/verification.py +369 -0
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +537 -0
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +816 -0
- empathy_software_plugin/wizards/multi_model_wizard.py +501 -0
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +422 -0
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +400 -0
- empathy_software_plugin/wizards/performance/__init__.py +9 -0
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +221 -0
- empathy_software_plugin/wizards/performance/profiler_parsers.py +278 -0
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +429 -0
- empathy_software_plugin/wizards/performance_profiling_wizard.py +305 -0
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +425 -0
- empathy_software_plugin/wizards/rag_pattern_wizard.py +461 -0
- empathy_software_plugin/wizards/security/__init__.py +32 -0
- empathy_software_plugin/wizards/security/exploit_analyzer.py +290 -0
- empathy_software_plugin/wizards/security/owasp_patterns.py +241 -0
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +604 -0
- empathy_software_plugin/wizards/security_analysis_wizard.py +322 -0
- empathy_software_plugin/wizards/security_learning_wizard.py +740 -0
- empathy_software_plugin/wizards/tech_debt_wizard.py +726 -0
- empathy_software_plugin/wizards/testing/__init__.py +27 -0
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +459 -0
- empathy_software_plugin/wizards/testing/quality_analyzer.py +531 -0
- empathy_software_plugin/wizards/testing/test_suggester.py +533 -0
- empathy_software_plugin/wizards/testing_wizard.py +274 -0
- hot_reload/README.md +473 -0
- hot_reload/__init__.py +62 -0
- hot_reload/config.py +84 -0
- hot_reload/integration.py +228 -0
- hot_reload/reloader.py +298 -0
- hot_reload/watcher.py +179 -0
- hot_reload/websocket.py +176 -0
- scaffolding/README.md +589 -0
- scaffolding/__init__.py +35 -0
- scaffolding/__main__.py +14 -0
- scaffolding/cli.py +240 -0
- test_generator/__init__.py +38 -0
- test_generator/__main__.py +14 -0
- test_generator/cli.py +226 -0
- test_generator/generator.py +325 -0
- test_generator/risk_analyzer.py +216 -0
- workflow_patterns/__init__.py +33 -0
- workflow_patterns/behavior.py +249 -0
- workflow_patterns/core.py +76 -0
- workflow_patterns/output.py +99 -0
- workflow_patterns/registry.py +255 -0
- workflow_patterns/structural.py +288 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
- agents/code_inspection/patterns/inspection/recurring_B112.json +0 -18
- agents/code_inspection/patterns/inspection/recurring_F541.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_FORMAT.json +0 -25
- agents/code_inspection/patterns/inspection/recurring_bug_20250822_def456.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20250915_abc123.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_3c5b9951.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_97c0f72f.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a0871d53.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a9b6ec41.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_null_001.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_builtin.json +0 -16
- agents/compliance_anticipation_agent.py +0 -1422
- agents/compliance_db.py +0 -339
- agents/epic_integration_wizard.py +0 -530
- agents/notifications.py +0 -291
- agents/trust_building_behaviors.py +0 -872
- empathy_framework-3.7.0.dist-info/RECORD +0 -105
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/WHEEL +0 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/licenses/LICENSE +0 -0
- /empathy_os/{monitoring.py → agent_monitoring.py} +0 -0
empathy_os/models/token_estimator.py
@@ -0,0 +1,378 @@
"""Token Estimation Service

Pre-flight token estimation for cost prediction using tiktoken.
Provides accurate token counts before workflow execution.

Copyright 2025 Smart AI Memory, LLC
Licensed under Fair Source 0.9
"""

from __future__ import annotations

import functools
from typing import Any

# Try to import tiktoken, fall back to heuristic if not available
try:
    import tiktoken

    TIKTOKEN_AVAILABLE = True
except ImportError:
    TIKTOKEN_AVAILABLE = False


# Heuristic fallback: ~4 tokens per word, ~0.25 tokens per character
TOKENS_PER_CHAR_HEURISTIC = 0.25


@functools.lru_cache(maxsize=4)
def _get_encoding(model_id: str) -> Any:
    """Get tiktoken encoding for a model, with caching."""
    if not TIKTOKEN_AVAILABLE:
        return None

    # Map model IDs to encoding names
    if "claude" in model_id.lower() or "anthropic" in model_id.lower():
        # Claude uses cl100k_base-like encoding
        return tiktoken.get_encoding("cl100k_base")
    if "gpt-4" in model_id.lower() or "gpt-3.5" in model_id.lower() or "o1" in model_id.lower():
        return tiktoken.encoding_for_model("gpt-4")
    # Default to cl100k_base for unknown models
    return tiktoken.get_encoding("cl100k_base")


def estimate_tokens(text: str, model_id: str = "claude-sonnet-4-5-20250514") -> int:
    """Estimate token count for text.

    Uses tiktoken for accurate counting, falls back to heuristic if unavailable.

    Args:
        text: The text to count tokens for
        model_id: The model ID to use for encoding selection

    Returns:
        Estimated token count

    """
    if not text:
        return 0

    if TIKTOKEN_AVAILABLE:
        try:
            encoding = _get_encoding(model_id)
            if encoding:
                return len(encoding.encode(text))
        except Exception:
            pass  # Fall through to heuristic

    # Heuristic fallback
    return max(1, int(len(text) * TOKENS_PER_CHAR_HEURISTIC))


def estimate_workflow_cost(
    workflow_name: str,
    input_text: str,
    provider: str = "anthropic",
    target_path: str | None = None,
) -> dict[str, Any]:
    """Estimate total workflow cost before execution.

    Analyzes workflow stages and estimates token usage and cost for each,
    providing a cost range for the full workflow.

    Args:
        workflow_name: Name of the workflow (e.g., "security-audit", "test-gen")
        input_text: The input text/code to be processed
        provider: LLM provider (anthropic, openai, ollama, hybrid)
        target_path: Optional path for file-based workflows

    Returns:
        Dictionary with cost estimates:
        {
            "workflow": str,
            "provider": str,
            "input_tokens": int,
            "stages": [...],
            "total_min": float,
            "total_max": float,
            "display": str,
            "risk": "low" | "medium" | "high"
        }

    """
    from .registry import get_model, get_supported_providers

    # Validate provider
    if provider not in get_supported_providers():
        provider = "anthropic"  # Default fallback

    # Workflow stage configurations by workflow name
    # Based on actual workflow implementations
    WORKFLOW_STAGES = {
        "security-audit": [
            {"name": "identify_vulnerabilities", "tier": "capable"},
            {"name": "analyze_risk", "tier": "capable"},
            {"name": "generate_report", "tier": "cheap"},
        ],
        "code-review": [
            {"name": "analyze_code", "tier": "capable"},
            {"name": "generate_feedback", "tier": "capable"},
            {"name": "summarize", "tier": "cheap"},
        ],
        "test-gen": [
            {"name": "analyze_code", "tier": "capable"},
            {"name": "generate_tests", "tier": "capable"},
            {"name": "review_tests", "tier": "cheap"},
        ],
        "doc-gen": [
            {"name": "analyze_structure", "tier": "cheap"},
            {"name": "generate_documentation", "tier": "capable"},
        ],
        "bug-predict": [
            {"name": "analyze_patterns", "tier": "capable"},
            {"name": "predict_risks", "tier": "capable"},
        ],
        "refactor-plan": [
            {"name": "identify_issues", "tier": "capable"},
            {"name": "plan_refactoring", "tier": "capable"},
            {"name": "review_plan", "tier": "cheap"},
        ],
        "perf-audit": [
            {"name": "analyze_performance", "tier": "capable"},
            {"name": "identify_bottlenecks", "tier": "capable"},
            {"name": "generate_recommendations", "tier": "cheap"},
        ],
        "health-check": [
            {"name": "scan_codebase", "tier": "cheap"},
            {"name": "analyze_health", "tier": "capable"},
            {"name": "generate_report", "tier": "cheap"},
        ],
        "pr-review": [
            {"name": "analyze_diff", "tier": "capable"},
            {"name": "check_quality", "tier": "capable"},
            {"name": "generate_review", "tier": "capable"},
        ],
        "pro-review": [
            {"name": "deep_analysis", "tier": "premium"},
            {"name": "generate_insights", "tier": "capable"},
        ],
    }

    # Get stage configuration for this workflow
    stages_config = WORKFLOW_STAGES.get(
        workflow_name,
        [
            {"name": "analyze", "tier": "capable"},
            {"name": "generate", "tier": "capable"},
            {"name": "review", "tier": "cheap"},
        ],
    )

    # Estimate input tokens
    input_tokens = estimate_tokens(input_text)

    # If we have a target path, estimate additional content
    if target_path:
        try:
            import os

            if os.path.isfile(target_path):
                with open(target_path, encoding="utf-8", errors="ignore") as f:
                    file_content = f.read()
                input_tokens += estimate_tokens(file_content)
            elif os.path.isdir(target_path):
                # Estimate based on directory size (rough heuristic)
                total_chars = 0
                for root, _, files in os.walk(target_path):
                    for file in files[:50]:  # Limit to first 50 files
                        if file.endswith((".py", ".js", ".ts", ".tsx", ".jsx")):
                            try:
                                filepath = os.path.join(root, file)
                                with open(filepath, encoding="utf-8", errors="ignore") as f:
                                    total_chars += len(f.read())
                            except Exception:
                                pass
                input_tokens += int(total_chars * TOKENS_PER_CHAR_HEURISTIC)
        except Exception:
            pass  # Keep original estimate

    # Output multipliers by stage type
    output_multipliers = {
        "identify": 0.3,
        "analyze": 0.8,
        "generate": 2.0,
        "review": 0.5,
        "summarize": 0.3,
        "fix": 1.5,
        "test": 1.5,
        "document": 1.0,
    }

    estimates = []
    total_min = 0.0
    total_max = 0.0

    for stage in stages_config:
        stage_name = stage.get("name", "unknown")
        tier = stage.get("tier", "capable")

        # Get model for this tier
        try:
            model_info = get_model(provider, tier)
        except Exception:
            # Fallback to capable tier
            model_info = get_model(provider, "capable")

        if model_info is None:
            # Skip stage if no model available
            continue

        # Estimate output tokens based on stage type
        multiplier = 1.0
        for stage_type, mult in output_multipliers.items():
            if stage_type in stage_name.lower():
                multiplier = mult
                break

        est_output = int(input_tokens * multiplier)

        # Calculate cost
        cost = (input_tokens / 1_000_000) * model_info.input_cost_per_million + (
            est_output / 1_000_000
        ) * model_info.output_cost_per_million

        estimates.append(
            {
                "stage": stage_name,
                "tier": tier,
                "model": model_info.id,
                "estimated_input_tokens": input_tokens,
                "estimated_output_tokens": est_output,
                "estimated_cost": round(cost, 6),
            },
        )

        # Accumulate with variance (80% - 120%)
        total_min += cost * 0.8
        total_max += cost * 1.2

    # Determine risk level
    if total_max > 1.0:
        risk = "high"
    elif total_max > 0.10:
        risk = "medium"
    else:
        risk = "low"

    return {
        "workflow": workflow_name,
        "provider": provider,
        "input_tokens": input_tokens,
        "stages": estimates,
        "total_min": round(total_min, 4),
        "total_max": round(total_max, 4),
        "display": f"${total_min:.3f} - ${total_max:.3f}",
        "risk": risk,
        "tiktoken_available": TIKTOKEN_AVAILABLE,
    }


def estimate_single_call_cost(
    text: str,
    task_type: str,
    provider: str = "anthropic",
) -> dict[str, Any]:
    """Estimate cost for a single LLM call.

    Args:
        text: Input text
        task_type: Type of task (e.g., "summarize", "generate_code")
        provider: LLM provider

    Returns:
        Cost estimate dictionary

    """
    from .registry import get_model
    from .tasks import get_tier_for_task

    input_tokens = estimate_tokens(text)

    # Get tier for task
    tier = get_tier_for_task(task_type)
    model_info = get_model(provider, tier.value)

    if model_info is None:
        # Return a fallback estimate if no model available
        return {
            "task_type": task_type,
            "tier": tier.value,
            "model": "unknown",
            "provider": provider,
            "input_tokens": input_tokens,
            "estimated_output_tokens": input_tokens,
            "estimated_cost": 0.0,
            "display": "$0.0000",
        }

    # Estimate output based on task type
    output_multipliers = {
        "summarize": 0.3,
        "classify": 0.1,
        "generate_code": 1.5,
        "fix_bug": 1.2,
        "review": 0.5,
        "document": 0.8,
    }

    multiplier = output_multipliers.get(task_type, 1.0)
    est_output = int(input_tokens * multiplier)

    cost = (input_tokens / 1_000_000) * model_info.input_cost_per_million + (
        est_output / 1_000_000
    ) * model_info.output_cost_per_million

    return {
        "task_type": task_type,
        "tier": tier.value,
        "model": model_info.id,
        "provider": provider,
        "input_tokens": input_tokens,
        "estimated_output_tokens": est_output,
        "estimated_cost": round(cost, 6),
        "display": f"${cost:.4f}",
    }


# CLI support
if __name__ == "__main__":
    import argparse
    import json
    import sys

    parser = argparse.ArgumentParser(description="Estimate workflow costs")
    parser.add_argument("workflow", help="Workflow name (e.g., security-audit)")
    parser.add_argument("--input", "-i", help="Input text or file path")
    parser.add_argument("--provider", "-p", default="anthropic", help="LLM provider")
    parser.add_argument("--target", "-t", help="Target path for file-based workflows")

    args = parser.parse_args()

    # Read input
    input_text = ""
    if args.input:
        try:
            with open(args.input) as f:
                input_text = f.read()
        except FileNotFoundError:
            input_text = args.input

    result = estimate_workflow_cost(
        workflow_name=args.workflow,
        input_text=input_text,
        provider=args.provider,
        target_path=args.target,
    )

    print(json.dumps(result, indent=2))
    sys.exit(0)
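The estimator above exposes a small public surface: estimate_tokens, estimate_workflow_cost, and estimate_single_call_cost. A minimal usage sketch follows, assuming the module imports as empathy_os.models.token_estimator (the path listed in the manifest above), that the "anthropic" provider exists in the bundled registry, and that "app.py" is a placeholder input file.

```python
# Usage sketch (assumptions: empathy-framework 3.7.1 installed, "anthropic"
# configured in the model registry, "app.py" standing in for any source file).
from empathy_os.models.token_estimator import estimate_tokens, estimate_workflow_cost

with open("app.py", encoding="utf-8") as f:
    source = f.read()

# Uses tiktoken when installed; otherwise the ~0.25 tokens-per-character heuristic.
print(estimate_tokens(source))

# Per-stage estimates plus an 80%-120% total range and a low/medium/high risk label.
estimate = estimate_workflow_cost("security-audit", input_text=source, provider="anthropic")
print(estimate["display"], estimate["risk"])
for stage in estimate["stages"]:
    print(stage["stage"], stage["tier"], stage["estimated_cost"])
```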
empathy_os/models/validation.py
@@ -0,0 +1,274 @@
"""Configuration Validation for Multi-Model Workflows

Provides schema validation for workflow configurations:
- Required field validation
- Type checking
- Value range validation
- Provider/tier existence checks

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

from dataclasses import dataclass, field
from typing import Any

from .registry import MODEL_REGISTRY, ModelTier


@dataclass
class ValidationError:
    """A single validation error."""

    path: str
    message: str
    severity: str = "error"  # "error" | "warning"

    def __str__(self) -> str:
        return f"[{self.severity.upper()}] {self.path}: {self.message}"


@dataclass
class ValidationResult:
    """Result of configuration validation."""

    valid: bool
    errors: list[ValidationError] = field(default_factory=list)
    warnings: list[ValidationError] = field(default_factory=list)

    def add_error(self, path: str, message: str) -> None:
        """Add an error."""
        self.errors.append(ValidationError(path, message, "error"))
        self.valid = False

    def add_warning(self, path: str, message: str) -> None:
        """Add a warning."""
        self.warnings.append(ValidationError(path, message, "warning"))

    def __str__(self) -> str:
        lines = []
        if self.valid:
            lines.append("Configuration is valid")
        else:
            lines.append("Configuration has errors")

        for error in self.errors:
            lines.append(f"  {error}")
        for warning in self.warnings:
            lines.append(f"  {warning}")

        return "\n".join(lines)


class ConfigValidator:
    """Validator for multi-model workflow configurations.

    Validates:
    - Provider names exist in registry
    - Tier names are valid
    - Required fields are present
    - Numeric values are in valid ranges
    """

    # Valid provider names from registry
    VALID_PROVIDERS = set(MODEL_REGISTRY.keys())

    # Valid tier names
    VALID_TIERS = {tier.value for tier in ModelTier}

    # Schema for workflow config
    WORKFLOW_SCHEMA = {
        "name": {"type": str, "required": True},
        "description": {"type": str, "required": False},
        "default_provider": {"type": str, "required": False},
        "stages": {"type": list, "required": False},
    }

    # Schema for stage config
    STAGE_SCHEMA = {
        "name": {"type": str, "required": True},
        "tier": {"type": str, "required": True},
        "provider": {"type": str, "required": False},
        "timeout_ms": {"type": int, "required": False, "min": 0, "max": 600000},
        "max_retries": {"type": int, "required": False, "min": 0, "max": 10},
    }

    def validate_workflow_config(self, config: dict[str, Any]) -> ValidationResult:
        """Validate a workflow configuration dictionary.

        Args:
            config: Workflow configuration dict

        Returns:
            ValidationResult with any errors or warnings

        """
        result = ValidationResult(valid=True)

        # Check required fields
        for field_name, spec in self.WORKFLOW_SCHEMA.items():
            if spec.get("required") and field_name not in config:
                result.add_error(field_name, f"Required field '{field_name}' is missing")

        # Validate types
        for field_name, value in config.items():
            if field_name in self.WORKFLOW_SCHEMA:
                spec = self.WORKFLOW_SCHEMA[field_name]
                expected_type = spec.get("type")
                if expected_type is not None:
                    # Cast to type for isinstance check
                    type_cls = (
                        expected_type if isinstance(expected_type, type) else type(expected_type)
                    )
                    if not isinstance(value, type_cls):
                        type_name = getattr(type_cls, "__name__", str(type_cls))
                        result.add_error(
                            field_name,
                            f"Expected {type_name}, got {type(value).__name__}",
                        )

        # Validate default_provider
        if "default_provider" in config:
            provider = config["default_provider"]
            if provider not in self.VALID_PROVIDERS:
                result.add_error(
                    "default_provider",
                    f"Unknown provider '{provider}'. Valid: {sorted(self.VALID_PROVIDERS)}",
                )

        # Validate stages
        if "stages" in config and isinstance(config["stages"], list):
            for i, stage in enumerate(config["stages"]):
                stage_path = f"stages[{i}]"
                self._validate_stage(stage, stage_path, result)

        return result

    def _validate_stage(self, stage: dict[str, Any], path: str, result: ValidationResult) -> None:
        """Validate a single stage configuration."""
        if not isinstance(stage, dict):
            result.add_error(path, "Stage must be a dictionary")
            return

        # Check required fields
        for field_name, spec in self.STAGE_SCHEMA.items():
            if spec.get("required") and field_name not in stage:
                result.add_error(f"{path}.{field_name}", "Required field is missing")

        # Validate tier
        if "tier" in stage:
            tier = stage["tier"]
            if tier not in self.VALID_TIERS:
                result.add_error(
                    f"{path}.tier",
                    f"Unknown tier '{tier}'. Valid: {sorted(self.VALID_TIERS)}",
                )

        # Validate provider
        if "provider" in stage:
            provider = stage["provider"]
            if provider not in self.VALID_PROVIDERS:
                result.add_error(
                    f"{path}.provider",
                    f"Unknown provider '{provider}'. Valid: {sorted(self.VALID_PROVIDERS)}",
                )

        # Validate numeric ranges
        for field_name in ["timeout_ms", "max_retries"]:
            if field_name in stage:
                value = stage[field_name]
                spec = self.STAGE_SCHEMA[field_name]

                if not isinstance(value, int):
                    result.add_error(
                        f"{path}.{field_name}",
                        f"Expected integer, got {type(value).__name__}",
                    )
                else:
                    min_val = spec.get("min")
                    max_val = spec.get("max")
                    if isinstance(min_val, int | float) and value < min_val:
                        result.add_error(
                            f"{path}.{field_name}",
                            f"Value {value} below minimum {min_val}",
                        )
                    if isinstance(max_val, int | float) and value > max_val:
                        result.add_error(
                            f"{path}.{field_name}",
                            f"Value {value} above maximum {max_val}",
                        )

    def validate_provider_tier(self, provider: str, tier: str) -> ValidationResult:
        """Validate that a provider/tier combination exists.

        Args:
            provider: Provider name
            tier: Tier name

        Returns:
            ValidationResult

        """
        result = ValidationResult(valid=True)

        if provider not in self.VALID_PROVIDERS:
            result.add_error("provider", f"Unknown provider '{provider}'")
            return result

        if tier not in self.VALID_TIERS:
            result.add_error("tier", f"Unknown tier '{tier}'")
            return result

        # Check if combination exists in registry
        if tier not in MODEL_REGISTRY.get(provider, {}):
            result.add_warning(
                "provider_tier",
                f"Provider '{provider}' may not have tier '{tier}' configured",
            )

        return result


def validate_config(config: dict[str, Any]) -> ValidationResult:
    """Convenience function to validate a workflow config.

    Args:
        config: Configuration dictionary

    Returns:
        ValidationResult

    """
    validator = ConfigValidator()
    return validator.validate_workflow_config(config)


def validate_yaml_file(file_path: str) -> ValidationResult:
    """Validate a YAML configuration file.

    Args:
        file_path: Path to YAML file

    Returns:
        ValidationResult

    """
    import yaml

    result = ValidationResult(valid=True)

    try:
        with open(file_path) as f:
            config = yaml.safe_load(f)
    except FileNotFoundError:
        result.add_error("file", f"File not found: {file_path}")
        return result
    except yaml.YAMLError as e:
        result.add_error("yaml", f"Invalid YAML: {e}")
        return result

    if config is None:
        result.add_error("file", "Empty configuration file")
        return result

    return validate_config(config)
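A hedged sketch of how the validator above might be driven, assuming the module path empathy_os/models/validation.py from the manifest and reusing provider/tier names ("anthropic", "capable", "cheap") that appear elsewhere in this diff; the config dict itself is hypothetical.

```python
# Hypothetical workflow config exercising the schema above: "name" is required,
# each stage needs "name" and "tier", and timeout_ms must stay within 0-600000.
from empathy_os.models.validation import validate_config

config = {
    "name": "security-audit",
    "default_provider": "anthropic",
    "stages": [
        {"name": "identify_vulnerabilities", "tier": "capable", "timeout_ms": 120000},
        {"name": "generate_report", "tier": "cheap", "max_retries": 3},
    ],
}

result = validate_config(config)
print(result)  # "Configuration is valid" or an itemized error/warning list
for err in result.errors:
    print(err.path, err.message)
```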
empathy_os/monitoring/__init__.py
@@ -0,0 +1,52 @@
"""LLM Telemetry Monitoring System

A zero-config monitoring system for LLM usage tracking with progressive enhancement:

**Tier 1 (Default - Zero Config):**
- JSONL telemetry (automatic logging to `.empathy/`)
- CLI dashboard (`empathy telemetry`)
- VSCode panel (real-time visualization)

**Tier 2 (Enterprise - Opt-in):**
- Alert system (threshold-based notifications)
- OpenTelemetry backend (export to SigNoz/Datadog)

This package provides the backend components for the monitoring system.
The frontend VSCode extension is in `website/components/telemetry/`.

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

# Import agent monitoring classes from sibling module (backwards compatibility)
# The agent_monitoring.py module contains agent monitoring for multi-agent systems
# This package (monitoring/) contains LLM telemetry monitoring
from empathy_os.agent_monitoring import AgentMetrics, AgentMonitor, TeamMetrics

# Import telemetry classes
from empathy_os.models.telemetry import (
    LLMCallRecord,
    TelemetryAnalytics,
    TelemetryStore,
    WorkflowRunRecord,
    get_telemetry_store,
    log_llm_call,
    log_workflow_run,
)

__all__ = [
    # Agent monitoring (backwards compatibility)
    "AgentMetrics",
    "AgentMonitor",
    "TeamMetrics",
    # LLM telemetry
    "LLMCallRecord",
    "WorkflowRunRecord",
    "TelemetryStore",
    "TelemetryAnalytics",
    "get_telemetry_store",
    "log_llm_call",
    "log_workflow_run",
]

__version__ = "3.8.0-alpha"
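Because the old empathy_os/monitoring.py was renamed to agent_monitoring.py (see the manifest above) and this new package re-exports its classes, existing imports should keep resolving. A small sketch of that compatibility guarantee, using only names listed in __all__; the identity assert follows directly from the re-export shown above.

```python
# Both import paths should resolve to the same classes after the rename;
# telemetry helpers are likewise re-exported from empathy_os.models.telemetry.
from empathy_os.agent_monitoring import AgentMonitor
from empathy_os.monitoring import AgentMonitor as CompatAgentMonitor
from empathy_os.monitoring import LLMCallRecord, log_llm_call  # telemetry re-exports

assert AgentMonitor is CompatAgentMonitor
```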