entroplain 0.1.1 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/26.0.1 +0 -0
- package/CONTRIBUTING.md +103 -103
- package/README.md +209 -122
- package/dist/entroplain-0.2.0-py3-none-any.whl +0 -0
- package/dist/entroplain-0.2.0.tar.gz +0 -0
- package/entroplain/__init__.py +30 -33
- package/entroplain/cost_tracker.py +231 -0
- package/entroplain/dashboard.py +368 -0
- package/entroplain/monitor.py +178 -60
- package/entroplain/proxy.py +92 -21
- package/entroplain-proxy +0 -0
- package/package.json +4 -2
- package/paper.md +299 -0
- package/pip +0 -0
- package/pyproject.toml +96 -89
- package/test_nvidia.py +56 -0
- package/test_proxy.py +16 -0
- package/dist/entroplain-0.1.1-py3-none-any.whl +0 -0
- package/dist/entroplain-0.1.1.tar.gz +0 -0
package/entroplain/__init__.py
CHANGED
|
@@ -1,33 +1,30 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Entroplain — Entropy-based early exit for efficient agent reasoning.
|
|
3
|
-
"""
|
|
4
|
-
|
|
5
|
-
__version__ = "0.
|
|
6
|
-
__author__ = "Entroplain Contributors"
|
|
7
|
-
|
|
8
|
-
from .monitor import EntropyMonitor, calculate_entropy
|
|
9
|
-
from .providers import (
|
|
10
|
-
OpenAIProvider,
|
|
11
|
-
AnthropicProvider,
|
|
12
|
-
GeminiProvider,
|
|
13
|
-
NVIDIAProvider,
|
|
14
|
-
OllamaProvider,
|
|
15
|
-
LlamaCppProvider,
|
|
16
|
-
)
|
|
17
|
-
from .hooks import track_entropy, early_exit
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
"
|
|
22
|
-
"
|
|
23
|
-
"
|
|
24
|
-
"
|
|
25
|
-
"
|
|
26
|
-
"
|
|
27
|
-
"
|
|
28
|
-
"
|
|
29
|
-
"
|
|
30
|
-
|
|
31
|
-
"EntropyProxy",
|
|
32
|
-
"ProxyConfig",
|
|
33
|
-
]
|
|
1
|
+
"""
|
|
2
|
+
Entroplain — Entropy-based early exit for efficient agent reasoning.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
__version__ = "0.2.0"
|
|
6
|
+
__author__ = "Entroplain Contributors"
|
|
7
|
+
|
|
8
|
+
from .monitor import EntropyMonitor, calculate_entropy
|
|
9
|
+
from .providers import (
|
|
10
|
+
OpenAIProvider,
|
|
11
|
+
AnthropicProvider,
|
|
12
|
+
GeminiProvider,
|
|
13
|
+
NVIDIAProvider,
|
|
14
|
+
OllamaProvider,
|
|
15
|
+
LlamaCppProvider,
|
|
16
|
+
)
|
|
17
|
+
from .hooks import track_entropy, early_exit
|
|
18
|
+
|
|
19
|
+
__all__ = [
|
|
20
|
+
"EntropyMonitor",
|
|
21
|
+
"calculate_entropy",
|
|
22
|
+
"OpenAIProvider",
|
|
23
|
+
"AnthropicProvider",
|
|
24
|
+
"GeminiProvider",
|
|
25
|
+
"NVIDIAProvider",
|
|
26
|
+
"OllamaProvider",
|
|
27
|
+
"LlamaCppProvider",
|
|
28
|
+
"track_entropy",
|
|
29
|
+
"early_exit",
|
|
30
|
+
]
|
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Cost tracking and savings calculator.
|
|
3
|
+
|
|
4
|
+
Estimates cost savings from early exit based on token usage.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import math
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from typing import Optional, Dict, Any
|
|
10
|
+
from enum import Enum
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class PricingTier(Enum):
    """Pricing tiers for different models.

    Each member's value is a 3-tuple ``(model_name, input_price, output_price)``
    with prices in USD per 1M tokens. CostTracker reads ``value[1]`` and
    ``value[2]`` for the input/output rates.
    """
    # OpenAI
    GPT4O = ("gpt-4o", 2.50, 10.00)  # input, output per 1M tokens
    GPT4O_MINI = ("gpt-4o-mini", 0.15, 0.60)
    GPT4_TURBO = ("gpt-4-turbo", 10.00, 30.00)

    # Anthropic
    CLAUDE_4_OPUS = ("claude-4-opus", 15.00, 75.00)
    CLAUDE_4_SONNET = ("claude-4-sonnet", 3.00, 15.00)

    # NVIDIA
    LLAMA_70B = ("meta/llama-3.1-70b-instruct", 0.70, 0.70)
    LLAMA_405B = ("meta/llama-3.1-405b-instruct", 2.70, 2.70)

    # Default (unknown model) — flat $1.00/1M both directions
    DEFAULT = ("default", 1.00, 1.00)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
class CostEstimate:
    """Estimated cost for a completion, including early-exit savings."""
    model: str                 # model name the estimate was computed for
    input_tokens: int          # prompt tokens consumed
    output_tokens: int         # completion tokens actually generated
    output_tokens_full: int    # If no early exit
    cost_actual_usd: float     # cost of input + actual output
    cost_full_usd: float       # cost of input + full (no-early-exit) output
    cost_saved_usd: float      # cost_full_usd - cost_actual_usd
    savings_percent: float     # saved/full * 100; 0.0 when full cost is zero
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class CostTracker:
    """
    Track token usage and calculate cost savings.

    Usage:
        tracker = CostTracker(model="gpt-4o")
        tracker.track_input(100)        # 100 input tokens
        tracker.track_output(50)        # 50 output tokens
        tracker.set_full_estimate(150)  # Would have been 150 output tokens

        estimate = tracker.get_estimate()
        print(f"Saved ${estimate.cost_saved_usd:.4f}")
    """

    # Model name to pricing tier mapping (keys are lowercase; lookup is
    # case-insensitive via model.lower()).
    MODEL_ALIASES = {
        # OpenAI
        "gpt-4o": PricingTier.GPT4O,
        "gpt-4o-mini": PricingTier.GPT4O_MINI,
        "gpt-4-turbo": PricingTier.GPT4_TURBO,
        "gpt-4-turbo-preview": PricingTier.GPT4_TURBO,

        # Anthropic
        "claude-4-opus": PricingTier.CLAUDE_4_OPUS,
        "claude-opus-4": PricingTier.CLAUDE_4_OPUS,
        "claude-4-sonnet": PricingTier.CLAUDE_4_SONNET,
        "claude-sonnet-4": PricingTier.CLAUDE_4_SONNET,

        # NVIDIA / Meta
        "meta/llama-3.1-70b-instruct": PricingTier.LLAMA_70B,
        "llama-3.1-70b": PricingTier.LLAMA_70B,
        "meta/llama-3.1-405b-instruct": PricingTier.LLAMA_405B,
        "llama-3.1-405b": PricingTier.LLAMA_405B,
    }

    def __init__(
        self,
        model: str = "default",
        custom_pricing: Optional[tuple] = None
    ):
        """
        Initialize cost tracker.

        Args:
            model: Model name (e.g., "gpt-4o", "claude-4-sonnet")
            custom_pricing: Optional (input_price, output_price) per 1M tokens
        """
        self.model = model
        self.input_tokens = 0
        self.output_tokens = 0
        self.estimated_full_output: Optional[int] = None
        self._custom_pricing = custom_pricing

        # Resolve per-1M-token pricing: an explicit override wins, otherwise
        # look up the model alias and fall back to DEFAULT. Compare against
        # None so a (0, 0) override is respected.
        if custom_pricing is not None:
            self._input_price, self._output_price = custom_pricing
        else:
            tier = self.MODEL_ALIASES.get(model.lower(), PricingTier.DEFAULT)
            # tier.value is (model_name, input_price, output_price)
            self._input_price, self._output_price = tier.value[1], tier.value[2]

    def track_input(self, tokens: int) -> None:
        """Track input (prompt) tokens; accumulates across calls."""
        self.input_tokens += tokens

    def track_output(self, tokens: int) -> None:
        """Track output tokens generated; accumulates across calls."""
        self.output_tokens += tokens

    def set_full_estimate(self, tokens: int) -> None:
        """Set estimate of what output would have been without early exit."""
        self.estimated_full_output = tokens

    def estimate_full_output(self, multiplier: float = 2.0) -> int:
        """
        Auto-estimate full output if not set.

        Uses a simple multiplier based on observed tokens.
        Default assumes early exit saves ~50%.
        """
        # Fixed: compare against None instead of truthiness so an explicit
        # estimate of 0 tokens is honored rather than falling through to the
        # multiplier heuristic.
        if self.estimated_full_output is not None:
            return self.estimated_full_output
        return int(self.output_tokens * multiplier)

    def calculate_cost(self, input_tokens: int, output_tokens: int) -> float:
        """Calculate USD cost for the given token counts."""
        input_cost = (input_tokens / 1_000_000) * self._input_price
        output_cost = (output_tokens / 1_000_000) * self._output_price
        return input_cost + output_cost

    def get_estimate(self) -> CostEstimate:
        """Get cost estimate with savings calculation."""
        full_output = self.estimate_full_output()

        cost_actual = self.calculate_cost(self.input_tokens, self.output_tokens)
        cost_full = self.calculate_cost(self.input_tokens, full_output)
        cost_saved = cost_full - cost_actual

        # Guard against division by zero when nothing has been tracked yet.
        if cost_full > 0:
            savings_pct = (cost_saved / cost_full) * 100
        else:
            savings_pct = 0.0

        return CostEstimate(
            model=self.model,
            input_tokens=self.input_tokens,
            output_tokens=self.output_tokens,
            output_tokens_full=full_output,
            cost_actual_usd=cost_actual,
            cost_full_usd=cost_full,
            cost_saved_usd=cost_saved,
            savings_percent=savings_pct
        )

    def reset(self) -> None:
        """Reset tracking for new request (pricing/model are kept)."""
        self.input_tokens = 0
        self.output_tokens = 0
        self.estimated_full_output = None

    def get_stats(self) -> Dict[str, Any]:
        """Get current stats as a plain dict (flattened CostEstimate)."""
        estimate = self.get_estimate()
        return {
            "model": estimate.model,
            "input_tokens": estimate.input_tokens,
            "output_tokens": estimate.output_tokens,
            "output_tokens_full": estimate.output_tokens_full,
            "tokens_saved": estimate.output_tokens_full - estimate.output_tokens,
            "cost_actual_usd": estimate.cost_actual_usd,
            "cost_full_usd": estimate.cost_full_usd,
            "cost_saved_usd": estimate.cost_saved_usd,
            "savings_percent": estimate.savings_percent,
        }
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
# Convenience function for quick estimates
|
|
181
|
+
def estimate_savings(
    model: str,
    tokens_generated: int,
    tokens_if_full: int,
    input_tokens: int = 0
) -> CostEstimate:
    """
    Quick estimate of cost savings from early exit.

    Args:
        model: Model name
        tokens_generated: Actual tokens generated (with early exit)
        tokens_if_full: Tokens that would have been generated without early exit
        input_tokens: Input prompt tokens

    Returns:
        CostEstimate with savings details
    """
    # Thin convenience wrapper: populate a one-shot tracker and read it back.
    one_shot = CostTracker(model)
    one_shot.track_input(input_tokens)
    one_shot.track_output(tokens_generated)
    one_shot.set_full_estimate(tokens_if_full)
    return one_shot.get_estimate()
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
def format_cost_report(estimate: CostEstimate) -> str:
    """Format a human-readable cost report.

    Args:
        estimate: CostEstimate produced by CostTracker.get_estimate().

    Returns:
        Multi-line report string (newline-joined) with token counts and
        USD costs/savings.
    """
    lines = [
        f"📊 Cost Report for {estimate.model}",
        f"",
        f"  Input tokens: {estimate.input_tokens:,}",
        f"  Output tokens: {estimate.output_tokens:,} (actual)",
        f"                 {estimate.output_tokens_full:,} (if no early exit)",
        f"  Tokens saved: {estimate.output_tokens_full - estimate.output_tokens:,}",
        f"",
        f"  Cost actual: ${estimate.cost_actual_usd:.6f}",
        f"  Cost if full: ${estimate.cost_full_usd:.6f}",
        f"  💰 Cost saved: ${estimate.cost_saved_usd:.6f} ({estimate.savings_percent:.1f}%)",
    ]
    return "\n".join(lines)
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
if __name__ == "__main__":
|
|
224
|
+
# Demo
|
|
225
|
+
estimate = estimate_savings(
|
|
226
|
+
model="gpt-4o",
|
|
227
|
+
tokens_generated=82,
|
|
228
|
+
tokens_if_full=150,
|
|
229
|
+
input_tokens=50
|
|
230
|
+
)
|
|
231
|
+
print(format_cost_report(estimate))
|
|
@@ -0,0 +1,368 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Real-time entropy visualization dashboard.
|
|
3
|
+
|
|
4
|
+
Run with: entroplain-dashboard --port 8765
|
|
5
|
+
Then open: http://localhost:8050
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import json
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from typing import Dict, List, Any, Optional
|
|
12
|
+
from dataclasses import dataclass, field
|
|
13
|
+
from fastapi import FastAPI, WebSocket
|
|
14
|
+
from fastapi.responses import HTMLResponse
|
|
15
|
+
import uvicorn
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass
class DashboardConfig:
    """Configuration for the dashboard."""
    port: int = 8050        # HTTP port the dashboard itself serves on
    proxy_port: int = 8765  # port of the entroplain proxy being monitored
    # Intended UI refresh cadence; NOTE(review): not read anywhere in this
    # module — confirm whether the proxy/consumer honors it.
    update_interval_ms: int = 100
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
# HTML template for the dashboard
|
|
27
|
+
DASHBOARD_HTML = """
|
|
28
|
+
<!DOCTYPE html>
|
|
29
|
+
<html>
|
|
30
|
+
<head>
|
|
31
|
+
<title>Entroplain Dashboard</title>
|
|
32
|
+
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
|
|
33
|
+
<style>
|
|
34
|
+
* { box-sizing: border-box; margin: 0; padding: 0; }
|
|
35
|
+
body {
|
|
36
|
+
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
|
37
|
+
background: #0a0a0a;
|
|
38
|
+
color: #e0e0e0;
|
|
39
|
+
padding: 20px;
|
|
40
|
+
}
|
|
41
|
+
.container { max-width: 1200px; margin: 0 auto; }
|
|
42
|
+
h1 {
|
|
43
|
+
font-size: 24px;
|
|
44
|
+
margin-bottom: 20px;
|
|
45
|
+
color: #4ade80;
|
|
46
|
+
}
|
|
47
|
+
.grid {
|
|
48
|
+
display: grid;
|
|
49
|
+
grid-template-columns: 1fr 300px;
|
|
50
|
+
gap: 20px;
|
|
51
|
+
}
|
|
52
|
+
.chart-container {
|
|
53
|
+
background: #1a1a1a;
|
|
54
|
+
border-radius: 8px;
|
|
55
|
+
padding: 20px;
|
|
56
|
+
}
|
|
57
|
+
.stats-panel {
|
|
58
|
+
display: flex;
|
|
59
|
+
flex-direction: column;
|
|
60
|
+
gap: 15px;
|
|
61
|
+
}
|
|
62
|
+
.stat-card {
|
|
63
|
+
background: #1a1a1a;
|
|
64
|
+
border-radius: 8px;
|
|
65
|
+
padding: 15px;
|
|
66
|
+
}
|
|
67
|
+
.stat-label {
|
|
68
|
+
font-size: 12px;
|
|
69
|
+
color: #888;
|
|
70
|
+
text-transform: uppercase;
|
|
71
|
+
letter-spacing: 0.05em;
|
|
72
|
+
}
|
|
73
|
+
.stat-value {
|
|
74
|
+
font-size: 32px;
|
|
75
|
+
font-weight: 600;
|
|
76
|
+
color: #fff;
|
|
77
|
+
margin-top: 5px;
|
|
78
|
+
}
|
|
79
|
+
.stat-value.savings { color: #4ade80; }
|
|
80
|
+
.stat-value.cost { color: #fbbf24; }
|
|
81
|
+
.valleys { color: #60a5fa; }
|
|
82
|
+
.status-badge {
|
|
83
|
+
display: inline-block;
|
|
84
|
+
padding: 4px 12px;
|
|
85
|
+
border-radius: 12px;
|
|
86
|
+
font-size: 12px;
|
|
87
|
+
font-weight: 500;
|
|
88
|
+
}
|
|
89
|
+
.status-active { background: #22c55e; color: #000; }
|
|
90
|
+
.status-idle { background: #374151; color: #888; }
|
|
91
|
+
.status-exited { background: #f59e0b; color: #000; }
|
|
92
|
+
#status { margin-top: 10px; }
|
|
93
|
+
.legend {
|
|
94
|
+
display: flex;
|
|
95
|
+
gap: 20px;
|
|
96
|
+
margin-top: 15px;
|
|
97
|
+
font-size: 12px;
|
|
98
|
+
}
|
|
99
|
+
.legend-item {
|
|
100
|
+
display: flex;
|
|
101
|
+
align-items: center;
|
|
102
|
+
gap: 5px;
|
|
103
|
+
}
|
|
104
|
+
.legend-dot {
|
|
105
|
+
width: 10px;
|
|
106
|
+
height: 10px;
|
|
107
|
+
border-radius: 50%;
|
|
108
|
+
}
|
|
109
|
+
.dot-entropy { background: #60a5fa; }
|
|
110
|
+
.dot-valley { background: #f59e0b; }
|
|
111
|
+
.dot-threshold { background: #ef4444; }
|
|
112
|
+
</style>
|
|
113
|
+
</head>
|
|
114
|
+
<body>
|
|
115
|
+
<div class="container">
|
|
116
|
+
<h1>🎯 Entroplain Dashboard</h1>
|
|
117
|
+
|
|
118
|
+
<div class="grid">
|
|
119
|
+
<div class="chart-container">
|
|
120
|
+
<canvas id="entropyChart" height="300"></canvas>
|
|
121
|
+
<div class="legend">
|
|
122
|
+
<div class="legend-item">
|
|
123
|
+
<div class="legend-dot dot-entropy"></div>
|
|
124
|
+
<span>Entropy</span>
|
|
125
|
+
</div>
|
|
126
|
+
<div class="legend-item">
|
|
127
|
+
<div class="legend-dot dot-valley"></div>
|
|
128
|
+
<span>Valley Detected</span>
|
|
129
|
+
</div>
|
|
130
|
+
<div class="legend-item">
|
|
131
|
+
<div class="legend-dot dot-threshold"></div>
|
|
132
|
+
<span>Threshold (0.15)</span>
|
|
133
|
+
</div>
|
|
134
|
+
</div>
|
|
135
|
+
</div>
|
|
136
|
+
|
|
137
|
+
<div class="stats-panel">
|
|
138
|
+
<div class="stat-card">
|
|
139
|
+
<div class="stat-label">Status</div>
|
|
140
|
+
<div id="status">
|
|
141
|
+
<span class="status-badge status-idle">Idle</span>
|
|
142
|
+
</div>
|
|
143
|
+
</div>
|
|
144
|
+
|
|
145
|
+
<div class="stat-card">
|
|
146
|
+
<div class="stat-label">Tokens Generated</div>
|
|
147
|
+
<div class="stat-value" id="tokens">0</div>
|
|
148
|
+
</div>
|
|
149
|
+
|
|
150
|
+
<div class="stat-card">
|
|
151
|
+
<div class="stat-label">Valleys Detected</div>
|
|
152
|
+
<div class="stat-value valleys" id="valleys">0</div>
|
|
153
|
+
</div>
|
|
154
|
+
|
|
155
|
+
<div class="stat-card">
|
|
156
|
+
<div class="stat-label">Current Entropy</div>
|
|
157
|
+
<div class="stat-value" id="currentEntropy">-</div>
|
|
158
|
+
</div>
|
|
159
|
+
|
|
160
|
+
<div class="stat-card">
|
|
161
|
+
<div class="stat-label">Mean Entropy</div>
|
|
162
|
+
<div class="stat-value" id="meanEntropy">-</div>
|
|
163
|
+
</div>
|
|
164
|
+
|
|
165
|
+
<div class="stat-card">
|
|
166
|
+
<div class="stat-label">Tokens Saved</div>
|
|
167
|
+
<div class="stat-value savings" id="saved">0%</div>
|
|
168
|
+
</div>
|
|
169
|
+
|
|
170
|
+
<div class="stat-card">
|
|
171
|
+
<div class="stat-label">Cost Saved</div>
|
|
172
|
+
<div class="stat-value cost" id="costSaved">$0.00</div>
|
|
173
|
+
</div>
|
|
174
|
+
</div>
|
|
175
|
+
</div>
|
|
176
|
+
</div>
|
|
177
|
+
|
|
178
|
+
<script>
|
|
179
|
+
const ctx = document.getElementById('entropyChart').getContext('2d');
|
|
180
|
+
|
|
181
|
+
const chart = new Chart(ctx, {
|
|
182
|
+
type: 'line',
|
|
183
|
+
data: {
|
|
184
|
+
labels: [],
|
|
185
|
+
datasets: [
|
|
186
|
+
{
|
|
187
|
+
label: 'Entropy',
|
|
188
|
+
data: [],
|
|
189
|
+
borderColor: '#60a5fa',
|
|
190
|
+
backgroundColor: 'rgba(96, 165, 250, 0.1)',
|
|
191
|
+
fill: true,
|
|
192
|
+
tension: 0.3,
|
|
193
|
+
pointRadius: 0,
|
|
194
|
+
},
|
|
195
|
+
{
|
|
196
|
+
label: 'Threshold',
|
|
197
|
+
data: [],
|
|
198
|
+
borderColor: '#ef4444',
|
|
199
|
+
borderDash: [5, 5],
|
|
200
|
+
pointRadius: 0,
|
|
201
|
+
fill: false,
|
|
202
|
+
},
|
|
203
|
+
{
|
|
204
|
+
label: 'Valleys',
|
|
205
|
+
data: [],
|
|
206
|
+
borderColor: '#f59e0b',
|
|
207
|
+
pointBackgroundColor: '#f59e0b',
|
|
208
|
+
pointRadius: 6,
|
|
209
|
+
showLine: false,
|
|
210
|
+
}
|
|
211
|
+
]
|
|
212
|
+
},
|
|
213
|
+
options: {
|
|
214
|
+
responsive: true,
|
|
215
|
+
maintainAspectRatio: false,
|
|
216
|
+
animation: { duration: 0 },
|
|
217
|
+
scales: {
|
|
218
|
+
x: {
|
|
219
|
+
title: { display: true, text: 'Tokens', color: '#888' },
|
|
220
|
+
grid: { color: '#333' },
|
|
221
|
+
ticks: { color: '#888' }
|
|
222
|
+
},
|
|
223
|
+
y: {
|
|
224
|
+
title: { display: true, text: 'Entropy (bits)', color: '#888' },
|
|
225
|
+
min: 0,
|
|
226
|
+
max: 1,
|
|
227
|
+
grid: { color: '#333' },
|
|
228
|
+
ticks: { color: '#888' }
|
|
229
|
+
}
|
|
230
|
+
},
|
|
231
|
+
plugins: {
|
|
232
|
+
legend: { display: false }
|
|
233
|
+
}
|
|
234
|
+
}
|
|
235
|
+
});
|
|
236
|
+
|
|
237
|
+
const ws = new WebSocket(`ws://${location.host}/ws`);
|
|
238
|
+
|
|
239
|
+
ws.onmessage = (event) => {
|
|
240
|
+
const data = JSON.parse(event.data);
|
|
241
|
+
updateChart(data);
|
|
242
|
+
updateStats(data);
|
|
243
|
+
};
|
|
244
|
+
|
|
245
|
+
function updateChart(data) {
|
|
246
|
+
const labels = data.trajectory.map((_, i) => i);
|
|
247
|
+
const entropies = data.trajectory.map(p => p.entropy);
|
|
248
|
+
const threshold = data.trajectory.map(() => data.threshold || 0.15);
|
|
249
|
+
|
|
250
|
+
// Mark valleys
|
|
251
|
+
const valleyPoints = data.trajectory.map(p =>
|
|
252
|
+
p.is_valley ? p.entropy : null
|
|
253
|
+
);
|
|
254
|
+
|
|
255
|
+
chart.data.labels = labels;
|
|
256
|
+
chart.data.datasets[0].data = entropies;
|
|
257
|
+
chart.data.datasets[1].data = threshold;
|
|
258
|
+
chart.data.datasets[2].data = valleyPoints;
|
|
259
|
+
chart.update();
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
function updateStats(data) {
|
|
263
|
+
document.getElementById('tokens').textContent = data.token_count;
|
|
264
|
+
document.getElementById('valleys').textContent = data.valley_count;
|
|
265
|
+
document.getElementById('currentEntropy').textContent =
|
|
266
|
+
data.current_entropy ? data.current_entropy.toFixed(3) : '-';
|
|
267
|
+
document.getElementById('meanEntropy').textContent =
|
|
268
|
+
data.mean_entropy ? data.mean_entropy.toFixed(3) : '-';
|
|
269
|
+
|
|
270
|
+
// Calculate savings
|
|
271
|
+
if (data.exited_early) {
|
|
272
|
+
const savedPct = Math.round((data.tokens_saved / data.tokens_total) * 100);
|
|
273
|
+
document.getElementById('saved').textContent = savedPct + '%';
|
|
274
|
+
document.getElementById('costSaved').textContent = '$' + data.cost_saved.toFixed(4);
|
|
275
|
+
}
|
|
276
|
+
|
|
277
|
+
// Update status
|
|
278
|
+
const statusEl = document.getElementById('status');
|
|
279
|
+
if (data.exited_early) {
|
|
280
|
+
statusEl.innerHTML = '<span class="status-badge status-exited">Exited Early</span>';
|
|
281
|
+
} else if (data.active) {
|
|
282
|
+
statusEl.innerHTML = '<span class="status-badge status-active">Active</span>';
|
|
283
|
+
} else {
|
|
284
|
+
statusEl.innerHTML = '<span class="status-badge status-idle">Idle</span>';
|
|
285
|
+
}
|
|
286
|
+
}
|
|
287
|
+
</script>
|
|
288
|
+
</body>
|
|
289
|
+
</html>
|
|
290
|
+
"""
|
|
291
|
+
|
|
292
|
+
|
|
293
|
+
class Dashboard:
    """Real-time dashboard server.

    Serves the static HTML/JS dashboard at "/" and pushes entropy updates to
    connected browsers over the "/ws" websocket.
    """

    def __init__(self, config: DashboardConfig):
        self.config = config
        self.app = FastAPI(title="Entroplain Dashboard")
        self._websocket_clients: List[WebSocket] = []
        # Last payload broadcast; overwritten wholesale by broadcast_update.
        self._current_data: Dict[str, Any] = {
            "trajectory": [],
            "token_count": 0,
            "valley_count": 0,
            "current_entropy": 0,
            "mean_entropy": 0,
            "active": False,
            "exited_early": False,
        }
        self._setup_routes()

    def _setup_routes(self):
        """Register the HTML page and websocket routes on the FastAPI app."""
        @self.app.get("/")
        async def root():
            return HTMLResponse(content=DASHBOARD_HTML)

        @self.app.websocket("/ws")
        async def websocket_endpoint(websocket: WebSocket):
            await websocket.accept()
            self._websocket_clients.append(websocket)
            try:
                while True:
                    # Keep the connection alive; inbound messages are ignored.
                    await websocket.receive_text()
            except Exception:
                # Client disconnected. It may already have been dropped by
                # broadcast_update, so remove defensively to avoid ValueError.
                if websocket in self._websocket_clients:
                    self._websocket_clients.remove(websocket)

    async def broadcast_update(self, data: Dict[str, Any]):
        """Broadcast entropy data to all connected clients.

        Clients whose send fails are pruned from the client list.
        """
        self._current_data = data
        # Iterate over a snapshot: removing from the live list while
        # iterating it skips the next client (bug in the original).
        for client in list(self._websocket_clients):
            try:
                await client.send_json(data)
            except Exception:
                if client in self._websocket_clients:
                    self._websocket_clients.remove(client)

    def run(self):
        """Start the dashboard server (blocking call)."""
        # NOTE(review): binds to all interfaces; consider 127.0.0.1 if the
        # dashboard is only meant for local use.
        uvicorn.run(self.app, host="0.0.0.0", port=self.config.port)
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
def main():
    """CLI entry point for the dashboard.

    Parses --port / --proxy-port, prints a startup banner, and runs the
    (blocking) dashboard server.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Entroplain Dashboard")
    parser.add_argument("--port", type=int, default=8050, help="Dashboard port")
    parser.add_argument("--proxy-port", type=int, default=8765, help="Proxy port to monitor")
    args = parser.parse_args()

    config = DashboardConfig(port=args.port, proxy_port=args.proxy_port)
    dashboard = Dashboard(config)

    # Banner typo fixed: "ENTROPPLAIN" -> "ENTROPLAIN" (matches package name).
    print(f"""
==============================================================
ENTROPLAIN DASHBOARD
==============================================================
Dashboard: http://localhost:{args.port}
Monitoring proxy on port {args.proxy_port}
==============================================================
Open the dashboard to see real-time entropy visualization
==============================================================
""")

    dashboard.run()
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
if __name__ == "__main__":
|
|
368
|
+
main()
|