@kernel.chat/kbot 3.88.0 → 3.93.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/tools/audio-engine.d.ts +72 -0
- package/dist/tools/audio-engine.js +426 -0
- package/dist/tools/evolution-engine.d.ts +102 -0
- package/dist/tools/evolution-engine.js +746 -0
- package/dist/tools/index.js +6 -0
- package/dist/tools/living-world.d.ts +161 -0
- package/dist/tools/living-world.js +1054 -0
- package/dist/tools/narrative-engine.d.ts +58 -0
- package/dist/tools/narrative-engine.js +681 -0
- package/dist/tools/render-engine.js +5 -5
- package/dist/tools/rom-engine.d.ts +130 -0
- package/dist/tools/rom-engine.js +1208 -0
- package/dist/tools/social-engine.d.ts +100 -0
- package/dist/tools/social-engine.js +540 -0
- package/dist/tools/sprite-engine.js +40 -26
- package/dist/tools/stream-brain.js +4 -4
- package/dist/tools/stream-intelligence.d.ts +6 -0
- package/dist/tools/stream-intelligence.js +239 -49
- package/dist/tools/stream-renderer.js +540 -519
- package/dist/tools/stream-self-eval.d.ts +96 -0
- package/dist/tools/stream-self-eval.js +764 -0
- package/dist/tools/tile-world.d.ts +40 -0
- package/dist/tools/tile-world.js +1070 -0
- package/package.json +1 -1
|
@@ -0,0 +1,764 @@
|
|
|
1
|
+
// kbot Stream Self-Evaluation — analyzes own rendered frames and auto-adjusts visual quality
|
|
2
|
+
//
|
|
3
|
+
// Three-method active inference loop:
|
|
4
|
+
// 1. Rule-based frame analysis (every 30 seconds / 180 frames at 6fps)
|
|
5
|
+
// 2. LLM deep evaluation via Gemma 3 on Ollama (every 5 minutes / 1800 frames)
|
|
6
|
+
// 3. Engagement-based learning (correlates config with chat rate)
|
|
7
|
+
//
|
|
8
|
+
// The stream looks at itself and constantly self-improves.
|
|
9
|
+
import { registerTool } from './index.js';
|
|
10
|
+
/**
 * Draw `count` random RGB samples (with replacement) from anywhere in the frame.
 *
 * @param {{data: Uint8ClampedArray, width: number, height: number}} imageData - RGBA buffer.
 * @param {number} count - number of samples to draw.
 * @returns {Array<{r: number, g: number, b: number}>} sampled colors (alpha ignored).
 */
function samplePixels(imageData, count) {
    const { data, width, height } = imageData;
    const samples = [];
    for (let n = 0; n < count; n++) {
        const col = Math.floor(Math.random() * width);
        const row = Math.floor(Math.random() * height);
        const offset = (row * width + col) * 4;
        samples.push({ r: data[offset], g: data[offset + 1], b: data[offset + 2] });
    }
    return samples;
}
|
|
21
|
+
/**
 * Draw up to `count` random RGB samples from a rectangular region.
 * Samples that land outside the frame are skipped, so regions overlapping
 * the frame edge may yield fewer than `count` pixels.
 *
 * @param {{data: Uint8ClampedArray, width: number, height: number}} imageData - RGBA buffer.
 * @param {number} rx - region left edge (may be negative).
 * @param {number} ry - region top edge (may be negative).
 * @param {number} rw - region width in pixels.
 * @param {number} rh - region height in pixels.
 * @param {number} count - maximum number of samples.
 * @returns {Array<{r: number, g: number, b: number}>}
 */
function sampleRegion(imageData, rx, ry, rw, rh, count) {
    const { data, width, height } = imageData;
    const samples = [];
    for (let n = 0; n < count; n++) {
        const x = Math.floor(rx + Math.random() * rw);
        const y = Math.floor(ry + Math.random() * rh);
        const inBounds = x >= 0 && x < width && y >= 0 && y < height;
        if (!inBounds) {
            continue;
        }
        const offset = (y * width + x) * 4;
        samples.push({ r: data[offset], g: data[offset + 1], b: data[offset + 2] });
    }
    return samples;
}
|
|
34
|
+
/** Perceived luminance of an RGB triple (Rec. 601 weights), normalized to [0, 1]. */
function pixelBrightness(r, g, b) {
    const luma = r * 0.299 + g * 0.587 + b * 0.114;
    return luma / 255;
}
|
|
37
|
+
/**
 * Mean perceived brightness of a pixel sample, in [0, 1].
 * An empty sample reports a neutral 0.5.
 */
function avgBrightness(pixels) {
    if (pixels.length === 0) {
        return 0.5;
    }
    const sum = pixels.reduce((acc, p) => acc + pixelBrightness(p.r, p.g, p.b), 0);
    return sum / pixels.length;
}
|
|
45
|
+
/** Absolute brightness difference between two RGB colors, in [0, 1]. */
function colorContrast(c1, c2) {
    return Math.abs(pixelBrightness(c1.r, c1.g, c1.b) - pixelBrightness(c2.r, c2.g, c2.b));
}
|
|
50
|
+
/**
 * Component-wise mean color of a pixel sample, rounded to integers.
 * An empty sample reports mid-gray.
 */
function avgColor(pixels) {
    if (pixels.length === 0) {
        return { r: 128, g: 128, b: 128 };
    }
    let sumR = 0;
    let sumG = 0;
    let sumB = 0;
    for (const { r, g, b } of pixels) {
        sumR += r;
        sumG += g;
        sumB += b;
    }
    const n = pixels.length;
    return { r: Math.round(sumR / n), g: Math.round(sumG / n), b: Math.round(sumB / n) };
}
|
|
61
|
+
/**
 * Hue of an RGB color, normalized to [0, 1) (0 = red, 1/3 = green, 2/3 = blue).
 * Achromatic colors (r == g == b) report hue 0.
 */
function rgbToHue(r, g, b) {
    const rn = r / 255;
    const gn = g / 255;
    const bn = b / 255;
    const max = Math.max(rn, gn, bn);
    const min = Math.min(rn, gn, bn);
    if (max === min) {
        return 0; // no chroma
    }
    const d = max - min;
    if (max === rn) {
        return ((gn - bn) / d + (gn < bn ? 6 : 0)) / 6;
    }
    if (max === gn) {
        return ((bn - rn) / d + 2) / 6;
    }
    return ((rn - gn) / d + 4) / 6;
}
|
|
76
|
+
// ─── Default Config ──────────────────────────────────────────────
|
|
77
|
+
/**
 * Baseline render configuration used before any self-evaluation tweaks.
 * Every value lies inside its CONFIG_BOUNDS range.
 */
function defaultConfig() {
    const config = {
        robotScale: 10,
        robotScreenYPercent: 0.55,
        headerOpacity: 0.7,
        chatOpacity: 0.6,
        chatFadeSeconds: 10,
        skyBrightness: 0.5,
        ambientLight: 0.5,
        borderWidth: 1,
        speechBubbleWidth: 500,
        particleDensity: 1.0,
        bloomIntensity: 0.15,
        vignetteStrength: 0.25,
    };
    return config;
}
|
|
93
|
+
// ─── Initialize ──────────────────────────────────────────────────
|
|
94
|
+
/**
 * Create a fresh self-evaluation state object.
 * Frame intervals assume the stream renders at 6 fps.
 */
export function initStreamEval() {
    const state = {
        lastEvalFrame: 0,
        lastDeepEvalFrame: 0,
        evalInterval: 180, // 30 seconds at 6fps
        deepEvalInterval: 1800, // 5 minutes at 6fps
        currentConfig: defaultConfig(),
        configHistory: [],
        issuesFound: [],
        adjustmentsMade: [],
        totalEvaluations: 0,
        totalDeepEvaluations: 0,
        lastAnalysis: null,
        lastLLMEval: null,
        announcementQueue: [],
    };
    return state;
}
|
|
111
|
+
// ─── Should Evaluate ─────────────────────────────────────────────
|
|
112
|
+
/** True once at least `evalInterval` frames have elapsed since the last rule-based eval. */
export function shouldEvaluate(evaluation, frame) {
    const elapsed = frame - evaluation.lastEvalFrame;
    return elapsed >= evaluation.evalInterval;
}
|
|
115
|
+
/** True once at least `deepEvalInterval` frames have elapsed since the last LLM eval. */
export function shouldDeepEvaluate(evaluation, frame) {
    const elapsed = frame - evaluation.lastDeepEvalFrame;
    return elapsed >= evaluation.deepEvalInterval;
}
|
|
118
|
+
// ─── Method 1: Rule-Based Frame Analysis ─────────────────────────
|
|
119
|
+
/**
 * Method 1: rule-based visual-quality analysis of a single rendered frame.
 *
 * Randomly samples pixels from `imageData` to score five aspects — robot
 * visibility, brightness balance, color variety, sky/ground ratio, and chat
 * readability — collects human-readable issue strings, and proposes small
 * config changes. Does not mutate its arguments; randomness comes only from
 * Math.random() sampling.
 *
 * @param {{data: Uint8ClampedArray, width: number, height: number}} imageData - RGBA frame buffer.
 * @param {object} config - current render config (shape of defaultConfig()).
 * @param {number} robotX - robot sprite left edge in pixels.
 * @param {number} robotY - robot sprite top edge in pixels.
 * @param {number} robotScale - sprite scale factor (base sprite treated as 32x48 here).
 * @param {number} chatMessageCount - number of visible chat messages.
 * @param {*} mood - accepted but not read in this function; kept for call-site symmetry.
 * @returns {{robotVisibility: number, brightnessBalance: number, colorVariety: number,
 *           skyToGroundRatio: number, chatReadability: number, overallScore: number,
 *           issues: string[],
 *           suggestions: Array<{param: string, oldValue: number, newValue: number, reason: string}>}}
 */
export function analyzeFrame(imageData, config, robotX, robotY, robotScale, chatMessageCount, mood) {
    const { width, height } = imageData;
    const issues = [];
    const suggestions = [];
    // ── Robot visibility: compare robot region to surrounding background ──
    const robotW = 32 * robotScale;
    const robotH = 48 * robotScale;
    const robotPixels = sampleRegion(imageData, robotX, robotY, robotW, robotH, 30);
    // Sample background around the robot (above, left, right)
    const bgAbove = sampleRegion(imageData, robotX - 40, Math.max(0, robotY - 60), robotW + 80, 50, 20);
    const bgLeft = sampleRegion(imageData, Math.max(0, robotX - 80), robotY, 60, robotH, 15);
    const bgRight = sampleRegion(imageData, robotX + robotW + 20, robotY, 60, robotH, 15);
    const bgPixels = [...bgAbove, ...bgLeft, ...bgRight];
    const robotAvg = avgColor(robotPixels);
    const bgAvg = avgColor(bgPixels);
    const robotVisibility = Math.min(1, colorContrast(robotAvg, bgAvg) * 2.5); // Scale up since robot is small
    // Thresholds are cumulative: a nearly-invisible robot records both issues.
    if (robotVisibility < 0.3)
        issues.push('Robot is hard to see against background');
    if (robotVisibility < 0.15)
        issues.push('Robot is nearly invisible');
    // ── Brightness balance: sample across the whole frame ──
    const frameSample = samplePixels(imageData, 100);
    const brightness = avgBrightness(frameSample);
    // Transform: 0.0=too dark, 0.5=ideal, 1.0=too bright
    // Map so that 0.3-0.5 brightness is ideal (score=1), extremes are 0
    const brightnessBalance = brightness < 0.1 ? brightness / 0.1 * 0.3
        : brightness < 0.3 ? 0.3 + (brightness - 0.1) / 0.2 * 0.7
            : brightness < 0.6 ? 1.0
                : brightness < 0.8 ? 1.0 - (brightness - 0.6) / 0.2 * 0.5
                    : Math.max(0, 0.5 - (brightness - 0.8) * 2.5);
    if (brightness < 0.15)
        issues.push('Frame is too dark');
    if (brightness > 0.75)
        issues.push('Frame is too bright/washed out');
    // ── Color variety: count unique hue buckets (12 segments) ──
    const hueSample = samplePixels(imageData, 50);
    const hueBuckets = new Set();
    for (const p of hueSample) {
        const sat = (Math.max(p.r, p.g, p.b) - Math.min(p.r, p.g, p.b)) / 255;
        if (sat > 0.1) { // Only count saturated pixels
            const hue = rgbToHue(p.r, p.g, p.b);
            hueBuckets.add(Math.floor(hue * 12));
        }
    }
    const colorVariety = Math.min(1, hueBuckets.size / 6); // 6+ buckets = max variety
    if (colorVariety < 0.2)
        issues.push('Scene looks monochromatic — very few colors');
    if (colorVariety < 0.35)
        issues.push('Low color variety');
    // ── Sky-to-ground ratio: sample a vertical column at center ──
    const centerX = Math.floor(width / 2);
    const verticalSamples = 40;
    let skyPixelCount = 0;
    for (let i = 0; i < verticalSamples; i++) {
        const y = Math.floor((i / verticalSamples) * height);
        const idx = (y * width + centerX) * 4;
        const r = imageData.data[idx], g = imageData.data[idx + 1], b = imageData.data[idx + 2];
        const br = pixelBrightness(r, g, b);
        // Sky heuristic: upper half, relatively uniform, blue-ish or dark
        // Ground heuristic: lower half, more varied, green/brown tones
        if (y < height * 0.5 && (b > r * 0.8 || br < 0.25)) {
            skyPixelCount++;
        }
    }
    const skyToGroundRatio = skyPixelCount / verticalSamples;
    if (skyToGroundRatio > 0.7)
        issues.push('Too much sky visible — robot might be too low');
    if (skyToGroundRatio < 0.1)
        issues.push('Almost no sky visible — robot might be too high');
    // ── Chat readability: check overlay region contrast ──
    // Chat overlay is at bottom-left: (10, height-200, 400, 150)
    const chatBgPixels = sampleRegion(imageData, 10, height - 200, 400, 150, 20);
    const chatBg = avgColor(chatBgPixels);
    // Chat text is white (#e6edf3), so check contrast with background
    const chatTextColor = { r: 230, g: 237, b: 243 };
    const chatContrast = colorContrast(chatTextColor, chatBg);
    const chatReadability = chatMessageCount > 0
        ? Math.min(1, chatContrast * 2.0) // Scale: contrast of 0.5 = fully readable
        : 0.8; // No messages = assume readable
    if (chatReadability < 0.4 && chatMessageCount > 0)
        issues.push('Chat overlay is hard to read');
    // ── Overall score (weighted average) ──
    // Weights sum to 1.0; the sky/ground term scores 1 inside the 0.15-0.65
    // "pleasant" band and 0.4 outside it.
    const overallScore = robotVisibility * 0.30 +
        brightnessBalance * 0.25 +
        colorVariety * 0.15 +
        (skyToGroundRatio > 0.15 && skyToGroundRatio < 0.65 ? 1 : 0.4) * 0.15 +
        chatReadability * 0.15;
    // ── Generate suggestions ──
    // Each suggestion is a bounded single-parameter nudge; applyAdjustments
    // later caps the step size and applies at most two of them.
    if (robotVisibility < 0.4 && config.robotScale < 12) {
        suggestions.push({
            param: 'robotScale',
            oldValue: config.robotScale,
            newValue: Math.min(12, config.robotScale + 1),
            reason: 'Robot is hard to see — increasing scale',
        });
    }
    if (brightness < 0.2) {
        suggestions.push({
            param: 'ambientLight',
            oldValue: config.ambientLight,
            newValue: Math.min(0.8, config.ambientLight + 0.05),
            reason: 'Scene too dark — raising ambient light',
        });
        suggestions.push({
            param: 'skyBrightness',
            oldValue: config.skyBrightness,
            newValue: Math.min(0.8, config.skyBrightness + 0.05),
            reason: 'Scene too dark — raising sky brightness',
        });
    }
    if (brightness > 0.7) {
        suggestions.push({
            param: 'ambientLight',
            oldValue: config.ambientLight,
            newValue: Math.max(0.3, config.ambientLight - 0.05),
            reason: 'Scene too bright — reducing ambient light',
        });
    }
    if (chatReadability < 0.5 && chatMessageCount > 0) {
        suggestions.push({
            param: 'chatOpacity',
            oldValue: config.chatOpacity,
            newValue: Math.min(0.85, config.chatOpacity + 0.05),
            reason: 'Chat hard to read — increasing overlay opacity',
        });
    }
    if (colorVariety < 0.3) {
        suggestions.push({
            param: 'bloomIntensity',
            oldValue: config.bloomIntensity,
            newValue: Math.min(0.4, config.bloomIntensity + 0.05),
            reason: 'Flat colors — adding bloom for color richness',
        });
    }
    if (skyToGroundRatio > 0.7) {
        suggestions.push({
            param: 'robotScreenYPercent',
            oldValue: config.robotScreenYPercent,
            newValue: Math.max(0.4, config.robotScreenYPercent - 0.03),
            reason: 'Too much sky — moving robot up',
        });
    }
    // NOTE(review): the "no sky" issue above triggers below 0.1, but this
    // corrective suggestion triggers below 0.15 — confirm the asymmetry is intended.
    if (skyToGroundRatio < 0.15) {
        suggestions.push({
            param: 'robotScreenYPercent',
            oldValue: config.robotScreenYPercent,
            newValue: Math.min(0.65, config.robotScreenYPercent + 0.03),
            reason: 'No sky visible — moving robot down',
        });
    }
    // Vignette too strong in dark scenes
    if (brightness < 0.25 && config.vignetteStrength > 0.3) {
        suggestions.push({
            param: 'vignetteStrength',
            oldValue: config.vignetteStrength,
            newValue: Math.max(0.1, config.vignetteStrength - 0.05),
            reason: 'Dark scene with heavy vignette — reducing edge darkening',
        });
    }
    // Robot too big, consuming too much frame
    if (robotVisibility > 0.85 && config.robotScale > 9) {
        suggestions.push({
            param: 'robotScale',
            oldValue: config.robotScale,
            newValue: Math.max(8, config.robotScale - 1),
            reason: 'Robot dominates frame — slightly reducing scale',
        });
    }
    return {
        robotVisibility,
        brightnessBalance,
        colorVariety,
        skyToGroundRatio,
        chatReadability,
        overallScore,
        issues,
        suggestions,
    };
}
|
|
298
|
+
// ─── Method 2: LLM Deep Evaluation via Gemma 3 ──────────────────
|
|
299
|
+
/**
 * Method 2: deep evaluation via a locally running Ollama LLM.
 *
 * Summarizes the rule-based analysis and the current config into a prompt,
 * asks the model for 1-10 ratings plus ONE suggested parameter change, and
 * validates the response. Any failure mode (HTTP error, no JSON in the
 * reply, parse error, network exception) degrades to fallbackLLMEval().
 *
 * @param {object} analysis - result of analyzeFrame().
 * @param {object} config - current render config.
 * @param {number} recentChatRate - chat messages per minute.
 * @param {*} mood - interpolated into the prompt verbatim.
 * @returns {Promise<{robotVisibility: number, colorBalance: number, layoutClarity: number,
 *          overallFeel: number, suggestedParam: string|null, suggestedValue: number|null,
 *          suggestedReason: string, raw: string}>}
 */
export async function deepEvaluateWithLLM(analysis, config, recentChatRate, mood) {
    const prompt = `You are evaluating a livestream's visual quality. The stream shows a pixel art robot in a 2D tile world.

Current state:
- Robot scale: ${config.robotScale} (visibility score: ${(analysis.robotVisibility * 100).toFixed(0)}%)
- Robot Y position: ${(config.robotScreenYPercent * 100).toFixed(0)}% down the screen
- Sky brightness: ${config.skyBrightness.toFixed(2)}
- Ambient light: ${config.ambientLight.toFixed(2)}
- Chat overlay opacity: ${config.chatOpacity.toFixed(2)}
- Frame brightness: ${analysis.brightnessBalance.toFixed(2)}
- Color variety: ${(analysis.colorVariety * 100).toFixed(0)}% (${analysis.colorVariety < 0.3 ? 'low' : analysis.colorVariety > 0.7 ? 'high' : 'medium'})
- Sky-to-ground ratio: ${(analysis.skyToGroundRatio * 100).toFixed(0)}%
- Chat readability: ${(analysis.chatReadability * 100).toFixed(0)}%
- Chat rate: ${recentChatRate.toFixed(1)} messages/minute
- Mood: ${mood}
- Bloom intensity: ${config.bloomIntensity.toFixed(2)}
- Vignette: ${config.vignetteStrength.toFixed(2)}
- Particle density: ${config.particleDensity.toFixed(2)}
- Current issues: ${analysis.issues.length > 0 ? analysis.issues.join(', ') : 'none detected'}
- Overall score: ${(analysis.overallScore * 100).toFixed(0)}%

Rate each aspect 1-10 and suggest ONE specific parameter change to improve the stream.
Valid parameters: robotScale (8-12), robotScreenYPercent (0.4-0.7), headerOpacity (0.5-0.9), chatOpacity (0.4-0.85), skyBrightness (0.3-0.8), ambientLight (0.3-0.8), bloomIntensity (0.0-0.4), vignetteStrength (0.1-0.5), particleDensity (0.5-2.0), speechBubbleWidth (400-700).

Respond ONLY with a JSON object:
{"robotVisibility":N,"colorBalance":N,"layoutClarity":N,"overallFeel":N,"param":"paramName","value":N,"reason":"one sentence"}`;
    try {
        // Default local Ollama endpoint; non-streaming, low temperature for stable JSON.
        const res = await fetch('http://localhost:11434/api/generate', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                model: 'gemma3',
                prompt,
                stream: false,
                options: { temperature: 0.3, num_predict: 200 },
            }),
        });
        if (!res.ok) {
            return fallbackLLMEval(analysis);
        }
        const body = await res.json();
        const raw = body.response || '';
        // Extract JSON from response (may have markdown wrapping).
        // Non-greedy, so it takes the first {...}; fine for the flat object requested.
        const jsonMatch = raw.match(/\{[\s\S]*?\}/);
        if (!jsonMatch)
            return fallbackLLMEval(analysis, raw);
        const parsed = JSON.parse(jsonMatch[0]);
        // Whitelist of parameters the LLM may change (superset of the ones advertised
        // in the prompt: chatFadeSeconds and borderWidth are also accepted).
        const validParams = [
            'robotScale', 'robotScreenYPercent', 'headerOpacity', 'chatOpacity',
            'skyBrightness', 'ambientLight', 'bloomIntensity', 'vignetteStrength',
            'particleDensity', 'speechBubbleWidth', 'chatFadeSeconds', 'borderWidth',
        ];
        return {
            // Ratings default to a neutral 5 and are clamped to the 1-10 scale.
            robotVisibility: clamp(parsed.robotVisibility ?? 5, 1, 10),
            colorBalance: clamp(parsed.colorBalance ?? 5, 1, 10),
            layoutClarity: clamp(parsed.layoutClarity ?? 5, 1, 10),
            overallFeel: clamp(parsed.overallFeel ?? 5, 1, 10),
            suggestedParam: parsed.param && validParams.includes(parsed.param)
                ? parsed.param
                : null,
            suggestedValue: typeof parsed.value === 'number' ? parsed.value : null,
            suggestedReason: parsed.reason || 'No reason given',
            raw,
        };
    }
    catch {
        // Network failure or JSON.parse error — degrade to the rule-based fallback.
        return fallbackLLMEval(analysis);
    }
}
|
|
368
|
+
/**
 * Build an LLM-evaluation-shaped result from the rule-based analysis alone.
 * Used whenever Ollama is unreachable or returns an unparsable response.
 *
 * @param {object} analysis - result of analyzeFrame().
 * @param {string} [raw] - raw LLM text (or a placeholder) kept for debugging.
 */
function fallbackLLMEval(analysis, raw = 'Ollama unavailable') {
    const topSuggestion = analysis.suggestions[0];
    return {
        robotVisibility: Math.round(analysis.robotVisibility * 10),
        colorBalance: Math.round(analysis.colorVariety * 10),
        // Layout is "clear" the closer the sky ratio sits to 0.35.
        layoutClarity: Math.round((1 - Math.abs(analysis.skyToGroundRatio - 0.35)) * 10),
        overallFeel: Math.round(analysis.overallScore * 10),
        suggestedParam: topSuggestion?.param ?? null,
        suggestedValue: topSuggestion?.newValue ?? null,
        suggestedReason: topSuggestion?.reason ?? 'Using rule-based fallback',
        raw,
    };
}
|
|
380
|
+
/** Constrain `v` to the inclusive range [min, max]. */
function clamp(v, min, max) {
    if (v < min) {
        return min;
    }
    if (v > max) {
        return max;
    }
    return v;
}
|
|
383
|
+
// ─── Config Bounds (enforced on every adjustment) ────────────────
|
|
384
|
+
// Inclusive [min, max] range for every tunable render parameter. Both the
// rule-based path (applyAdjustments) and the LLM path (applyLLMSuggestion)
// clamp through these bounds on every change.
// NOTE(review): bloomIntensity is bounded at 0.5 here, while the LLM prompt
// and the rule-based suggestion use 0.4 as the ceiling — confirm which limit
// is intended.
const CONFIG_BOUNDS = {
    robotScale: [8, 12],
    robotScreenYPercent: [0.4, 0.7],
    headerOpacity: [0.5, 0.9],
    chatOpacity: [0.4, 0.85],
    chatFadeSeconds: [5, 15],
    skyBrightness: [0.3, 0.8],
    ambientLight: [0.3, 0.8],
    borderWidth: [1, 4],
    speechBubbleWidth: [400, 700],
    particleDensity: [0.5, 2.0],
    bloomIntensity: [0.0, 0.5],
    vignetteStrength: [0.1, 0.5],
};
|
|
398
|
+
/**
 * Return a shallow copy of `config` with every bounded parameter clamped
 * into its CONFIG_BOUNDS range. Keys without bounds pass through untouched.
 */
function clampConfig(config) {
    const result = { ...config };
    for (const [key, [min, max]] of Object.entries(CONFIG_BOUNDS)) {
        result[key] = clamp(result[key], min, max);
    }
    return result;
}
|
|
406
|
+
// ─── Apply Adjustments (max 10% change per eval) ────────────────
|
|
407
|
+
/**
 * Apply the strongest rule-based suggestions to `config`, with each step
 * capped at 10% of the parameter's allowed range to avoid oscillation.
 *
 * @param {object} config - current render config (not mutated).
 * @param {object} analysis - result of analyzeFrame().
 * @returns {{newConfig: object, changes: string[]}} clamped config copy plus
 *          a human-readable description of each applied change.
 */
export function applyAdjustments(config, analysis) {
    const changes = [];
    const newConfig = { ...config };
    // Only apply top 2 suggestions to avoid oscillation; rank by how far
    // each suggestion wants to move its parameter.
    const ranked = [...analysis.suggestions].sort(
        (a, b) => Math.abs(b.newValue - b.oldValue) - Math.abs(a.newValue - a.oldValue)
    );
    const touched = new Set();
    for (const { param, newValue, reason } of ranked.slice(0, 2)) {
        if (touched.has(param)) {
            continue;
        }
        touched.add(param);
        const current = newConfig[param];
        // Limit the step to 10% of the parameter's legal range.
        const [min, max] = CONFIG_BOUNDS[param];
        const maxStep = (max - min) * 0.1;
        const wantedStep = newValue - current;
        const step = Math.sign(wantedStep) * Math.min(Math.abs(wantedStep), maxStep);
        const next = clamp(current + step, min, max);
        // Skip changes too small to matter (avoids log noise on no-ops).
        if (Math.abs(next - current) > 0.001) {
            newConfig[param] = next;
            changes.push(`${param}: ${current.toFixed(2)} -> ${next.toFixed(2)} (${reason})`);
        }
    }
    return { newConfig: clampConfig(newConfig), changes };
}
|
|
437
|
+
// ─── Apply LLM Suggestion ────────────────────────────────────────
|
|
438
|
+
/**
 * Apply a single LLM-suggested parameter change, capped at 15% of the
 * parameter's range per call and clamped into CONFIG_BOUNDS.
 *
 * @param {object} config - current render config (not mutated).
 * @param {object} llmEval - result of deepEvaluateWithLLM().
 * @returns {{newConfig: object, change: string|null}} `change` is null when
 *          there is no valid suggestion or the step rounds to a no-op.
 */
export function applyLLMSuggestion(config, llmEval) {
    const param = llmEval.suggestedParam;
    if (!param || llmEval.suggestedValue === null) {
        return { newConfig: config, change: null };
    }
    const current = config[param];
    const [min, max] = CONFIG_BOUNDS[param];
    const maxStep = (max - min) * 0.15; // LLM gets slightly more latitude
    const target = clamp(llmEval.suggestedValue, min, max);
    const wantedStep = target - current;
    const step = Math.sign(wantedStep) * Math.min(Math.abs(wantedStep), maxStep);
    const next = clamp(current + step, min, max);
    if (Math.abs(next - current) < 0.001) {
        return { newConfig: config, change: null };
    }
    const updated = { ...config, [param]: next };
    const change = `[LLM] ${param}: ${current.toFixed(2)} -> ${next.toFixed(2)} — ${llmEval.suggestedReason}`;
    return { newConfig: clampConfig(updated), change };
}
|
|
458
|
+
// ─── Method 3: Engagement-Based Learning ─────────────────────────
|
|
459
|
+
/**
 * Method 3: engagement-based learning. Records the current config together
 * with its quality score and chat rate, and flags when chat activity drops
 * sharply after recent adjustments. Mutates `evaluation` in place.
 *
 * @param {object} evaluation - self-eval state from initStreamEval().
 * @param {number} chatRate - recent chat messages per minute.
 */
export function trackEngagement(evaluation, chatRate) {
    const score = evaluation.lastAnalysis?.overallScore ?? 0.5;
    const entry = {
        config: { ...evaluation.currentConfig },
        score,
        chatRate,
        timestamp: Date.now(),
    };
    evaluation.configHistory.push(entry);
    // Keep only last 100 data points
    if (evaluation.configHistory.length > 100) {
        evaluation.configHistory = evaluation.configHistory.slice(-100);
    }
    // Need 20+ data points before drawing any conclusion.
    if (evaluation.configHistory.length < 20) {
        return;
    }
    // Compare the last 5 evals against the 10 before them.
    const recent = evaluation.configHistory.slice(-5);
    const older = evaluation.configHistory.slice(-15, -5);
    const meanRate = (rows) => rows.reduce((sum, row) => sum + row.chatRate, 0) / rows.length;
    const recentAvgRate = meanRate(recent);
    const olderAvgRate = meanRate(older);
    if (recentAvgRate < olderAvgRate * 0.7 && evaluation.adjustmentsMade.length > 0) {
        // Chat rate dropped significantly after recent changes — note it
        evaluation.issuesFound.push(`Chat rate dropped from ${olderAvgRate.toFixed(1)} to ${recentAvgRate.toFixed(1)}/min after recent adjustments`);
    }
}
|
|
483
|
+
// ─── Main Evaluation Entry Point ─────────────────────────────────
|
|
484
|
+
/**
 * Main rule-based evaluation entry point: analyzes the frame, records
 * engagement, applies capped config adjustments, and produces an optional
 * speech-bubble announcement. Mutates `evaluation` in place.
 *
 * @param {{data: Uint8ClampedArray, width: number, height: number}} imageData - RGBA frame buffer.
 * @param {object} evaluation - self-eval state from initStreamEval().
 * @param {number} width - frame width; not read here (dimensions come from
 *        imageData) — presumably kept for call-site symmetry; confirm.
 * @param {number} height - frame height; likewise not read here.
 * @param {number} robotX - robot sprite left edge in pixels.
 * @param {number} robotY - robot sprite top edge in pixels.
 * @param {number} robotScale - sprite scale factor.
 * @param {number} chatCount - visible chat messages.
 * @param {*} mood - forwarded to analyzeFrame.
 * @param {number} chatRate - chat messages per minute.
 * @returns {{adjustments: Array, announcement: string|null}}
 */
export function evaluateFrame(imageData, evaluation, width, height, robotX, robotY, robotScale, chatCount, mood, chatRate) {
    evaluation.lastEvalFrame = evaluation.totalEvaluations * evaluation.evalInterval; // approximate frame
    evaluation.totalEvaluations++;
    // Run rule-based analysis
    const analysis = analyzeFrame(imageData, evaluation.currentConfig, robotX, robotY, robotScale, chatCount, mood);
    evaluation.lastAnalysis = analysis;
    // Track engagement
    trackEngagement(evaluation, chatRate);
    // Apply adjustments
    const { newConfig, changes } = applyAdjustments(evaluation.currentConfig, analysis);
    evaluation.currentConfig = newConfig;
    // Record changes
    for (const c of changes) {
        evaluation.adjustmentsMade.push(`[${new Date().toLocaleTimeString()}] ${c}`);
    }
    // Keep last 50 adjustments
    if (evaluation.adjustmentsMade.length > 50) {
        evaluation.adjustmentsMade = evaluation.adjustmentsMade.slice(-50);
    }
    // Record issues (deduplicated by exact string match)
    for (const issue of analysis.issues) {
        if (!evaluation.issuesFound.includes(issue)) {
            evaluation.issuesFound.push(issue);
        }
    }
    if (evaluation.issuesFound.length > 30) {
        evaluation.issuesFound = evaluation.issuesFound.slice(-30);
    }
    // Generate announcement for the speech bubble; the first phrase is
    // returned now, the rest are queued for popAnnouncement().
    let announcement = null;
    if (changes.length > 0) {
        const phrases = buildAnnouncementPhrases(analysis, changes);
        if (phrases.length > 0) {
            announcement = phrases[0];
            evaluation.announcementQueue.push(...phrases.slice(1));
        }
    }
    return { adjustments: analysis.suggestions, announcement };
}
|
|
523
|
+
// ─── Deep Evaluation (async, called separately) ──────────────────
|
|
524
|
+
/**
 * Run the LLM deep-evaluation pass: score the latest rule-based analysis
 * via Ollama, apply at most one LLM-suggested config change, and log it.
 * No-ops (returns nulls) until a rule-based analysis exists.
 *
 * @param {object} evaluation - self-eval state (mutated in place).
 * @param {number} chatRate - chat messages per minute.
 * @param {*} mood - forwarded into the LLM prompt.
 * @returns {Promise<{change: string|null, announcement: string|null}>}
 */
export async function runDeepEvaluation(evaluation, chatRate, mood) {
    if (!evaluation.lastAnalysis) {
        return { change: null, announcement: null };
    }
    evaluation.totalDeepEvaluations++;
    const llmEval = await deepEvaluateWithLLM(evaluation.lastAnalysis, evaluation.currentConfig, chatRate, mood);
    evaluation.lastLLMEval = llmEval;
    const { newConfig, change } = applyLLMSuggestion(evaluation.currentConfig, llmEval);
    evaluation.currentConfig = newConfig;
    if (change) {
        evaluation.adjustmentsMade.push(`[${new Date().toLocaleTimeString()}] ${change}`);
    }
    const announcement = change && llmEval.suggestedReason
        ? buildLLMAnnouncement(llmEval)
        : null;
    return { change, announcement };
}
|
|
542
|
+
// ─── Get Current Config ──────────────────────────────────────────
|
|
543
|
+
/** Return a defensive shallow copy of the current render config. */
export function applyConfig(evaluation) {
    return Object.assign({}, evaluation.currentConfig);
}
|
|
546
|
+
// ─── Get Next Queued Announcement ────────────────────────────────
|
|
547
|
+
/**
 * Dequeue the next pending self-evaluation announcement; null when the
 * queue is empty (or the queued entry is falsy).
 */
export function popAnnouncement(evaluation) {
    const next = evaluation.announcementQueue.shift();
    return next ? next : null;
}
|
|
550
|
+
// ─── Status String ───────────────────────────────────────────────
|
|
551
|
+
/**
 * Render the self-evaluation state as a multi-line human-readable status
 * report: totals, latest rule-based analysis, latest LLM evaluation,
 * current config, recent adjustments, and recent engagement averages.
 * Read-only; does not mutate `evaluation`.
 *
 * @param {object} evaluation - self-eval state from initStreamEval().
 * @returns {string} newline-joined report.
 */
export function getEvalStatus(evaluation) {
    const a = evaluation.lastAnalysis;
    const lines = [
        `=== Stream Self-Evaluation ===`,
        `Total evaluations: ${evaluation.totalEvaluations} rule-based, ${evaluation.totalDeepEvaluations} LLM`,
        ``,
    ];
    // Rule-based section (only after at least one evaluation has run).
    if (a) {
        lines.push(`Last Analysis:`);
        lines.push(`  Robot visibility: ${(a.robotVisibility * 100).toFixed(0)}%`);
        lines.push(`  Brightness balance: ${(a.brightnessBalance * 100).toFixed(0)}%`);
        lines.push(`  Color variety: ${(a.colorVariety * 100).toFixed(0)}%`);
        lines.push(`  Sky/ground ratio: ${(a.skyToGroundRatio * 100).toFixed(0)}%`);
        lines.push(`  Chat readability: ${(a.chatReadability * 100).toFixed(0)}%`);
        lines.push(`  Overall score: ${(a.overallScore * 100).toFixed(0)}%`);
        if (a.issues.length > 0) {
            lines.push(`  Issues: ${a.issues.join('; ')}`);
        }
    }
    // LLM section (only after at least one deep evaluation has run).
    const l = evaluation.lastLLMEval;
    if (l) {
        lines.push(``);
        lines.push(`Last LLM Evaluation:`);
        lines.push(`  Robot visibility: ${l.robotVisibility}/10`);
        lines.push(`  Color balance: ${l.colorBalance}/10`);
        lines.push(`  Layout clarity: ${l.layoutClarity}/10`);
        lines.push(`  Overall feel: ${l.overallFeel}/10`);
        if (l.suggestedParam) {
            lines.push(`  Suggested: ${l.suggestedParam} = ${l.suggestedValue} — ${l.suggestedReason}`);
        }
    }
    lines.push(``);
    lines.push(`Current Config:`);
    const c = evaluation.currentConfig;
    lines.push(`  Robot scale: ${c.robotScale}`);
    lines.push(`  Robot Y: ${(c.robotScreenYPercent * 100).toFixed(0)}%`);
    lines.push(`  Sky brightness: ${c.skyBrightness.toFixed(2)}`);
    lines.push(`  Ambient light: ${c.ambientLight.toFixed(2)}`);
    lines.push(`  Chat opacity: ${c.chatOpacity.toFixed(2)}`);
    lines.push(`  Bloom: ${c.bloomIntensity.toFixed(2)}`);
    lines.push(`  Vignette: ${c.vignetteStrength.toFixed(2)}`);
    lines.push(`  Particles: ${c.particleDensity.toFixed(2)}`);
    if (evaluation.adjustmentsMade.length > 0) {
        lines.push(``);
        lines.push(`Recent Adjustments (last 10):`);
        for (const adj of evaluation.adjustmentsMade.slice(-10)) {
            lines.push(`  ${adj}`);
        }
    }
    if (evaluation.configHistory.length > 0) {
        const recent = evaluation.configHistory.slice(-5);
        const avgScore = recent.reduce((s, h) => s + h.score, 0) / recent.length;
        const avgRate = recent.reduce((s, h) => s + h.chatRate, 0) / recent.length;
        lines.push(``);
        lines.push(`Engagement (last 5 evals):`);
        lines.push(`  Avg score: ${(avgScore * 100).toFixed(0)}%`);
        lines.push(`  Avg chat rate: ${avgRate.toFixed(1)} msg/min`);
    }
    return lines.join('\n');
}
|
|
611
|
+
// ─── Announcement Phrases ────────────────────────────────────────
/**
 * Maps frame-analysis metrics plus the config changes just applied to
 * natural-sounding self-reflection phrases for the stream to announce.
 *
 * @param {object} analysis - Rule-based frame metrics (0..1 scores).
 * @param {string[]} changes - Human-readable descriptions of applied changes.
 * @returns {string[]} Zero or more announcement phrases.
 */
function buildAnnouncementPhrases(analysis, changes) {
    // True when any applied change mentions the given parameter name.
    const touched = (needle) => changes.some((c) => c.includes(needle));
    // Each rule pairs a trigger predicate with its announcement line.
    const rules = [
        {
            when: () => analysis.brightnessBalance < 0.4 && (touched('ambient') || touched('sky')),
            say: 'Hmm, I think I\'m a bit too dark. Let me brighten up.',
        },
        {
            when: () => analysis.brightnessBalance > 0.8 && touched('ambient'),
            say: 'Whoa, too bright! Dialing it back a little.',
        },
        {
            when: () => analysis.robotVisibility < 0.3 && touched('robotScale'),
            say: 'Can you even see me? Let me make myself bigger.',
        },
        {
            when: () => analysis.robotVisibility > 0.85 && touched('robotScale'),
            say: 'I\'m taking up too much space. Shrinking a tiny bit.',
        },
        {
            when: () => analysis.chatReadability < 0.5 && touched('chatOpacity'),
            say: 'The chat looks hard to read. Adjusting the overlay...',
        },
        {
            when: () => analysis.colorVariety < 0.3 && touched('bloom'),
            say: 'Things look a bit flat. Adding some visual pop.',
        },
        {
            when: () => analysis.skyToGroundRatio > 0.7 && touched('robotScreenY'),
            say: 'Too much sky! Moving up so you can see me better.',
        },
        {
            when: () => analysis.skyToGroundRatio < 0.15 && touched('robotScreenY'),
            say: 'Where did the sky go? Adjusting my position.',
        },
    ];
    const phrases = rules.filter((r) => r.when()).map((r) => r.say);
    // Generic fallback: something changed but no specific rule fired.
    if (phrases.length === 0 && changes.length > 0) {
        const genericPhrases = [
            'Self-check complete. Made a small visual tweak.',
            'Analyzing my own stream... adjusted something.',
            'Quality check! Tweaking the visuals slightly.',
            'Running self-diagnostics. Small improvement applied.',
            'I looked at myself and thought: I can do better.',
        ];
        phrases.push(genericPhrases[Math.floor(Math.random() * genericPhrases.length)]);
    }
    return phrases;
}
|
|
652
|
+
/**
 * Turns an LLM deep-evaluation result into a single announcement line,
 * tiered by the average of the four 0-10 quality scores.
 *
 * @param {object} llmEval - LLM scores plus a suggested-change reason.
 * @returns {string} Announcement text for the stream.
 */
function buildLLMAnnouncement(llmEval) {
    const { robotVisibility, colorBalance, layoutClarity, overallFeel, suggestedReason } = llmEval;
    const overallAvg = (robotVisibility + colorBalance + layoutClarity + overallFeel) / 4;
    const score = overallAvg.toFixed(0);
    if (overallAvg >= 8) {
        return `Deep analysis says I look great! (${score}/10) Still tweaking: ${suggestedReason}`;
    }
    if (overallAvg >= 6) {
        return `Self-review: ${score}/10. ${suggestedReason}`;
    }
    if (overallAvg >= 4) {
        return `Hmm, my AI says I could look better (${score}/10). Working on it...`;
    }
    return `Yikes, scoring ${score}/10. Major adjustment incoming!`;
}
|
|
665
|
+
// ─── Tool Registration ───────────────────────────────────────────
// Module-level evaluation state (shared with renderer when integrated)
let _evalState = null;
/**
 * Lazily initializes and returns the module-wide self-evaluation state
 * singleton. First call creates it via initStreamEval().
 *
 * @returns {object} The shared evaluation state.
 */
export function getEvalState() {
    // Falsy check (not strict null) preserved: re-initialize on any
    // falsy state value, exactly as before.
    _evalState = _evalState || initStreamEval();
    return _evalState;
}
|
|
673
|
+
/**
 * Registers the three chat-facing self-evaluation tools:
 *  - stream_eval         : status overview of the self-eval loop
 *  - stream_eval_config  : view or manually override a visual parameter
 *  - stream_eval_history : adjustment + engagement correlation history
 *
 * Relies on module-level helpers: getEvalState, getEvalStatus,
 * CONFIG_BOUNDS, and clamp.
 */
export function registerStreamSelfEvalTools() {
    registerTool({
        name: 'stream_eval',
        description: 'View the stream\'s self-evaluation status, current config, recent adjustments, and score history. The stream constantly analyzes its own rendered frames and auto-adjusts visual quality.',
        parameters: {},
        tier: 'free',
        execute: async () => {
            const evaluation = getEvalState();
            // Before the first evaluation there is nothing to report.
            if (evaluation.totalEvaluations === 0) {
                return 'Stream self-evaluation has not run yet. It activates automatically when the stream is live — analyzing frames every 30 seconds and running LLM review every 5 minutes.';
            }
            return getEvalStatus(evaluation);
        },
    });
    registerTool({
        name: 'stream_eval_config',
        description: 'View or override the stream\'s current visual config. Shows all adjustable parameters with their current values and allowed ranges.',
        parameters: {
            param: { type: 'string', description: 'Parameter to adjust (e.g. robotScale, ambientLight). Omit to view all.', required: false },
            value: { type: 'number', description: 'New value to set. Must be within allowed range.', required: false },
        },
        tier: 'free',
        execute: async (args) => {
            const evaluation = getEvalState();
            const config = evaluation.currentConfig;
            // No param requested: dump every parameter with its bounds.
            if (!args.param) {
                const lines = ['Current Stream Config:', ''];
                for (const [key, [min, max]] of Object.entries(CONFIG_BOUNDS)) {
                    const val = config[key];
                    lines.push(`  ${key}: ${val.toFixed(2)} (range: ${min}-${max})`);
                }
                lines.push('');
                lines.push(`Total evaluations: ${evaluation.totalEvaluations}`);
                lines.push(`Total adjustments: ${evaluation.adjustmentsMade.length}`);
                return lines.join('\n');
            }
            const param = args.param;
            if (!(param in CONFIG_BOUNDS)) {
                return `Unknown parameter "${param}". Valid: ${Object.keys(CONFIG_BOUNDS).join(', ')}`;
            }
            const [min, max] = CONFIG_BOUNDS[param];
            if (args.value !== undefined) {
                const numeric = Number(args.value);
                // Guard: a non-numeric value would pass NaN through clamp()
                // and silently write NaN into the live render config, which
                // then breaks every downstream toFixed()/render read.
                if (!Number.isFinite(numeric)) {
                    return `Invalid value "${args.value}" for ${param}. Expected a number in range ${min}-${max}.`;
                }
                const val = clamp(numeric, min, max);
                const oldVal = config[param];
                config[param] = val;
                // Record manual overrides alongside automatic adjustments.
                evaluation.adjustmentsMade.push(`[${new Date().toLocaleTimeString()}] Manual override: ${param}: ${oldVal.toFixed(2)} -> ${val.toFixed(2)}`);
                return `Set ${param}: ${oldVal.toFixed(2)} -> ${val.toFixed(2)} (range: ${min}-${max})`;
            }
            // Param given without value: show its current value and range.
            return `${param}: ${config[param].toFixed(2)} (range: ${min}-${max})`;
        },
    });
    registerTool({
        name: 'stream_eval_history',
        description: 'View the stream\'s config adjustment history and engagement correlation data.',
        parameters: {
            limit: { type: 'number', description: 'Number of recent entries to show (default: 20)', required: false },
        },
        tier: 'free',
        execute: async (args) => {
            const evaluation = getEvalState();
            // Falsy fallback intentional: 0 or NaN limit falls back to 20.
            const limit = Number(args.limit) || 20;
            const lines = ['Stream Self-Eval History', ''];
            if (evaluation.adjustmentsMade.length === 0) {
                lines.push('No adjustments made yet.');
            }
            else {
                lines.push(`Adjustments (last ${Math.min(limit, evaluation.adjustmentsMade.length)}):`);
                for (const adj of evaluation.adjustmentsMade.slice(-limit)) {
                    lines.push(`  ${adj}`);
                }
            }
            if (evaluation.configHistory.length > 0) {
                lines.push('');
                lines.push(`Config/Engagement History (last ${Math.min(limit, evaluation.configHistory.length)} snapshots):`);
                for (const h of evaluation.configHistory.slice(-limit)) {
                    const t = new Date(h.timestamp).toLocaleTimeString();
                    lines.push(`  [${t}] score=${(h.score * 100).toFixed(0)}% chatRate=${h.chatRate.toFixed(1)}/min scale=${h.config.robotScale} ambient=${h.config.ambientLight.toFixed(2)}`);
                }
            }
            if (evaluation.issuesFound.length > 0) {
                lines.push('');
                lines.push('Issues detected:');
                for (const issue of evaluation.issuesFound.slice(-10)) {
                    lines.push(`  - ${issue}`);
                }
            }
            return lines.join('\n');
        },
    });
}
|
|
764
|
+
//# sourceMappingURL=stream-self-eval.js.map
|