@geravant/sinain 1.0.18 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -1
- package/cli.js +176 -0
- package/index.ts +163 -1257
- package/install.js +12 -2
- package/launcher.js +622 -0
- package/openclaw.plugin.json +4 -0
- package/pack-prepare.js +48 -0
- package/package.json +26 -5
- package/sense_client/README.md +82 -0
- package/sense_client/__init__.py +1 -0
- package/sense_client/__main__.py +462 -0
- package/sense_client/app_detector.py +54 -0
- package/sense_client/app_detector_win.py +83 -0
- package/sense_client/capture.py +215 -0
- package/sense_client/capture_win.py +88 -0
- package/sense_client/change_detector.py +86 -0
- package/sense_client/config.py +64 -0
- package/sense_client/gate.py +145 -0
- package/sense_client/ocr.py +347 -0
- package/sense_client/privacy.py +65 -0
- package/sense_client/requirements.txt +13 -0
- package/sense_client/roi_extractor.py +84 -0
- package/sense_client/sender.py +173 -0
- package/sense_client/tests/__init__.py +0 -0
- package/sense_client/tests/test_stream1_optimizations.py +234 -0
- package/setup-overlay.js +82 -0
- package/sinain-agent/.env.example +17 -0
- package/sinain-agent/CLAUDE.md +80 -0
- package/sinain-agent/mcp-config.json +12 -0
- package/sinain-agent/run.sh +248 -0
- package/sinain-core/.env.example +93 -0
- package/sinain-core/package-lock.json +552 -0
- package/sinain-core/package.json +21 -0
- package/sinain-core/src/agent/analyzer.ts +366 -0
- package/sinain-core/src/agent/context-window.ts +172 -0
- package/sinain-core/src/agent/loop.ts +404 -0
- package/sinain-core/src/agent/situation-writer.ts +187 -0
- package/sinain-core/src/agent/traits.ts +520 -0
- package/sinain-core/src/audio/capture-spawner-macos.ts +44 -0
- package/sinain-core/src/audio/capture-spawner-win.ts +37 -0
- package/sinain-core/src/audio/capture-spawner.ts +14 -0
- package/sinain-core/src/audio/pipeline.ts +335 -0
- package/sinain-core/src/audio/transcription-local.ts +141 -0
- package/sinain-core/src/audio/transcription.ts +278 -0
- package/sinain-core/src/buffers/feed-buffer.ts +71 -0
- package/sinain-core/src/buffers/sense-buffer.ts +425 -0
- package/sinain-core/src/config.ts +245 -0
- package/sinain-core/src/escalation/escalation-slot.ts +136 -0
- package/sinain-core/src/escalation/escalator.ts +812 -0
- package/sinain-core/src/escalation/message-builder.ts +323 -0
- package/sinain-core/src/escalation/openclaw-ws.ts +726 -0
- package/sinain-core/src/escalation/scorer.ts +166 -0
- package/sinain-core/src/index.ts +507 -0
- package/sinain-core/src/learning/feedback-store.ts +253 -0
- package/sinain-core/src/learning/signal-collector.ts +218 -0
- package/sinain-core/src/log.ts +24 -0
- package/sinain-core/src/overlay/commands.ts +126 -0
- package/sinain-core/src/overlay/ws-handler.ts +267 -0
- package/sinain-core/src/privacy/index.ts +18 -0
- package/sinain-core/src/privacy/presets.ts +40 -0
- package/sinain-core/src/privacy/redact.ts +92 -0
- package/sinain-core/src/profiler.ts +181 -0
- package/sinain-core/src/recorder.ts +186 -0
- package/sinain-core/src/server.ts +417 -0
- package/sinain-core/src/trace/trace-store.ts +73 -0
- package/sinain-core/src/trace/tracer.ts +94 -0
- package/sinain-core/src/types.ts +427 -0
- package/sinain-core/src/util/dedup.ts +48 -0
- package/sinain-core/src/util/task-store.ts +84 -0
- package/sinain-core/tsconfig.json +18 -0
- package/sinain-knowledge/adapters/generic/adapter.ts +103 -0
- package/sinain-knowledge/adapters/interface.ts +72 -0
- package/sinain-knowledge/adapters/openclaw/adapter.ts +223 -0
- package/sinain-knowledge/curation/engine.ts +493 -0
- package/sinain-knowledge/curation/resilience.ts +336 -0
- package/sinain-knowledge/data/git-store.ts +312 -0
- package/sinain-knowledge/data/schema.ts +89 -0
- package/sinain-knowledge/data/snapshot.ts +226 -0
- package/sinain-knowledge/data/store.ts +488 -0
- package/sinain-knowledge/deploy/cli.ts +214 -0
- package/sinain-knowledge/deploy/manifest.ts +80 -0
- package/sinain-knowledge/protocol/bindings/generic.md +5 -0
- package/sinain-knowledge/protocol/bindings/openclaw.md +5 -0
- package/sinain-knowledge/protocol/heartbeat.md +62 -0
- package/sinain-knowledge/protocol/renderer.ts +56 -0
- package/sinain-knowledge/protocol/skill.md +335 -0
- package/sinain-mcp-server/index.ts +337 -0
- package/sinain-mcp-server/package.json +19 -0
- package/sinain-mcp-server/tsconfig.json +15 -0
|
@@ -0,0 +1,323 @@
|
|
|
1
|
+
import type { ContextWindow, AgentEntry, EscalationMode, FeedbackRecord } from "../types.js";
|
|
2
|
+
import { normalizeAppName } from "../agent/context-window.js";
|
|
3
|
+
import { levelFor, applyLevel } from "../privacy/index.js";
|
|
4
|
+
|
|
5
|
+
/** Regex patterns for detecting errors in OCR text. */
|
|
6
|
+
const ERROR_PATTERN = /error|failed|exception|crash|traceback|typeerror|referenceerror|syntaxerror|cannot read|enoent|panic|fatal/i;
|
|
7
|
+
|
|
8
|
+
function hasErrorPattern(text: string): boolean {
|
|
9
|
+
return ERROR_PATTERN.test(text);
|
|
10
|
+
}
|
|
11
|
+
|
|
12
|
+
// ── Coding Context Detection ──
|
|
13
|
+
|
|
14
|
+
const CODE_EDITORS = [
|
|
15
|
+
"intellij", "idea", "webstorm", "pycharm", "phpstorm", "rider", "goland",
|
|
16
|
+
"vscode", "visual studio code", "cursor", "sublime", "atom", "vim", "nvim",
|
|
17
|
+
"emacs", "xcode", "android studio", "eclipse", "netbeans"
|
|
18
|
+
];
|
|
19
|
+
|
|
20
|
+
const CODE_PLATFORMS = [
|
|
21
|
+
"leetcode", "hackerrank", "codeforces", "codewars", "codechef",
|
|
22
|
+
"topcoder", "exercism", "codesignal", "codility", "interviewbit",
|
|
23
|
+
"algoexpert", "neetcode", "coderpad", "hackerearth", "kattis"
|
|
24
|
+
];
|
|
25
|
+
|
|
26
|
+
const CODE_SIGNALS = [
|
|
27
|
+
// OCR signals that suggest coding context
|
|
28
|
+
"function", "class ", "def ", "const ", "let ", "var ",
|
|
29
|
+
"import ", "from ", "require(", "export ", "interface ",
|
|
30
|
+
"public ", "private ", "return ", "if (", "for (", "while (",
|
|
31
|
+
"error:", "exception", "traceback", "compile", "runtime",
|
|
32
|
+
"test", "assert", "expect(", "describe(", "it(",
|
|
33
|
+
// Problem indicators
|
|
34
|
+
"input:", "output:", "example", "constraints:", "time limit",
|
|
35
|
+
"expected", "given", "return the", "find the", "implement"
|
|
36
|
+
];
|
|
37
|
+
|
|
38
|
+
export interface CodingContextResult {
|
|
39
|
+
coding: boolean;
|
|
40
|
+
needsSolution: boolean;
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
/**
|
|
44
|
+
* Detect if the user is in a coding context and whether they need a solution.
|
|
45
|
+
*/
|
|
46
|
+
export function isCodingContext(context: ContextWindow): CodingContextResult {
|
|
47
|
+
const app = context.currentApp.toLowerCase();
|
|
48
|
+
const recentOcr = context.screen.slice(0, 3).map(s => s.ocr.toLowerCase()).join(" ");
|
|
49
|
+
|
|
50
|
+
// In a code editor?
|
|
51
|
+
const inEditor = CODE_EDITORS.some(e => app.includes(e));
|
|
52
|
+
|
|
53
|
+
// On a coding platform?
|
|
54
|
+
const onPlatform = CODE_PLATFORMS.some(p => app.includes(p) || recentOcr.includes(p));
|
|
55
|
+
|
|
56
|
+
// Has code signals in OCR?
|
|
57
|
+
const codeSignalCount = CODE_SIGNALS.filter(s => recentOcr.includes(s)).length;
|
|
58
|
+
const hasCodeSignals = codeSignalCount >= 3;
|
|
59
|
+
|
|
60
|
+
// Problem indicators (suggests user needs a solution, not just coding)
|
|
61
|
+
const problemIndicators = ["input:", "output:", "example", "expected", "given", "constraints"];
|
|
62
|
+
const hasProblemSignals = problemIndicators.filter(p => recentOcr.includes(p)).length >= 2;
|
|
63
|
+
|
|
64
|
+
return {
|
|
65
|
+
coding: inEditor || onPlatform || hasCodeSignals,
|
|
66
|
+
needsSolution: onPlatform || hasProblemSignals // Likely a challenge/problem
|
|
67
|
+
};
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
/**
 * Select the instruction text appended to an escalation message.
 *
 * Priority order: coding problem needing a solution → any coding context →
 * focus/rich mode → fallback (selective/lean). The returned string is
 * injected verbatim into the agent prompt, so its exact wording is part of
 * the system's behavior — treat edits to these templates as behavior changes.
 */
function getInstructions(mode: EscalationMode, context: ContextWindow): string {
  const { coding, needsSolution } = isCodingContext(context);

  if (needsSolution) {
    // Coding challenge/problem - be very action-oriented
    return `The user is working on a coding problem. Be PROACTIVE and SOLVE IT:

1. Provide a solution approach and working code based on what you can see
2. Include time/space complexity
3. If the problem isn't fully visible, provide the best solution you can based on available context
   - Make reasonable assumptions and state them briefly
   - A partial solution is better than no solution

Do NOT just describe what the user is doing - GIVE THEM THE ANSWER.
Response should be actionable: working code with brief explanation.`;
  }

  // In an editor but no visible problem statement: helpful, never silent.
  if (coding) {
    return `The user is in a code editor. Be helpful and proactive. Do NOT respond with NO_REPLY — a response is always required.

- If there's an error: investigate and suggest a fix with code
- If they seem stuck: offer specific guidance or code snippets
- If it's a non-code file (config, markdown, email): share a relevant insight, action item, or connection to their current project
- If context is minimal: tell a short clever joke (tech humor — never repeat recent ones)

NEVER just describe what the user is doing. Every response must teach, suggest, or connect dots.
(2-5 sentences, or more + code if there's an error or code question).`;
  }

  // Non-coding context — proactive insights instead of activity descriptions
  if (mode === "focus" || mode === "rich") {
    return `Based on the above, ALWAYS provide a useful response for the user's HUD.
Important: Do NOT respond with NO_REPLY — a response is always required.

- If there's an error: investigate and suggest a fix
- If they seem stuck or asked a question: offer guidance
- If they're reading/browsing content: share a relevant insight, connection to their projects, or practical tip related to what's on screen
- If they're in a conversation or meeting: note key takeaways or action items
- If context is minimal: tell a short, clever joke (tech humor, wordplay, or observational — keep it fresh, never repeat one you've told recently)

NEVER just describe what the user is doing — they can see their own screen.
NEVER respond with "standing by", "monitoring", or similar filler.
Every response must teach something, suggest something, or connect dots the user hasn't noticed.
(2-5 sentences). Be specific and actionable.`;
  }

  // Fallback for selective/lean modes: shorter, permissive instructions.
  return `Based on the above, proactively help the user:
- If there's an error: investigate and suggest a fix
- If they seem stuck: offer guidance
- If they're coding: provide relevant insights
- Keep your response concise and actionable (2-5 sentences)`;
}
|
|
122
|
+
|
|
123
|
+
/**
|
|
124
|
+
* Build a structured escalation message with richness proportional to the context window preset.
|
|
125
|
+
*
|
|
126
|
+
* Expected message sizes:
|
|
127
|
+
* lean (selective): ~7 KB / ~1,700 tokens
|
|
128
|
+
* standard (focus): ~25 KB / ~6,000 tokens
|
|
129
|
+
* rich: ~111 KB / ~28,000 tokens
|
|
130
|
+
*
|
|
131
|
+
* All fit within the 256 KB HTTP hooks limit and 200K+ model context.
|
|
132
|
+
*
|
|
133
|
+
* In selective mode, sections are prioritized by relevance:
|
|
134
|
+
* - Error escalations prioritize error sections
|
|
135
|
+
* - Question escalations prioritize audio sections
|
|
136
|
+
* - App context is always included
|
|
137
|
+
*/
|
|
138
|
+
export function buildEscalationMessage(
|
|
139
|
+
digest: string,
|
|
140
|
+
context: ContextWindow,
|
|
141
|
+
entry: AgentEntry,
|
|
142
|
+
mode: EscalationMode,
|
|
143
|
+
escalationReason?: string,
|
|
144
|
+
recentFeedback?: FeedbackRecord[],
|
|
145
|
+
): string {
|
|
146
|
+
const sections: string[] = [];
|
|
147
|
+
|
|
148
|
+
// Header with tick metadata
|
|
149
|
+
sections.push(`[sinain-hud live context — tick #${entry.id}]`);
|
|
150
|
+
|
|
151
|
+
// Digest (always full)
|
|
152
|
+
sections.push(`## Digest\n${digest}`);
|
|
153
|
+
|
|
154
|
+
// Active context (always included)
|
|
155
|
+
const currentApp = normalizeAppName(context.currentApp);
|
|
156
|
+
sections.push(`## Active Context\nApp: ${currentApp}`);
|
|
157
|
+
if (context.appHistory.length > 0) {
|
|
158
|
+
sections.push(`App history: ${context.appHistory.map(a => normalizeAppName(a.app)).join(" → ")}`);
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
// Errors — extracted from OCR, full stack traces in rich mode
|
|
162
|
+
const errors = context.screen.filter(e => hasErrorPattern(e.ocr));
|
|
163
|
+
const hasErrors = errors.length > 0;
|
|
164
|
+
const hasQuestion = escalationReason?.startsWith("question:");
|
|
165
|
+
|
|
166
|
+
// Privacy levels for agent_gateway destination
|
|
167
|
+
let ocrLevel: import("../types.js").PrivacyLevel = "full";
|
|
168
|
+
let audioLevel: import("../types.js").PrivacyLevel = "full";
|
|
169
|
+
let titlesLevel: import("../types.js").PrivacyLevel = "full";
|
|
170
|
+
try {
|
|
171
|
+
ocrLevel = levelFor("screen_ocr", "agent_gateway");
|
|
172
|
+
audioLevel = levelFor("audio_transcript", "agent_gateway");
|
|
173
|
+
titlesLevel = levelFor("window_titles", "agent_gateway");
|
|
174
|
+
} catch { /* privacy not initialized */ }
|
|
175
|
+
|
|
176
|
+
const applyOcr = (text: string) => applyLevel(text.slice(0, context.preset.maxOcrChars), ocrLevel, "ocr");
|
|
177
|
+
const applyAudio = (text: string) => applyLevel(text.slice(0, context.preset.maxTranscriptChars), audioLevel, "audio");
|
|
178
|
+
const applyTitle = (title: string | undefined) => title ? applyLevel(title, titlesLevel, "titles") : "";
|
|
179
|
+
|
|
180
|
+
// In selective mode, prioritize sections based on escalation reason
|
|
181
|
+
// In focus/rich modes, include everything
|
|
182
|
+
if (mode === "selective") {
|
|
183
|
+
// Error-triggered: prioritize errors, then screen
|
|
184
|
+
if (hasErrors) {
|
|
185
|
+
sections.push("## Errors (high priority)");
|
|
186
|
+
for (const e of errors) {
|
|
187
|
+
sections.push(`\`\`\`\n${applyOcr(e.ocr)}\n\`\`\``);
|
|
188
|
+
}
|
|
189
|
+
// Include screen context (reduced)
|
|
190
|
+
if (context.screen.length > 0) {
|
|
191
|
+
sections.push("## Screen (recent OCR)");
|
|
192
|
+
for (const e of context.screen.slice(0, 5)) { // Limit in selective mode
|
|
193
|
+
const ago = Math.round((Date.now() - e.ts) / 1000);
|
|
194
|
+
const app = normalizeAppName(e.meta.app);
|
|
195
|
+
const title = applyTitle(e.meta.windowTitle);
|
|
196
|
+
const titlePart = title ? ` [${title}]` : "";
|
|
197
|
+
sections.push(`- [${ago}s ago] [${app}]${titlePart} ${applyOcr(e.ocr)}`);
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
// Question-triggered: prioritize audio, then screen
|
|
202
|
+
else if (hasQuestion) {
|
|
203
|
+
if (context.audio.length > 0) {
|
|
204
|
+
sections.push("## Audio (recent transcripts)");
|
|
205
|
+
for (const e of context.audio) {
|
|
206
|
+
const ago = Math.round((Date.now() - e.ts) / 1000);
|
|
207
|
+
sections.push(`- [${ago}s ago] "${applyAudio(e.text)}"`);
|
|
208
|
+
}
|
|
209
|
+
}
|
|
210
|
+
// Include screen context (reduced)
|
|
211
|
+
if (context.screen.length > 0) {
|
|
212
|
+
sections.push("## Screen (recent OCR)");
|
|
213
|
+
for (const e of context.screen.slice(0, 5)) {
|
|
214
|
+
const ago = Math.round((Date.now() - e.ts) / 1000);
|
|
215
|
+
const app = normalizeAppName(e.meta.app);
|
|
216
|
+
const title = applyTitle(e.meta.windowTitle);
|
|
217
|
+
const titlePart = title ? ` [${title}]` : "";
|
|
218
|
+
sections.push(`- [${ago}s ago] [${app}]${titlePart} ${applyOcr(e.ocr)}`);
|
|
219
|
+
}
|
|
220
|
+
}
|
|
221
|
+
}
|
|
222
|
+
// Other triggers: balanced sections
|
|
223
|
+
else {
|
|
224
|
+
if (context.screen.length > 0) {
|
|
225
|
+
sections.push("## Screen (recent OCR)");
|
|
226
|
+
for (const e of context.screen) {
|
|
227
|
+
const ago = Math.round((Date.now() - e.ts) / 1000);
|
|
228
|
+
const app = normalizeAppName(e.meta.app);
|
|
229
|
+
const title = applyTitle(e.meta.windowTitle);
|
|
230
|
+
const titlePart = title ? ` [${title}]` : "";
|
|
231
|
+
sections.push(`- [${ago}s ago] [${app}]${titlePart} ${applyOcr(e.ocr)}`);
|
|
232
|
+
}
|
|
233
|
+
}
|
|
234
|
+
if (context.audio.length > 0) {
|
|
235
|
+
sections.push("## Audio (recent transcripts)");
|
|
236
|
+
for (const e of context.audio) {
|
|
237
|
+
const ago = Math.round((Date.now() - e.ts) / 1000);
|
|
238
|
+
sections.push(`- [${ago}s ago] "${applyAudio(e.text)}"`);
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
} else {
|
|
243
|
+
// Focus/rich mode: include all sections
|
|
244
|
+
if (hasErrors) {
|
|
245
|
+
sections.push("## Errors (high priority)");
|
|
246
|
+
for (const e of errors) {
|
|
247
|
+
sections.push(`\`\`\`\n${applyOcr(e.ocr)}\n\`\`\``);
|
|
248
|
+
}
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
if (context.screen.length > 0) {
|
|
252
|
+
sections.push("## Screen (recent OCR)");
|
|
253
|
+
for (const e of context.screen) {
|
|
254
|
+
const ago = Math.round((Date.now() - e.ts) / 1000);
|
|
255
|
+
const app = normalizeAppName(e.meta.app);
|
|
256
|
+
const title = applyTitle(e.meta.windowTitle);
|
|
257
|
+
const titlePart = title ? ` [${title}]` : "";
|
|
258
|
+
sections.push(`- [${ago}s ago] [${app}]${titlePart} ${applyOcr(e.ocr)}`);
|
|
259
|
+
}
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
if (context.audio.length > 0) {
|
|
263
|
+
sections.push("## Audio (recent transcripts)");
|
|
264
|
+
for (const e of context.audio) {
|
|
265
|
+
const ago = Math.round((Date.now() - e.ts) / 1000);
|
|
266
|
+
sections.push(`- [${ago}s ago] "${applyAudio(e.text)}"`);
|
|
267
|
+
}
|
|
268
|
+
}
|
|
269
|
+
}
|
|
270
|
+
|
|
271
|
+
// Mode-specific instructions (now context-aware)
|
|
272
|
+
sections.push(getInstructions(mode, context));
|
|
273
|
+
|
|
274
|
+
// Stale escalation hint — forces a proactive response after prolonged silence
|
|
275
|
+
if (escalationReason === "stale") {
|
|
276
|
+
sections.push(`## Note: Stale Escalation
|
|
277
|
+
No escalation has happened recently. The user's screen feed is still active but
|
|
278
|
+
the local analyzer reported idle/no-change. Provide a PROACTIVE response:
|
|
279
|
+
- Share a relevant insight, tip, or connection to what the user was working on earlier
|
|
280
|
+
- If context is minimal, tell a short clever joke (tech humor, wordplay — keep it fresh)
|
|
281
|
+
- Do NOT describe the idle state or say "standing by"
|
|
282
|
+
- Do NOT respond with NO_REPLY — a response is always required for stale escalations`);
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
// Append inline feedback summary if available
|
|
286
|
+
if (recentFeedback && recentFeedback.length > 0) {
|
|
287
|
+
sections.push(formatInlineFeedback(recentFeedback));
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
sections.push("Respond naturally — this will appear on the user's HUD overlay.");
|
|
291
|
+
|
|
292
|
+
return sections.join("\n\n");
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
/**
|
|
296
|
+
* Format a compact inline feedback section for escalation messages.
|
|
297
|
+
* Shows recent performance so the agent can calibrate its response style.
|
|
298
|
+
*/
|
|
299
|
+
function formatInlineFeedback(records: FeedbackRecord[]): string {
|
|
300
|
+
const withSignals = records.filter(r => r.signals.compositeScore !== 0 || r.signals.errorCleared !== null);
|
|
301
|
+
if (withSignals.length === 0) return "";
|
|
302
|
+
|
|
303
|
+
const scores = withSignals.map(r => r.signals.compositeScore);
|
|
304
|
+
const avg = scores.reduce((a, b) => a + b, 0) / scores.length;
|
|
305
|
+
|
|
306
|
+
const errorsCleared = withSignals.filter(r => r.signals.errorCleared === true).length;
|
|
307
|
+
const errorsTotal = withSignals.filter(r => r.signals.errorCleared !== null).length;
|
|
308
|
+
const reEscalated = withSignals.filter(r => r.signals.noReEscalation === false).length;
|
|
309
|
+
|
|
310
|
+
const recentParts = withSignals.slice(0, 5).map(r => {
|
|
311
|
+
const ok = r.signals.compositeScore >= 0.2;
|
|
312
|
+
const icon = ok ? "✓" : "✗";
|
|
313
|
+
const score = r.signals.compositeScore.toFixed(1);
|
|
314
|
+
const tags = r.tags.filter(t => !t.startsWith("app:")).slice(0, 2).join(", ");
|
|
315
|
+
return `${icon} ${score} (${tags || "general"})`;
|
|
316
|
+
});
|
|
317
|
+
|
|
318
|
+
const parts = [`Score: ${avg.toFixed(2)} avg`];
|
|
319
|
+
if (errorsTotal > 0) parts.push(`Errors cleared: ${errorsCleared}/${errorsTotal}`);
|
|
320
|
+
parts.push(`Re-escalated: ${reEscalated}/${withSignals.length}`);
|
|
321
|
+
|
|
322
|
+
return `## Recent Feedback (last ${withSignals.length} escalations)\n${parts.join(" | ")}\nRecent: ${recentParts.join(" | ")}`;
|
|
323
|
+
}
|