@plexor-dev/claude-code-plugin-staging 0.1.0-beta.26 → 0.1.0-beta.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,7 +17,7 @@
17
17
  const path = require('path');
18
18
 
19
19
  // Use lib modules
20
- let ConfigManager, SessionManager, LocalCache, Logger, ServerSync;
20
+ let ConfigManager, SessionManager, LocalCache, Logger, ServerSync, SupervisorEmitter;
21
21
  try {
22
22
  ConfigManager = require('../lib/config');
23
23
  SessionManager = require('../lib/session');
@@ -26,6 +26,8 @@ try {
26
26
  // Issue #701: Phase 2 - Server sync for persistent session state
27
27
  const serverSyncModule = require('../lib/server-sync');
28
28
  ServerSync = serverSyncModule.getServerSync;
29
+ // Phase 1 supervisor UX
30
+ SupervisorEmitter = require('../lib/supervisor').SupervisorEmitter;
29
31
  } catch {
30
32
  // Fallback inline implementations if lib not found
31
33
  const fs = require('fs');
@@ -171,12 +173,18 @@ try {
171
173
  scheduleSync: () => {},
172
174
  needsSync: () => false
173
175
  });
176
+
177
+ // Fallback SupervisorEmitter (no-op)
178
+ SupervisorEmitter = class {
179
+ emit() {}
180
+ };
174
181
  }
175
182
 
176
183
  const logger = new Logger('track-response');
177
184
  const config = new ConfigManager();
178
185
  const cache = new LocalCache();
179
186
  const session = new SessionManager();
187
+ const supervisor = new SupervisorEmitter();
180
188
 
181
189
  // Issue #701: Phase 2 - Initialize server sync (lazy, initialized on first use)
182
190
  let serverSync = null;
@@ -189,7 +197,7 @@ async function getServerSync() {
189
197
  if (settings.apiKey && settings.enabled) {
190
198
  serverSync = ServerSync({
191
199
  apiKey: settings.apiKey,
192
- baseUrl: settings.apiUrl || 'https://api.plexor.dev',
200
+ baseUrl: settings.apiUrl || 'http://127.0.0.1:8000',
193
201
  enabled: settings.serverSyncEnabled !== false
194
202
  });
195
203
  } else {
@@ -221,6 +229,12 @@ async function main() {
221
229
  const plexorMeta = response._plexor;
222
230
  emitPlexorOutcomeSummary(response, plexorMeta, outputTokens);
223
231
 
232
+ // Phase 1 supervisor UX: concise single-line routing summary
233
+ supervisor.emit(response, plexorMeta);
234
+
235
+ // Proactive compact warning: check prompt token count against provider limits
236
+ emitCompactWarning(response, plexorMeta);
237
+
224
238
  // Issue #701: Track ALL responses, not just when enabled
225
239
  // This ensures session stats are always accurate
226
240
  if (plexorMeta) {
@@ -674,3 +688,29 @@ function emitPlexorOutcomeSummary(response, plexorMeta, outputTokens) {
674
688
  logger.ux(msg);
675
689
  }
676
690
  }
691
+
692
/**
 * Proactive compact warning: emit context-size alerts at 70K and 80K prompt tokens.
 *
 * Uses the per-request prompt token count — the first resolvable of
 * `response.plexor_prompt_tokens`, `usage.input_tokens`, `usage.prompt_tokens`,
 * or `plexorMeta.optimized_tokens` — which represents the current context
 * window size for that call.
 *
 * @param {object} response - the full LLM response object
 * @param {object} [plexorMeta] - the _plexor metadata block (may be absent)
 */
function emitCompactWarning(response, plexorMeta) {
  // Without a usable logger there is nowhere to surface the warning.
  if (!logger || typeof logger.ux !== 'function') return;

  // Candidate sources for the prompt token count, in priority order.
  const candidates = [
    response?.plexor_prompt_tokens,
    response?.usage?.input_tokens,
    response?.usage?.prompt_tokens,
    plexorMeta?.optimized_tokens,
  ];

  let promptTokens = null;
  for (const raw of candidates) {
    const parsed = toNumber(raw);
    if (parsed !== null && parsed !== undefined) {
      promptTokens = parsed;
      break;
    }
  }

  // Below the 70K warning threshold (or unknown) — stay silent.
  if (promptTokens === null || promptTokens < 70000) return;

  const tokensK = Math.round(promptTokens / 1000);
  const message =
    promptTokens >= 80000
      ? `\u26a0 Context at ${tokensK}K tokens \u2014 recommend /compact to prevent errors`
      : `Context at ${tokensK}K tokens \u2014 approaching provider limits`;

  logger.ux(message);
}
@@ -0,0 +1,423 @@
1
/**
 * Plexor Supervisor Emitter — Phases 1-5.
 *
 * Surfaces gateway routing decisions on stderr so developers do not have to
 * parse verbose logs. Consumed by track-response.js.
 *
 * Phase 1: basic routing summary
 *   [PLEXOR: Routed to {provider}/{model}, {latency}ms, {routing_source}]
 * Phase 2: enhanced routing with cohort from response body fields
 *   [PLEXOR: {provider}/{model}, {latency}ms, {source} | {cohort}]
 * Phase 3: zero-tool escalation detection (agent_halt / escalation signals)
 *   [PLEXOR: Zero-tool escalation: {provider1} → {provider2}]
 * Phase 4: scaffolding gate blocked detection
 *   [PLEXOR: Scaffolding gate: {model} blocked, using {alternative}]
 * Phase 5: session narration — cumulative tokens, cost, provider reliability,
 *   context warnings, auto-compact suggestions, e.g.
 *   [PLEXOR: Session: 12 turns, $0.42 cost, 45K tokens]
 *   [PLEXOR: Provider reliability (last 5): DeepSeek: 4/5 tool_calls, Gemini: 1/5 tool_calls]
 *   [PLEXOR: Context at 72K tokens — approaching provider limits]
 *   [PLEXOR: Context at 82K — recommend /compact to prevent errors]
 */

// ANSI color codes for stderr output.
const CYAN = '\x1b[36m';
const YELLOW = '\x1b[33m';
const RED = '\x1b[31m';
const MAGENTA = '\x1b[35m';
const RESET = '\x1b[0m';

// Cumulative-context thresholds (tokens) for Phase 5 warnings.
const CONTEXT_WARNING_THRESHOLD = 70000;
const CONTEXT_COMPACT_THRESHOLD = 80000;

// Fix: the reliability digest only ever reads the last 5 history entries,
// so cap retention instead of letting the array grow for the whole session
// (previously an unbounded memory leak on long sessions).
const PROVIDER_HISTORY_LIMIT = 5;

class SupervisorEmitter {
  /**
   * @param {object} [opts]
   * @param {boolean} [opts.enabled] — honour PLEXOR_SUPERVISOR env var (default true)
   */
  constructor(opts = {}) {
    // The env var wins over opts: any of 0/false/no/off (case-insensitive) disables.
    const envFlag = process.env.PLEXOR_SUPERVISOR;
    if (envFlag !== undefined) {
      this.enabled = !/^(0|false|no|off)$/i.test(String(envFlag));
    } else {
      this.enabled = opts.enabled !== false;
    }

    // Phase 5: session-level state.
    this._turnCount = 0;
    this._cumulativeTokens = 0;
    this._cumulativeCost = 0;
    // Capped at PROVIDER_HISTORY_LIMIT entries: { provider, hadToolCalls }.
    this._providerHistory = [];
  }

  /**
   * Build the Phase 2 enhanced supervisor summary from a gateway response.
   * Reads plexor_provider_used, plexor_selected_model, plexor_latency_ms,
   * plexor_routing_source from the response body (not just headers).
   *
   * Format: [PLEXOR: provider/model, latencyms, source | cohort]
   *
   * @param {object} response — the full LLM response object
   * @param {object} [plexorMeta] — the _plexor metadata block (may be absent)
   * @returns {string|null}
   */
  buildSummary(response, plexorMeta) {
    if (!response || typeof response !== 'object') {
      return null;
    }

    const provider = this._resolveProvider(response, plexorMeta);
    const model = this._resolveModel(response, plexorMeta);
    const latencyMs = this._resolveLatency(response, plexorMeta);
    const routingSource = this._resolveRoutingSource(response, plexorMeta);
    const cohort = this._resolveCohort(response, plexorMeta);

    // Need at least provider or model to emit anything useful.
    if (!provider && !model) {
      return null;
    }

    const target = [provider, model].filter(Boolean).join('/');
    const parts = [target];

    if (latencyMs !== null) {
      parts.push(`${latencyMs}ms`);
    }

    if (routingSource) {
      parts.push(routingSource);
    }

    let line = parts.join(', ');

    if (cohort) {
      line += ` | ${cohort}`;
    }

    return `[PLEXOR: ${line}]`;
  }

  /**
   * Phase 3: Detect zero-tool escalation signals in the response.
   * Fires when agent_halt is set or escalation_chain / fallback provider data
   * indicates a provider switch due to tool incapability.
   *
   * @param {object} response
   * @param {object} [plexorMeta]
   * @returns {string|null} — escalation message or null
   */
  buildEscalationNotice(response, plexorMeta) {
    if (!response || typeof response !== 'object') {
      return null;
    }

    const agentHalt = this._toBool(
      response?.plexor_agent_halt ??
      response?.plexor?.agent_halt ??
      plexorMeta?.agent_halt
    );

    const escalationChain =
      response?.plexor_escalation_chain ||
      response?.plexor?.escalation_chain ||
      plexorMeta?.escalation_chain ||
      null;

    const fallbackUsed = this._toBool(
      response?.plexor_fallback_used ??
      response?.fallback_used ??
      response?.plexor?.fallback_used
    );

    const originalProvider =
      response?.plexor_original_provider ||
      response?.plexor?.original_provider ||
      plexorMeta?.original_provider ||
      null;

    const currentProvider = this._resolveProvider(response, plexorMeta);

    // Case 1: Explicit escalation chain present (e.g., ["openai-mini", "openai"]).
    if (Array.isArray(escalationChain) && escalationChain.length >= 2) {
      const from = escalationChain[0];
      const to = escalationChain[escalationChain.length - 1];
      return `[PLEXOR: Zero-tool escalation: ${from} \u2192 ${to}]`;
    }

    // Case 2: agent_halt with fallback — provider switched.
    if (agentHalt && fallbackUsed && originalProvider && currentProvider && originalProvider !== currentProvider) {
      return `[PLEXOR: Zero-tool escalation: ${originalProvider} \u2192 ${currentProvider}]`;
    }

    // Case 3: agent_halt alone (escalation happened but we may not know the full chain).
    if (agentHalt && fallbackUsed) {
      const from = originalProvider || 'original';
      const to = currentProvider || 'fallback';
      return `[PLEXOR: Zero-tool escalation: ${from} \u2192 ${to}]`;
    }

    return null;
  }

  /**
   * Phase 4: Detect scaffolding gate blocks.
   * Fires when scaffolding_gate_blocked is present in the response,
   * indicating a model was blocked by the scaffolding gate and an
   * alternative was used.
   *
   * @param {object} response
   * @param {object} [plexorMeta]
   * @returns {string|null}
   */
  buildScaffoldingGateNotice(response, plexorMeta) {
    if (!response || typeof response !== 'object') {
      return null;
    }

    const gateBlocked = this._toBool(
      response?.plexor_scaffolding_gate_blocked ??
      response?.scaffolding_gate_blocked ??
      response?.plexor?.scaffolding_gate_blocked ??
      plexorMeta?.scaffolding_gate_blocked
    );

    if (!gateBlocked) {
      return null;
    }

    const blockedModel =
      response?.plexor_scaffolding_blocked_model ||
      response?.plexor?.scaffolding_blocked_model ||
      plexorMeta?.scaffolding_blocked_model ||
      response?.plexor_original_model ||
      response?.plexor?.original_model ||
      plexorMeta?.original_model ||
      'model';

    const alternative =
      response?.plexor_selected_model ||
      response?.plexor?.selected_model ||
      plexorMeta?.recommended_model ||
      response?.model ||
      'alternative';

    return `[PLEXOR: Scaffolding gate: ${blockedModel} blocked, using ${alternative}]`;
  }

  /**
   * Emit all applicable supervisor lines to stderr if enabled.
   *
   * @param {object} response
   * @param {object} [plexorMeta]
   */
  emit(response, plexorMeta) {
    if (!this.enabled) {
      return;
    }

    // Phase 5: Accumulate session state before emitting.
    this._accumulateSessionState(response, plexorMeta);

    // Phase 4: Scaffolding gate (highest priority — emit first if present).
    const scaffoldingNotice = this.buildScaffoldingGateNotice(response, plexorMeta);
    if (scaffoldingNotice) {
      process.stderr.write(`${RED}${scaffoldingNotice}${RESET}\n`);
    }

    // Phase 3: Escalation notice.
    const escalationNotice = this.buildEscalationNotice(response, plexorMeta);
    if (escalationNotice) {
      process.stderr.write(`${YELLOW}${escalationNotice}${RESET}\n`);
    }

    // Phase 2: Enhanced routing summary (always emitted when data available).
    const summary = this.buildSummary(response, plexorMeta);
    if (summary) {
      process.stderr.write(`${CYAN}${summary}${RESET}\n`);
    }

    // Phase 5: Session narration (after per-turn messages).
    this._emitSessionNarration();
  }

  // ---- Phase 5: session narration ----

  /**
   * Accumulate per-turn data into session state.
   * Called at the start of emit() so all session fields are current
   * before any Phase 5 messages are built.
   */
  _accumulateSessionState(response, plexorMeta) {
    this._turnCount++;

    // Cumulative token counter — read prompt_tokens from usage block.
    const usage = response?.usage || response?.plexor?.usage || {};
    const promptTokens = Number(usage.prompt_tokens) || 0;
    const completionTokens = Number(usage.completion_tokens) || 0;
    this._cumulativeTokens += promptTokens + completionTokens;

    // Cumulative cost.
    const costUsd = Number(
      response?.plexor_cost_usd ??
      response?.plexor?.cost_usd ??
      plexorMeta?.cost_usd ??
      0
    );
    if (Number.isFinite(costUsd)) {
      this._cumulativeCost += costUsd;
    }

    // Provider reliability tracking.
    const provider = this._resolveProvider(response, plexorMeta);
    if (provider) {
      const stopReason =
        response?.stop_reason ||
        response?.choices?.[0]?.finish_reason ||
        response?.plexor?.stop_reason ||
        null;
      this._providerHistory.push({
        provider,
        hadToolCalls: stopReason === 'tool_use' || stopReason === 'tool_calls',
      });
      // Fix: keep only the most recent entries; the digest reads at most the
      // last 5 and unbounded growth leaked memory over long sessions.
      if (this._providerHistory.length > PROVIDER_HISTORY_LIMIT) {
        this._providerHistory.splice(0, this._providerHistory.length - PROVIDER_HISTORY_LIMIT);
      }
    }
  }

  /**
   * Emit session-level narration lines based on accumulated state.
   */
  _emitSessionNarration() {
    // Session summary line — every turn.
    const tokenStr = this._formatTokenCount(this._cumulativeTokens);
    const costStr = this._cumulativeCost < 0.01
      ? `$${this._cumulativeCost.toFixed(4)}`
      : `$${this._cumulativeCost.toFixed(2)}`;
    process.stderr.write(
      `${MAGENTA}[PLEXOR: Session: ${this._turnCount} turns, ${costStr} cost, ${tokenStr} tokens]${RESET}\n`
    );

    // Provider reliability digest — every 5th turn.
    if (this._turnCount % 5 === 0 && this._providerHistory.length > 0) {
      const digest = this._buildProviderReliabilityDigest();
      if (digest) {
        process.stderr.write(`${MAGENTA}${digest}${RESET}\n`);
      }
    }

    // Compact recommendation at/above 80K cumulative tokens; plain context
    // warning between 70K and 80K.
    if (this._cumulativeTokens >= CONTEXT_COMPACT_THRESHOLD) {
      const kTokens = Math.round(this._cumulativeTokens / 1000);
      process.stderr.write(
        `${YELLOW}[PLEXOR: Context at ${kTokens}K \u2014 recommend /compact to prevent errors]${RESET}\n`
      );
    } else if (this._cumulativeTokens >= CONTEXT_WARNING_THRESHOLD) {
      const kTokens = Math.round(this._cumulativeTokens / 1000);
      process.stderr.write(
        `${YELLOW}[PLEXOR: Context at ${kTokens}K tokens \u2014 approaching provider limits]${RESET}\n`
      );
    }
  }

  /**
   * Build the provider reliability digest from the last 5 history entries.
   * Format: [PLEXOR: Provider reliability (last 5): DeepSeek: 4/5 tool_calls, Gemini: 1/5 tool_calls]
   */
  _buildProviderReliabilityDigest() {
    const recent = this._providerHistory.slice(-5);
    const totals = {};
    const toolHits = {};

    for (const entry of recent) {
      const p = entry.provider;
      totals[p] = (totals[p] || 0) + 1;
      if (entry.hadToolCalls) {
        toolHits[p] = (toolHits[p] || 0) + 1;
      }
    }

    const parts = Object.keys(totals).map(p => {
      const hits = toolHits[p] || 0;
      return `${p}: ${hits}/${totals[p]} tool_calls`;
    });

    if (parts.length === 0) return null;
    return `[PLEXOR: Provider reliability (last 5): ${parts.join(', ')}]`;
  }

  /**
   * Format token count: 1234 -> "1.2K", 123456 -> "123K".
   */
  _formatTokenCount(tokens) {
    if (tokens < 1000) return String(tokens);
    if (tokens < 10000) return `${(tokens / 1000).toFixed(1)}K`;
    return `${Math.round(tokens / 1000)}K`;
  }

  // ---- Phase 5 accessors (for testing) ----

  get turnCount() { return this._turnCount; }
  get cumulativeTokens() { return this._cumulativeTokens; }
  get cumulativeCost() { return this._cumulativeCost; }

  // ---- private helpers ----

  _resolveProvider(response, meta) {
    return (
      response?.plexor_provider_used ||
      response?.plexor?.provider_used ||
      meta?.recommended_provider ||
      null
    );
  }

  _resolveModel(response, meta) {
    return (
      response?.plexor_selected_model ||
      response?.plexor?.selected_model ||
      meta?.recommended_model ||
      response?.model ||
      null
    );
  }

  _resolveLatency(response, meta) {
    const raw =
      response?.plexor_latency_ms ??
      response?.plexor?.latency_ms ??
      meta?.latency_ms ??
      null;
    if (raw === null || raw === undefined) {
      return null;
    }
    const n = Number(raw);
    return Number.isFinite(n) ? Math.round(n) : null;
  }

  _resolveRoutingSource(response, meta) {
    return (
      response?.plexor_routing_source ||
      response?.plexor?.routing_source ||
      meta?.source ||
      null
    );
  }

  _resolveCohort(response, meta) {
    return (
      response?.plexor_cohort ||
      response?.plexor?.cohort ||
      meta?.cohort ||
      null
    );
  }

  // Tri-state boolean parse: true/false for recognised values, null otherwise
  // (null is falsy at call sites, so unknown values read as "not set").
  _toBool(value) {
    if (value === true || value === 'true' || value === '1' || value === 1) return true;
    if (value === false || value === 'false' || value === '0' || value === 0) return false;
    return null;
  }
}
422
+
423
+ module.exports = { SupervisorEmitter };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@plexor-dev/claude-code-plugin-staging",
3
- "version": "0.1.0-beta.26",
3
+ "version": "0.1.0-beta.28",
4
4
  "description": "STAGING - LLM cost optimization plugin for Claude Code (internal testing)",
5
5
  "main": "lib/constants.js",
6
6
  "bin": {