clementine-agent 1.18.77 → 1.18.78

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,54 @@
1
/**
 * Goal evaluation — PRD Phase 1.
 *
 * Two evaluators run at the END of a successful cron run, when the Task
 * defines `successSchema` (JSON Schema validated against the agent's output)
 * and/or `successCriteriaText` (free-text criterion graded by an evaluator
 * sub-agent). The verdicts merge into a single `goalCheck` object that
 * gets stamped on the run's CronRunEntry.
 *
 * Design constraints:
 * - Never block run completion. Any thrown error becomes status='error' on
 *   goalCheck and the rest of the run logs unchanged.
 * - Bounded budgets — schema validation is sub-millisecond; evaluator agent
 *   gets max_turns=1, ~30s wall clock, Haiku-class model.
 * - No new top-level deps — ajv is a transitive install; we import it lazily
 *   inside the function so test fixtures that don't need it never load it.
 */
import type { CronJobDefinition, CronRunEntry } from '../types.js';
/** Outcome of validating the agent's output against the Task's JSON Schema. */
type SchemaResult = {
    /** True when the extracted JSON satisfied the schema. */
    pass: boolean;
    /** Human-readable validation errors (capped at 5 entries; empty on pass). */
    errors: string[];
    /** False when no JSON could be extracted, so validation never ran. */
    tried: boolean;
};
/** Verdict produced by the evaluator sub-agent. */
type EvaluatorResult = {
    /** True when the evaluator answered PASS. */
    pass: boolean;
    /** One-sentence justification from the verdict line (capped at 280 chars). */
    reason: string;
};
/**
 * Validate the agent's response against a JSON Schema. Returns:
 *  - tried=false if no JSON could be extracted from the response
 *  - tried=true with pass + errors otherwise
 * Schema-compile errors throw — caller catches.
 */
export declare function validateAgainstSchema(responseText: string, schema: Record<string, unknown>): Promise<SchemaResult>;
/**
 * Ask a small evaluator sub-agent whether the run accomplished the
 * `successCriteriaText` criterion. Returns null if the evaluator failed
 * to produce a parseable verdict (caller treats null as goalCheck.status='error').
 *
 * The evaluator is intentionally minimal — Haiku, max_turns=1, focused
 * system prompt, ~30s budget. We're grading text, not running tools.
 */
export declare function evaluateAgainstCriterion(responseText: string, criterion: string, opts?: {
    model?: string;
    timeoutMs?: number;
}): Promise<EvaluatorResult | null>;
/**
 * Orchestrator: runs whichever evaluators are configured on the Task and
 * merges their verdicts into a single goalCheck record. Returns undefined
 * when no goal is configured — the field then stays absent on the run entry.
 */
export declare function runGoalCheck(responseText: string, job: CronJobDefinition): Promise<CronRunEntry['goalCheck']>;
export {};
//# sourceMappingURL=goal-evaluator.d.ts.map
@@ -0,0 +1,235 @@
1
+ /**
2
+ * Goal evaluation — PRD Phase 1.
3
+ *
4
+ * Two evaluators run at the END of a successful cron run, when the Task
5
+ * defines `successSchema` (JSON Schema validated against the agent's output)
6
+ * and/or `successCriteriaText` (free-text criterion graded by an evaluator
7
+ * sub-agent). The verdicts merge into a single `goalCheck` object that
8
+ * gets stamped on the run's CronRunEntry.
9
+ *
10
+ * Design constraints:
11
+ * - Never block run completion. Any thrown error becomes status='error' on
12
+ * goalCheck and the rest of the run logs unchanged.
13
+ * - Bounded budgets — schema validation is sub-millisecond; evaluator agent
14
+ * gets max_turns=1, ~30s wall clock, Haiku-class model.
15
+ * - No new top-level deps — ajv is a transitive install; we import it lazily
16
+ * inside the function so test fixtures that don't need it never load it.
17
+ */
18
/**
 * Try to extract a JSON value from the agent's response. Candidate texts are
 * tried in order of reliability: the whole response, then the first fenced
 * ```json block (the common Claude output shape), then the widest {...}
 * substring. The first candidate that parses wins; otherwise null.
 */
function extractJson(responseText) {
    // Only a non-empty string can carry JSON.
    if (typeof responseText !== 'string' || !responseText)
        return null;
    const candidates = [responseText];
    // Fenced ```json ... ``` block (bare ``` fences accepted too).
    const fence = /```(?:json|JSON)?\s*([\s\S]*?)```/.exec(responseText);
    if (fence?.[1])
        candidates.push(fence[1].trim());
    // First '{' through the last '}' — greedy, catches prose-wrapped objects.
    const open = responseText.indexOf('{');
    const close = responseText.lastIndexOf('}');
    if (open >= 0 && close > open)
        candidates.push(responseText.slice(open, close + 1));
    for (const text of candidates) {
        try {
            return JSON.parse(text);
        }
        catch { /* try the next candidate */ }
    }
    return null;
}
50
/**
 * Validate the agent's response against a JSON Schema. Returns:
 *  - tried=false if no JSON could be extracted from the response
 *  - tried=true with pass + errors otherwise
 * Schema-compile errors throw — caller catches.
 *
 * @param responseText raw agent output; surrounding prose is tolerated
 * @param schema JSON Schema object from the Task's `successSchema`
 */
export async function validateAgainstSchema(responseText, schema) {
    const candidate = extractJson(responseText);
    if (candidate === null) {
        return { tried: false, pass: false, errors: ['No JSON object found in agent response'] };
    }
    // Lazy import so this module costs nothing when no Task has a schema.
    const ajvMod = await import('ajv').catch(() => null);
    if (!ajvMod) {
        throw new Error('ajv not available — cannot validate success_schema');
    }
    // CJS/ESM interop: ajv@8 ships as CJS, so depending on the Node version
    // and loader the constructor can surface as the namespace itself, as
    // .default, or double-wrapped as .default.default. Pick the first callable
    // shape instead of assuming one — `new` on a namespace object would throw
    // an opaque TypeError.
    const AjvCtor = [ajvMod.default?.default, ajvMod.default, ajvMod].find((c) => typeof c === 'function');
    if (!AjvCtor) {
        throw new Error('ajv loaded but exposed no constructor — cannot validate success_schema');
    }
    // allErrors: collect every violation (we surface up to 5 below);
    // strict:false: tolerate loosely-authored user schemas.
    const ajv = new AjvCtor({ allErrors: true, strict: false });
    const validator = ajv.compile(schema);
    const ok = validator(candidate);
    if (ok)
        return { tried: true, pass: true, errors: [] };
    // ajv stamps errors on the compiled validator; the instance fallback covers
    // older versions that put them on the ajv instance instead.
    const rawErrors = validator.errors ?? ajv.errors ?? [];
    const errs = rawErrors.slice(0, 5).map((e) => {
        const path = e.instancePath || '';
        const msg = e.message || 'invalid';
        return path ? `${path} ${msg}` : msg;
    });
    return { tried: true, pass: false, errors: errs.length ? errs : ['validation failed'] };
}
85
/**
 * Ask a small evaluator sub-agent whether the run accomplished the
 * `successCriteriaText` criterion. Returns null if the evaluator failed
 * to produce a parseable verdict (caller treats null as goalCheck.status='error').
 *
 * The evaluator is intentionally minimal — Haiku, max_turns=1, focused
 * system prompt, ~30s budget. We're grading text, not running tools.
 *
 * @param responseText run output to grade (truncated to 8000 chars)
 * @param criterion free-text success criterion (truncated to 2000 chars)
 * @param opts.model evaluator model override
 * @param opts.timeoutMs hard wall-clock budget (default 30s)
 */
export async function evaluateAgainstCriterion(responseText, criterion, opts = {}) {
    const trimmedResponse = (responseText || '').slice(0, 8000);
    const trimmedCriterion = (criterion || '').slice(0, 2000);
    if (!trimmedCriterion)
        return null;
    // Lazy import — most runs have no criterion and never pay for the SDK.
    const sdk = await import('@anthropic-ai/claude-agent-sdk').catch(() => null);
    if (!sdk || typeof sdk.query !== 'function') {
        return null;
    }
    const systemPrompt = 'You are a strict evaluator. Grade whether a scheduled task accomplished its stated goal.\n' +
        'Reply with EXACTLY one line in this format:\n' +
        'PASS — <one-sentence reason> | FAIL — <one-sentence reason>\n' +
        'Be honest. If the run did not achieve the goal, say FAIL even if the agent claimed success.';
    const userPrompt = `GOAL:\n${trimmedCriterion}\n\nRUN OUTPUT:\n${trimmedResponse}\n\nVerdict:`;
    const timeoutMs = opts.timeoutMs ?? 30_000;
    const model = opts.model ?? 'claude-haiku-4-5-20251001';
    // Race the SDK query against a hard timeout so a hung evaluator never
    // blocks run logging.
    const queryPromise = (async () => {
        let collected = '';
        try {
            const queryFn = sdk.query;
            const iter = queryFn({
                prompt: userPrompt,
                options: {
                    systemPrompt,
                    model,
                    maxTurns: 1,
                    permissionMode: 'default',
                    allowedTools: [],
                    settingSources: [],
                    // No tools, no network beyond model — purely text-in / text-out.
                },
            });
            for await (const message of iter) {
                const m = message;
                if (m.type === 'assistant' && Array.isArray(m.content)) {
                    for (const block of m.content) {
                        const b = block;
                        if (b.type === 'text' && typeof b.text === 'string')
                            collected += b.text;
                    }
                }
                else if (m.type === 'result' && typeof m.result === 'string') {
                    collected += m.result;
                }
            }
        }
        catch {
            return null;
        }
        return collected;
    })();
    // Keep a handle on the timer so we can cancel it once the race settles —
    // otherwise a fast verdict leaves a pending timeout (up to 30s) holding
    // the Node event loop open, which delays exit in the standalone CLI path.
    let timeoutHandle;
    const timeoutPromise = new Promise((resolve) => {
        timeoutHandle = setTimeout(() => resolve(null), timeoutMs);
    });
    let collected;
    try {
        collected = await Promise.race([queryPromise, timeoutPromise]);
    }
    finally {
        clearTimeout(timeoutHandle);
    }
    if (!collected || typeof collected !== 'string')
        return null;
    // Parse the strict verdict line. Accept variants: "PASS — reason", "FAIL: reason",
    // "Verdict: PASS — reason", etc.
    const match = collected.match(/\b(PASS|FAIL)\b\s*[—\-:]?\s*(.+)/i);
    if (!match)
        return null;
    const verdict = match[1].toUpperCase() === 'PASS';
    const reason = (match[2] || '').replace(/[\r\n].*$/s, '').trim().slice(0, 280);
    return { pass: verdict, reason: reason || (verdict ? 'Pass' : 'Fail') };
}
159
/**
 * Orchestrator: runs whichever evaluators are configured on the Task and
 * merges their verdicts into a single goalCheck record. Returns undefined
 * when no goal is configured — the field then stays absent on the run entry.
 *
 * Status semantics:
 *  - 'error'  a configured check threw or produced no verdict (never marks
 *             the run itself as failed)
 *  - 'fail'   every configured check completed and at least one failed
 *  - 'pass'   every configured check completed and all passed
 */
export async function runGoalCheck(responseText, job) {
    const hasSchema = !!(job.successSchema && Object.keys(job.successSchema).length > 0);
    const hasCriterion = !!(job.successCriteriaText && job.successCriteriaText.trim());
    if (!hasSchema && !hasCriterion)
        return undefined;
    let schemaResult = null;
    let evaluatorResult = null;
    let errored = false;
    let errorMessage = '';
    if (hasSchema) {
        try {
            schemaResult = await validateAgainstSchema(responseText, job.successSchema);
        }
        catch (err) {
            errored = true;
            errorMessage = `schema validator threw: ${String(err).slice(0, 200)}`;
        }
    }
    if (hasCriterion) {
        try {
            evaluatorResult = await evaluateAgainstCriterion(responseText, job.successCriteriaText);
            if (evaluatorResult === null && !errored) {
                // Treat unparseable evaluator output as 'error' rather than 'fail' — we
                // don't want a flaky evaluator to mark a healthy run as failed.
                errored = true;
                errorMessage = 'evaluator did not return a parseable PASS/FAIL verdict';
            }
        }
        catch (err) {
            errored = true;
            errorMessage = `evaluator threw: ${String(err).slice(0, 200)}`;
        }
    }
    const mode = hasSchema && hasCriterion ? 'both' : hasSchema ? 'schema' : 'evaluator';
    // `errored` implies the corresponding result is still null (every error
    // path above leaves its result unset), so an errored check can never also
    // carry a definitive verdict — 'error' takes precedence outright.
    let status;
    if (errored) {
        status = 'error';
    }
    else {
        // A schema check also counts as failed when tried=false — no JSON
        // could be extracted from the response at all.
        const schemaFailed = schemaResult !== null && (!schemaResult.pass || !schemaResult.tried);
        const evaluatorFailed = evaluatorResult !== null && !evaluatorResult.pass;
        status = schemaFailed || evaluatorFailed ? 'fail' : 'pass';
    }
    const out = { status, mode };
    if (schemaResult) {
        // schemaPass is only true when validation actually ran AND passed.
        out.schemaPass = schemaResult.pass && schemaResult.tried;
        if (!schemaResult.pass || !schemaResult.tried) {
            out.schemaErrors = schemaResult.errors.slice(0, 5);
        }
    }
    if (evaluatorResult) {
        out.evaluatorPass = evaluatorResult.pass;
        out.evaluatorReason = evaluatorResult.reason;
    }
    if (errored && errorMessage && !out.evaluatorReason) {
        // Stash the error in evaluatorReason if we don't already have one — the
        // dashboard surfaces this string in the tooltip.
        out.evaluatorReason = errorMessage;
    }
    return out;
}
235
+ //# sourceMappingURL=goal-evaluator.js.map
package/dist/cli/cron.js CHANGED
@@ -140,7 +140,7 @@ export async function cmdCronRun(jobName) {
140
140
  try {
141
141
  const response = await gateway.handleCronJob(job.name, job.prompt, job.tier, job.maxTurns, job.model, job.workDir, job.mode, job.maxHours);
142
142
  const finishedAt = new Date();
143
- runLog.append({
143
+ const entry = {
144
144
  jobName: job.name,
145
145
  startedAt: startedAt.toISOString(),
146
146
  finishedAt: finishedAt.toISOString(),
@@ -148,7 +148,21 @@ export async function cmdCronRun(jobName) {
148
148
  durationMs: finishedAt.getTime() - startedAt.getTime(),
149
149
  attempt: 1,
150
150
  outputPreview: response ? response.slice(0, 200) : undefined,
151
- });
151
+ };
152
+ // PRD Phase 1.1: goal-orientation evaluator (mirrors the daemon path).
153
+ if (job.successSchema || (job.successCriteriaText && job.successCriteriaText.trim())) {
154
+ try {
155
+ const { runGoalCheck } = await import('../agent/goal-evaluator.js');
156
+ const goalCheck = await runGoalCheck(response ?? '', job);
157
+ if (goalCheck)
158
+ entry.goalCheck = goalCheck;
159
+ }
160
+ catch (err) {
161
+ // Never block logging on evaluator failure.
162
+ entry.goalCheck = { status: 'error', mode: 'evaluator', evaluatorReason: `evaluator orchestrator threw: ${String(err).slice(0, 200)}` };
163
+ }
164
+ }
165
+ runLog.append(entry);
152
166
  console.log(response || '(no output)');
153
167
  if (response && response !== '__NOTHING__') {
154
168
  console.log('\n(Note: Standalone runner — output not delivered to channels. Use the daemon for channel delivery.)');
@@ -23274,6 +23274,22 @@ function renderScheduledTaskCard(task) {
23274
23274
  var ok = lr.status === 'ok';
23275
23275
  var statusIcon = ok ? '<span style="color:var(--green)">&#10003;</span>' : '<span style="color:var(--red)">&#10007;</span>';
23276
23276
  lastRunHtml = statusIcon + ' ' + esc(lr.status || 'unknown') + ' · ' + esc(timeAgo(lr.finishedAt || lr.startedAt || ''));
23277
+ // PRD Phase 1.1: goal pill. Orthogonal to status — a run can be status='ok'
23278
+ // but goalCheck.status='fail' (the agent finished cleanly without
23279
+ // accomplishing the stated goal). That's exactly the failure mode the
23280
+ // PRD's goal-orientation feature is designed to surface.
23281
+ if (lr.goalCheck) {
23282
+ var gc = lr.goalCheck;
23283
+ var gIcon = gc.status === 'pass' ? '🎯' : gc.status === 'fail' ? '✗' : gc.status === 'error' ? '⚠' : '';
23284
+ var gColor = gc.status === 'pass' ? 'var(--green)' : gc.status === 'fail' ? 'var(--red)' : 'var(--yellow)';
23285
+ var gLabel = gc.status === 'pass' ? 'goal met' : gc.status === 'fail' ? 'goal not met' : gc.status === 'error' ? 'goal eval failed' : '';
23286
+ var gTip = '';
23287
+ if (gc.evaluatorReason) gTip = gc.evaluatorReason;
23288
+ else if (Array.isArray(gc.schemaErrors) && gc.schemaErrors.length) gTip = 'Schema errors: ' + gc.schemaErrors.join('; ');
23289
+ if (gIcon && gLabel) {
23290
+ lastRunHtml += ' <span style="color:' + gColor + ';font-size:11px;font-weight:500" title="' + esc(gTip || gLabel) + '">· ' + gIcon + ' ' + esc(gLabel) + '</span>';
23291
+ }
23292
+ }
23277
23293
  // "ran with: …" — surface the skills + MCP that were live for this run.
23278
23294
  var ranWith = [];
23279
23295
  if (Array.isArray(lr.skillsApplied) && lr.skillsApplied.length > 0) {
@@ -23380,8 +23396,23 @@ function renderRecentHistoryList(runs) {
23380
23396
  var preview = String(entry.outputPreview).slice(0, 140);
23381
23397
  errorPreview = '<div style="font-size:11px;color:var(--text-muted);margin-top:2px;word-break:break-word">' + esc(preview) + '</div>';
23382
23398
  }
23383
- rowsHtml += '<div class="history-row" data-trace-job="' + esc(jobName) + '" style="display:grid;grid-template-columns:24px minmax(180px,1.2fr) minmax(180px,1fr) 90px auto;gap:10px;align-items:start;padding:8px 14px;border-bottom:1px solid var(--border);cursor:pointer">'
23399
+ // PRD Phase 1.1: goal cell. Empty cell when no goal configured (status='skipped'
23400
+ // returned by runGoalCheck means "no goal" — but we omit goalCheck entirely
23401
+ // in that case, so missing field == no goal). The cell stays present in the
23402
+ // grid for column alignment.
23403
+ var goalCellHtml = '<div></div>';
23404
+ if (entry.goalCheck) {
23405
+ var gc2 = entry.goalCheck;
23406
+ var gIcon2 = gc2.status === 'pass' ? '🎯' : gc2.status === 'fail' ? '✗' : gc2.status === 'error' ? '⚠' : '';
23407
+ var gColor2 = gc2.status === 'pass' ? 'var(--green)' : gc2.status === 'fail' ? 'var(--red)' : 'var(--yellow)';
23408
+ var gTip2 = gc2.evaluatorReason
23409
+ ? gc2.evaluatorReason
23410
+ : (Array.isArray(gc2.schemaErrors) && gc2.schemaErrors.length ? 'Schema errors: ' + gc2.schemaErrors.join('; ') : gc2.status);
23411
+ goalCellHtml = '<div style="color:' + gColor2 + ';font-size:13px;line-height:18px;text-align:center" title="' + esc(gTip2) + '">' + gIcon2 + '</div>';
23412
+ }
23413
+ rowsHtml += '<div class="history-row" data-trace-job="' + esc(jobName) + '" style="display:grid;grid-template-columns:24px 24px minmax(180px,1.2fr) minmax(180px,1fr) 90px auto;gap:10px;align-items:start;padding:8px 14px;border-bottom:1px solid var(--border);cursor:pointer">'
23384
23414
  + '<div style="color:' + statusColor + ';font-size:14px;line-height:18px;text-align:center" title="' + esc(status) + '">' + statusIcon + '</div>'
23415
+ + goalCellHtml
23385
23416
  + '<div style="min-width:0">'
23386
23417
  + '<div style="font-weight:500;color:var(--text-primary);font-size:13px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap" title="' + esc(jobName) + '">' + esc(jobName) + attemptLabel + '</div>'
23387
23418
  + errorPreview
@@ -23392,8 +23423,10 @@ function renderRecentHistoryList(runs) {
23392
23423
  + '</div>';
23393
23424
  }
23394
23425
  return '<div class="history-list" style="background:var(--bg-secondary);border:1px solid var(--border);border-radius:var(--radius)">'
23395
- + '<div style="display:grid;grid-template-columns:24px minmax(180px,1.2fr) minmax(180px,1fr) 90px auto;gap:10px;padding:8px 14px;border-bottom:1px solid var(--border);font-size:11px;color:var(--text-muted);text-transform:uppercase;letter-spacing:0.04em;font-weight:500">'
23396
- + '<div></div><div>Task</div><div>Started</div><div>Duration</div><div></div>'
23426
+ + '<div style="display:grid;grid-template-columns:24px 24px minmax(180px,1.2fr) minmax(180px,1fr) 90px auto;gap:10px;padding:8px 14px;border-bottom:1px solid var(--border);font-size:11px;color:var(--text-muted);text-transform:uppercase;letter-spacing:0.04em;font-weight:500">'
23427
+ + '<div title="Run status (ok / error / etc.)"></div>'
23428
+ + '<div title="Goal check result — orthogonal to run status">Goal</div>'
23429
+ + '<div>Task</div><div>Started</div><div>Duration</div><div></div>'
23397
23430
  + '</div>'
23398
23431
  + rowsHtml
23399
23432
  + '</div>';
@@ -1236,6 +1236,23 @@ export class CronScheduler {
1236
1236
  this.gateway.injectContext(`discord:user:${DISCORD_OWNER_ID}`, `[Scheduled cron: ${job.name}]`, response);
1237
1237
  }
1238
1238
  }
1239
+ // PRD Phase 1.1: goal-orientation. If the Task has successSchema or
1240
+ // successCriteriaText, run the evaluator now (before logging) so the
1241
+ // entry carries the goalCheck verdict. Errors here NEVER block
1242
+ // logging — runGoalCheck catches its own throws and emits
1243
+ // status='error' on the goalCheck instead.
1244
+ if (job.successSchema || (job.successCriteriaText && job.successCriteriaText.trim())) {
1245
+ try {
1246
+ const { runGoalCheck } = await import('../agent/goal-evaluator.js');
1247
+ const goalCheck = await runGoalCheck(response ?? '', job);
1248
+ if (goalCheck)
1249
+ entry.goalCheck = goalCheck;
1250
+ }
1251
+ catch (err) {
1252
+ logger.warn({ err, job: job.name }, 'Goal evaluator failed — proceeding without goalCheck');
1253
+ entry.goalCheck = { status: 'error', mode: 'evaluator', evaluatorReason: `evaluator orchestrator threw: ${String(err).slice(0, 200)}` };
1254
+ }
1255
+ }
1239
1256
  this._logRun(entry);
1240
1257
  this.logAutonomy('completed', job, { durationMs: entry.durationMs, deliveryFailed: entry.deliveryFailed, advisorApplied: !!advisorApplied });
1241
1258
  // Fire-and-forget: extract procedural skill from successful long-running cron jobs
package/dist/types.d.ts CHANGED
@@ -447,6 +447,24 @@ export interface CronRunEntry {
447
447
  allowedToolsApplied?: string[];
448
448
  /** MCP servers live for this run (post profile + trick allowlist intersection). */
449
449
  mcpServersApplied?: string[];
450
+ /** PRD Phase 1: did the run accomplish what it was supposed to?
451
+ * Computed at run-end when the Task has successSchema or successCriteriaText.
452
+ * - status='pass' both configured checks passed (or the only one configured did)
453
+ * - status='fail' a configured check failed
454
+ * - status='skipped' no goal configured on the Task (don't show the pill)
455
+ * - status='error' evaluator/validator threw; does NOT mark the run failed
456
+ * This is orthogonal to CronRunEntry.status — a run can be status='ok' with
457
+ * goalCheck.status='fail' (the agent finished cleanly but didn't accomplish
458
+ * the stated goal), and that's the failure mode the PRD is designed to surface. */
459
+ goalCheck?: {
460
+ status: 'pass' | 'fail' | 'skipped' | 'error';
461
+ /** Which evaluators ran. 'both' means schema + evaluator agreed. */
462
+ mode: 'schema' | 'evaluator' | 'both';
463
+ schemaPass?: boolean;
464
+ schemaErrors?: string[];
465
+ evaluatorPass?: boolean;
466
+ evaluatorReason?: string;
467
+ };
450
468
  }
451
469
  export interface Models {
452
470
  haiku: string;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "clementine-agent",
3
- "version": "1.18.77",
3
+ "version": "1.18.78",
4
4
  "description": "Clementine — Personal AI Assistant (TypeScript)",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",