openhermes 1.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +281 -0
- package/autorecall.mjs +167 -0
- package/bootstrap.mjs +255 -0
- package/curator.mjs +470 -0
- package/harness/commands/build-fix.md +60 -0
- package/harness/commands/code-review.md +71 -0
- package/harness/commands/doctor.md +42 -0
- package/harness/commands/learn.md +37 -0
- package/harness/commands/memory-search.md +37 -0
- package/harness/commands/plan.md +53 -0
- package/harness/commands/security.md +93 -0
- package/harness/constitution/soul.md +76 -0
- package/harness/instructions/RUNTIME.md +21 -0
- package/harness/prompts/architect.txt +175 -0
- package/harness/prompts/build-error-resolver.md +37 -0
- package/harness/prompts/code-reviewer.md +33 -0
- package/harness/prompts/e2e-runner.txt +305 -0
- package/harness/prompts/explore.md +29 -0
- package/harness/prompts/planner.md +30 -0
- package/harness/prompts/security-reviewer.md +35 -0
- package/harness/rules/audit.md +84 -0
- package/harness/rules/checkpointing.md +75 -0
- package/harness/rules/context-loading.md +33 -0
- package/harness/rules/credential-exposure.md +0 -0
- package/harness/rules/delegation.md +76 -0
- package/harness/rules/memory-management.md +28 -0
- package/harness/rules/precedence.md +52 -0
- package/harness/rules/promotion.md +46 -0
- package/harness/rules/ranking.md +64 -0
- package/harness/rules/retrieval.md +94 -0
- package/harness/rules/runtime-guards.md +196 -0
- package/harness/rules/self-heal.md +79 -0
- package/harness/rules/session-start.md +34 -0
- package/harness/rules/skills-management.md +165 -0
- package/harness/rules/state-drift.md +192 -0
- package/harness/rules/verification.md +88 -0
- package/harness/skills/.bundled_manifest +17 -0
- package/harness/skills/.usage.json +6 -0
- package/harness/skills/api-design/SKILL.md +523 -0
- package/harness/skills/backend-patterns/SKILL.md +598 -0
- package/harness/skills/coding-standards/SKILL.md +549 -0
- package/harness/skills/e2e-testing/SKILL.md +326 -0
- package/harness/skills/frontend-patterns/SKILL.md +642 -0
- package/harness/skills/frontend-slides/SKILL.md +184 -0
- package/harness/skills/security-review/SKILL.md +495 -0
- package/harness/skills/strategic-compact/SKILL.md +131 -0
- package/harness/skills/tdd-workflow/SKILL.md +463 -0
- package/harness/skills/verification-loop/SKILL.md +126 -0
- package/index.mjs +5 -0
- package/lib/hardening.mjs +113 -0
- package/lib/memory-tools-plugin.mjs +265 -0
- package/lib/schema-validator.mjs +77 -0
- package/lib/tools/_memory.mjs +230 -0
- package/lib/tools/hm_get.mjs +13 -0
- package/lib/tools/hm_latest.mjs +12 -0
- package/lib/tools/hm_list.mjs +13 -0
- package/lib/tools/hm_put.mjs +14 -0
- package/lib/tools/hm_search.mjs +16 -0
- package/package.json +49 -0
- package/schemas/audit.schema.json +61 -0
- package/schemas/backlog.schema.json +42 -0
- package/schemas/checkpoint.schema.json +44 -0
- package/schemas/constraint.schema.json +41 -0
- package/schemas/decision.schema.json +42 -0
- package/schemas/instinct.schema.json +42 -0
- package/schemas/loop-state.schema.json +33 -0
- package/schemas/mistake.schema.json +43 -0
- package/schemas/verification_receipt.schema.json +67 -0
- package/skill-builder.mjs +113 -0
package/curator.mjs
ADDED
|
@@ -0,0 +1,470 @@
|
|
|
1
|
+
import path from "node:path"
|
|
2
|
+
import fs from "node:fs"
|
|
3
|
+
import os from "node:os"
|
|
4
|
+
import { findUnsupportedSchemaKeywords, validateSchema } from "./lib/schema-validator.mjs"
|
|
5
|
+
import { atomicWriteJson, fingerprintEnvironment, fingerprintFile, isTruthy, redactSensitiveText, sanitizeRecord, truncateText } from "./lib/hardening.mjs"
|
|
6
|
+
import { fileURLToPath } from "node:url"
|
|
7
|
+
import { dirname } from "node:path"
|
|
8
|
+
|
|
9
|
+
// Resolve this module's directory so bundled schema files can be located.
const __dirname = dirname(fileURLToPath(import.meta.url))

// Minimum gap between automatic checkpoints (5 minutes) unless { force: true }.
const CHECKPOINT_DEBOUNCE_MS = 300000
// Upper bound (characters) on text injected into the compaction context buffer.
const COMPACTION_CONTEXT_LIMIT = 12000
// Timestamp (ms since epoch) of the last checkpoint write, used for debouncing.
const lastCheckpoint = { ts: 0 }
// IDs of memory records written during this session (surfaced at compaction).
const writtenThisSession = []
// Diagnostic logging is opt-in via OPENCODE_CURATOR_LOGS=1|true|yes.
const CURATOR_LOGS = /^(1|true|yes)$/i.test(process.env.OPENCODE_CURATOR_LOGS || "")
|
|
16
|
+
|
|
17
|
+
/**
 * Emit a diagnostic line to stderr, but only when curator logging has been
 * enabled via the OPENCODE_CURATOR_LOGS environment variable.
 * @param {string} message - Text to write; a trailing newline is appended.
 */
function curatorLog(message) {
  if (CURATOR_LOGS) {
    process.stderr.write(`${message}\n`)
  }
}
|
|
21
|
+
|
|
22
|
+
/**
 * Resolve the OpenHermes harness root directory.
 *
 * Defaults to <home>/.config/opencode/openhermes. When the
 * OPENCODE_ALLOW_PROJECT_HARNESS flag is truthy AND the project contains an
 * accessible .opencode/openhermes/memory directory, the project-local harness
 * takes precedence.
 * @param {string} directory - Project root directory.
 * @returns {string} Absolute path of the harness root.
 */
function getHarnessRoot(directory) {
  const home = process.env.USERPROFILE || os.homedir()
  const globalHarness = path.join(home, ".config", "opencode", "openhermes")
  if (!isTruthy(process.env.OPENCODE_ALLOW_PROJECT_HARNESS)) {
    return globalHarness
  }
  const projectHarness = path.join(directory, ".opencode", "openhermes")
  try {
    // Only honor the project harness when its memory directory exists.
    fs.accessSync(path.join(projectHarness, "memory"))
    return projectHarness
  } catch {
    return globalHarness
  }
}
|
|
36
|
+
|
|
37
|
+
/**
 * Read and parse a JSON file, returning a fallback value when the file is
 * missing or contains invalid JSON.
 * @param {string} fp - Path of the JSON file.
 * @param {*} fallback - Value returned on any read/parse failure.
 * @returns {*} Parsed content, or the fallback.
 */
function readJson(fp, fallback) {
  try {
    const raw = fs.readFileSync(fp, "utf8")
    return JSON.parse(raw)
  } catch {
    return fallback
  }
}
|
|
40
|
+
|
|
41
|
+
/**
 * Build an environment fingerprint for provenance tracking by delegating to
 * fingerprintEnvironment with the harness/project layout.
 * @param {string} root - Harness root directory.
 * @param {string} directory - Project root directory.
 * @param {{name?: string, session_id?: string}|undefined} project - Project info, if any.
 * @returns {object} Fingerprint object (shape defined by fingerprintEnvironment).
 */
function buildEnvironmentFingerprint(root, directory, project) {
  const projectName = project?.name || path.basename(directory)
  const sessionId = project?.session_id || null
  return fingerprintEnvironment({
    cwd: directory,
    harnessRoot: root,
    projectRoot: directory,
    project: projectName,
    sessionId,
  })
}
|
|
50
|
+
|
|
51
|
+
// Placeholder phrases that do not count as real checkpoint content.
const MEANINGLESS_TEXT = /^(n\/a|none|tbd|placeholder|pending|session in progress|no active checkpoint content)$/i

/**
 * Decide whether a value is substantive text: a non-empty string whose trimmed
 * content is not one of the known placeholder phrases.
 * @param {*} value - Candidate value.
 * @returns {boolean} True when the value is meaningful text.
 */
function isMeaningfulText(value) {
  if (typeof value !== "string") return false
  const trimmed = value.trim()
  if (trimmed.length === 0) return false
  return !MEANINGLESS_TEXT.test(trimmed)
}
|
|
54
|
+
|
|
55
|
+
/**
 * Prepare a message for logging: redact sensitive substrings, then clamp the
 * result to a maximum length.
 * @param {string} [message] - Raw message; null/undefined becomes "".
 * @param {number} [limit=160] - Maximum length of the returned text.
 * @returns {string} Redacted, truncated message.
 */
function safeLogMessage(message, limit = 160) {
  const redacted = redactSensitiveText(message || "")
  return truncateText(redacted, limit)
}
|
|
58
|
+
|
|
59
|
+
/**
 * Upsert a lightweight index entry for a memory record into the class index
 * file (memory/<plural>/index.json). A missing or malformed index is replaced
 * with a fresh array.
 * @param {string} root - Harness root directory.
 * @param {string} plural - Plural class directory name (e.g. "checkpoints").
 * @param {object} record - Full memory record being indexed.
 */
function indexEntry(root, plural, record) {
  const indexPath = path.join(root, "memory", plural, "index.json")
  const loaded = readJson(indexPath, [])
  const index = Array.isArray(loaded) ? loaded : []
  const entry = {
    id: record.id,
    summary: record.summary,
    status: record.status,
    updated_at: record.updated_at || record.created_at,
    path: `openhermes/memory/${plural}/${record.id}.json`
  }
  const position = index.findIndex((candidate) => candidate.id === record.id)
  if (position === -1) {
    index.push(entry)
  } else {
    index[position] = entry
  }
  atomicWriteJson(indexPath, index)
}
|
|
75
|
+
|
|
76
|
+
/**
 * Merge a patch into runtime/loop-state.json, stamping updated_at, and persist
 * the result only when it validates against the loop-state schema.
 * @param {string} root - Harness root directory.
 * @param {object} patch - Partial loop-state fields to merge in.
 * @returns {boolean} True when the merged state was validated and written.
 */
function updateLoopState(root, patch) {
  const statePath = path.join(root, "runtime", "loop-state.json")
  const current = readJson(statePath, null) || {}
  const next = {
    ...current,
    ...patch,
    updated_at: patch.updated_at || new Date().toISOString(),
  }
  // Refuse to write anything the schema cannot vouch for.
  const schema = loadSchema("loop-state")
  if (!schema) return false
  if (findUnsupportedSchemaKeywords(schema).length > 0) return false
  if (validateSchema(schema, next, "$").length > 0) return false
  atomicWriteJson(statePath, next)
  return true
}
|
|
93
|
+
|
|
94
|
+
/**
 * Load the JSON schema for a memory class. The user's installed copy under
 * ~/.config/opencode/openhermes/schemas takes precedence; otherwise the schema
 * bundled alongside this module is used.
 * @param {string} classId - Memory class identifier (e.g. "checkpoint").
 * @returns {object|null} Parsed schema, or null when no copy can be read.
 */
function loadSchema(classId) {
  const home = process.env.USERPROFILE || os.homedir()
  const candidates = [
    path.join(home, ".config", "opencode", "openhermes", "schemas", `${classId}.schema.json`),
    path.join(__dirname, "schemas", `${classId}.schema.json`),
  ]
  for (const candidate of candidates) {
    try {
      return JSON.parse(fs.readFileSync(candidate, "utf8"))
    } catch {
      // Fall through to the next candidate location.
    }
  }
  return null
}
|
|
101
|
+
|
|
102
|
+
/**
 * Validate a memory record before persistence.
 *
 * When a JSON schema exists for the record's class it is authoritative
 * (schemas containing unsupported keywords are rejected outright). Without a
 * schema, a minimal structural check of required fields is applied.
 * Checkpoint records additionally require meaningful (non-placeholder)
 * narrative fields and non-empty action/blocker/risk lists.
 * @param {object} record - Candidate memory record.
 * @returns {boolean} True when the record is safe to persist.
 */
function validateRecordAgainstSchema(record) {
  const schema = loadSchema(record.class)
  if (!schema) {
    curatorLog(`[curator] no schema found for class "${record.class}", fallback check`)
    const required = record.class === "checkpoint"
      ? ["id", "class", "summary", "mission", "current_state", "next_actions", "provenance", "created_at", "status"]
      : ["id", "class", "summary", "provenance", "created_at", "status"]
    // Presence check: a field is missing only when it is absent (undefined).
    // The previous test (`!record[r] && record[r] !== null`) wrongly rejected
    // legitimate falsy-but-present values such as "" or 0 while letting
    // null through; absence is what we actually want to detect.
    const missing = required.filter(r => record[r] === undefined)
    if (missing.length) {
      curatorLog(`[curator] validation failed: missing ${missing.join(", ")}`)
      return false
    }
    if (record.class === "checkpoint" && record.provenance && !record.provenance.session_id) {
      curatorLog(`[curator] validation failed: provenance.session_id required`)
      return false
    }
    return true
  }
  const unsupported = findUnsupportedSchemaKeywords(schema)
  if (unsupported.length) {
    curatorLog(`[curator] schema validation failed: unsupported keywords ${unsupported.join(", ")}`)
    return false
  }
  const errors = validateSchema(schema, record, "$")
  if (errors.length) {
    curatorLog(`[curator] schema validation failed: ${errors.join("; ")}`)
    return false
  }
  // Checkpoints must carry substantive content, not placeholder text.
  if (record.class === "checkpoint") {
    const requiredText = [record.mission, record.current_state]
    if (!requiredText.every(isMeaningfulText)) return false
    if (!Array.isArray(record.next_actions) || record.next_actions.length === 0) return false
    if (!Array.isArray(record.blockers) || record.blockers.length === 0) return false
    if (!Array.isArray(record.risk_notes) || record.risk_notes.length === 0) return false
  }
  return true
}
|
|
139
|
+
|
|
140
|
+
/**
 * Write a checkpoint memory record, index it, and update loop-state.
 *
 * Writes are debounced by CHECKPOINT_DEBOUNCE_MS unless options.force is set
 * (used by the compaction hook, which must always capture state). The record
 * content differs between compaction and idle checkpoints.
 * @param {string} root - Harness root directory.
 * @param {{name?: string, session_id?: string}|undefined} project - Project info, if any.
 * @param {string} directory - Project root directory.
 * @param {string} trigger - Event name that caused the checkpoint.
 * @param {string|null} summary - Optional summary override.
 * @param {{force?: boolean}} [options] - force bypasses the debounce window.
 * @returns {Promise<string|null>} The checkpoint id, or null when debounced or invalid.
 */
async function writeCheckpoint(root, project, directory, trigger, summary, options = {}) {
  const now = Date.now()
  if (!options.force && now - lastCheckpoint.ts < CHECKPOINT_DEBOUNCE_MS) return null
  lastCheckpoint.ts = now

  const ts = new Date().toISOString()
  const id = `chk_${ts.replace(/[:.]/g, "-")}`
  const environmentFingerprint = buildEnvironmentFingerprint(root, directory, project)
  const isCompaction = trigger === "experimental.session.compacting"
  const record = {
    id,
    class: "checkpoint",
    scope: "session",
    summary: summary || (isCompaction ? `Pre-compaction checkpoint for ${project?.name || path.basename(directory)}` : `Idle checkpoint for ${project?.name || path.basename(directory)}`),
    mission: isCompaction
      ? "Preserve the current OpenHermes runtime state before compaction trims working context."
      : "Preserve the current OpenHermes session state so the next turn can resume safely.",
    current_state: isCompaction
      ? "A compaction run is starting; runtime memory, recall freshness, and loop-state validation are being hardened before context is reduced."
      : "The session is idle and the curator is maintaining durable state, schema validation, and memory hygiene for the next turn.",
    next_actions: isCompaction
      ? [
          "Verify the pre-compaction checkpoint was written and indexed.",
          "Inject only fresh recall context into the compaction buffer.",
          "Confirm loop-state writes remain schema-valid after compaction.",
        ]
      : [
          "Keep loop-state and memory writes atomic.",
          "Redact sensitive text before persisting new records.",
          "Recheck stale recall cache handling on the next compaction pass.",
        ],
    blockers: [
      "No blocking runtime failure is present; this checkpoint captures a safe handoff state.",
    ],
    risk_notes: isCompaction
      ? [
          "A stale recall cache can reintroduce outdated compaction context if fingerprints drift.",
          "Loop-state writes must continue to satisfy the required status, phase, heartbeat_at, and updated_at fields.",
        ]
      : [
          "Placeholder checkpoint content is no longer acceptable for durable handoff.",
          "Sensitive text must be redacted before memory persistence.",
        ],
    provenance: {
      session_id: project?.session_id || `session-${Date.now()}`,
      harness_root: root,
      project_root: directory,
    },
    description: `Auto-curated checkpoint from curator plugin on ${trigger}`,
    source: "agent",
    status: "active",
    created_at: ts,
    updated_at: ts,
    project: project?.name || path.basename(directory),
    environment_fingerprint: environmentFingerprint,
  }
  // Sanitize (redaction, length clamping) before validation so the record on
  // disk is exactly what was validated.
  const safeRecord = sanitizeRecord(record, { maxStringLength: 4000 })
  if (!validateRecordAgainstSchema(safeRecord)) return null
  const dir = path.join(root, "memory", "checkpoints")
  fs.mkdirSync(dir, { recursive: true })
  atomicWriteJson(path.join(dir, `${id}.json`), safeRecord)
  indexEntry(root, "checkpoints", safeRecord)
  updateLoopState(root, {
    last_checkpoint_id: id,
    phase: trigger,
    heartbeat_at: ts,
    updated_at: ts,
    // Reuse isCompaction instead of re-comparing the raw trigger string (the
    // original duplicated the "experimental.session.compacting" literal here).
    status: isCompaction ? "active" : "idle",
  })
  writtenThisSession.push(id)
  curatorLog(`[curator] checkpoint written: ${id} (trigger: ${trigger})`)
  return id
}
|
|
213
|
+
|
|
214
|
+
/**
 * Append a mistake record describing a session-level error to
 * memory/mistakes/mistakes.jsonl. Message text is redacted and truncated
 * before being persisted.
 * @param {string} root - Harness root directory.
 * @param {{name?: string, session_id?: string}|undefined} project - Project info, if any.
 * @param {string} directory - Project root directory.
 * @param {*} error - Error object (or arbitrary value) that triggered this record.
 * @returns {string} The generated mistake record id.
 */
function writeMistakeRecord(root, project, directory, error) {
  const ts = new Date().toISOString()
  let errorMsg
  if (typeof error === "object" && error !== null) {
    errorMsg = error.message || JSON.stringify(error).slice(0, 200)
  } else {
    errorMsg = error?.toString() || "Unknown error"
  }
  const safeErrorMsg = truncateText(redactSensitiveText(errorMsg), 200)
  const id = `mist_${ts.replace(/[:.]/g, "-")}`
  const environmentFingerprint = buildEnvironmentFingerprint(root, directory, project)
  const sessionId = project?.session_id
  const record = {
    id,
    class: "mistake",
    scope: "harness",
    summary: `Session error: ${safeErrorMsg}`,
    failure: `Session error: ${safeErrorMsg}`,
    root_cause: safeErrorMsg,
    provenance: {
      session_id: sessionId || `session-${Date.now()}`,
      harness_root: root,
      project_root: directory,
    },
    source: "agent",
    type: "other",
    fix: `Auto-generated - investigate via session_id: ${sessionId || "unknown"}`,
    prevention: "curator.js writeMistakeRecord must validate against mistake.schema.json",
    strike: 1,
    status: "active",
    created_at: ts,
    updated_at: ts,
    project: project?.name || path.basename(directory),
    environment_fingerprint: environmentFingerprint,
  }
  const dir = path.join(root, "memory", "mistakes")
  fs.mkdirSync(dir, { recursive: true })
  const fp = path.join(dir, "mistakes.jsonl")
  const line = JSON.stringify(sanitizeRecord(record, { maxStringLength: 4000 })) + "\n"
  try {
    fs.appendFileSync(fp, line)
  } catch {
    // Best-effort fallback: replace the file when appending fails.
    fs.writeFileSync(fp, line)
  }
  curatorLog(`[curator] mistake logged: ${id} - ${safeLogMessage(errorMsg, 80)}`)
  return id
}
|
|
253
|
+
|
|
254
|
+
/**
 * Write an auto-verification receipt for a freshly written checkpoint and
 * add it to the verification_receipts index.
 * @param {string} root - Harness root directory.
 * @param {{name?: string, session_id?: string}|undefined} project - Project info, if any.
 * @param {string} directory - Project root directory.
 * @param {string} checkpointId - Id of the checkpoint this receipt verifies.
 * @returns {string} The generated receipt id.
 */
function writeVerificationReceipt(root, project, directory, checkpointId) {
  const ts = new Date().toISOString()
  const id = `vr_${ts.replace(/[:.]/g, "-")}`
  const artifactPath = path.join(root, "memory", "checkpoints", `${checkpointId}.json`)
  const environmentFingerprint = buildEnvironmentFingerprint(root, directory, project)
  const record = {
    id,
    class: "verification_receipt",
    scope: "session",
    summary: `Auto-verification receipt for checkpoint ${checkpointId}`,
    artifact: `openhermes/memory/checkpoints/${checkpointId}.json`,
    artifact_type: "file",
    // fingerprintFile returns null when the checkpoint file is unreadable;
    // fall back to a path-only fingerprint in that case.
    fingerprint: fingerprintFile(artifactPath) || { path: `openhermes/memory/checkpoints/${checkpointId}.json` },
    environment: {
      cwd: directory,
      // Describe the actual host rather than hardcoding a Windows setup; the
      // previous values ("win32" / "cmd.exe") were wrong on any other OS.
      os: process.platform,
      shell: process.env.ComSpec || process.env.SHELL || "unknown",
      // No runtime source for the provider is visible here; allow an env
      // override and keep the historical default otherwise.
      provider: process.env.OPENCODE_PROVIDER || "lmstudio"
    },
    method: "manual-inspection",
    command: `curator.js auto-verification on session.idle`,
    result: "pass",
    result_detail: `Checkpoint ${checkpointId} written and indexed. Session completed successfully.`,
    provenance: {
      session_id: project?.session_id || `session-${Date.now()}`,
      harness_root: root,
      project_root: directory,
    },
    confidence: 0.8,
    source: "agent",
    status: "active",
    created_at: ts,
    updated_at: ts,
    project: project?.name || path.basename(directory),
    environment_fingerprint: environmentFingerprint,
  }
  const dir = path.join(root, "memory", "verification_receipts")
  fs.mkdirSync(dir, { recursive: true })
  const safeRecord = sanitizeRecord(record, { maxStringLength: 4000 })
  atomicWriteJson(path.join(dir, `${id}.json`), safeRecord)
  indexEntry(root, "verification_receipts", safeRecord)
  curatorLog(`[curator] verification_receipt written: ${id}`)
  return id
}
|
|
298
|
+
|
|
299
|
+
/**
 * session.idle handler: write a debounced idle checkpoint and, when one was
 * actually written (not debounced away), record a verification receipt for it.
 * Errors are logged, never thrown.
 */
async function handleSessionIdle(directory, project) {
  try {
    const root = getHarnessRoot(directory)
    const checkpointId = await writeCheckpoint(root, project, directory, "session.idle", null)
    if (!checkpointId) return
    writeVerificationReceipt(root, project, directory, checkpointId)
  } catch (err) {
    curatorLog(`[curator] handleSessionIdle error: ${safeLogMessage(err.message)}`)
  }
}
|
|
310
|
+
|
|
311
|
+
/**
 * session.compacted handler: mark the loop-state as compacted with a fresh
 * heartbeat. Errors are logged, never thrown.
 */
async function handleSessionCompacted(directory, project) {
  try {
    const root = getHarnessRoot(directory)
    const now = new Date().toISOString()
    updateLoopState(root, {
      status: "compacted",
      last_gate_result: "compaction completed",
      phase: "session.compacted",
      heartbeat_at: now,
      updated_at: now,
    })
  } catch (err) {
    curatorLog(`[curator] handleSessionCompacted error: ${safeLogMessage(err.message)}`)
  }
}
|
|
326
|
+
|
|
327
|
+
/**
 * session.error handler: record the error in loop-state and append a mistake
 * record. The message is redacted/truncated before persistence. Errors raised
 * while handling are logged, never thrown.
 */
async function handleSessionError(directory, project, event) {
  try {
    const root = getHarnessRoot(directory)
    const ts = new Date().toISOString()
    let errorMsg
    if (typeof event?.error === "object" && event.error !== null) {
      errorMsg = event.error.message || JSON.stringify(event.error).slice(0, 200)
    } else {
      errorMsg = event?.error?.toString() || event?.message || "Unknown error"
    }
    const safeErrorMsg = safeLogMessage(errorMsg, 200)
    updateLoopState(root, {
      status: "error",
      phase: "session.error",
      last_error: safeErrorMsg,
      heartbeat_at: ts,
      updated_at: ts,
    })
    writeMistakeRecord(root, project, directory, event.error || event)
  } catch (err) {
    curatorLog(`[curator] handleSessionError error: ${safeLogMessage(err.message)}`)
  }
}
|
|
347
|
+
|
|
348
|
+
// permission.replied handler: persist an audit record of the permission
// decision (allow/deny) so the decision trail survives the session.
// Errors are logged, never thrown.
async function handlePermissionReplied(directory, project, event) {
  try {
    const root = getHarnessRoot(directory)
    const ts = new Date().toISOString()
    const id = `audit_perm_${ts.replace(/[:.]/g, "-")}`
    const environmentFingerprint = buildEnvironmentFingerprint(root, directory, project)
    // Audit record; event.action/tool/pattern may be absent, hence the "?"
    // fallbacks. NOTE(review): the exact event shape is not visible here —
    // confirm against the host's permission.replied payload.
    const record = {
      id,
      class: "audit",
      scope: "harness",
      summary: `Permission reply: ${event.action || "?"} for ${event.tool || "?"}`,
      target: event.tool || "unknown",
      description: `Permission decision: ${event.action || "?"} - tool: ${event.tool || "?"}, pattern: ${event.pattern || "?"}`,
      overall_score: 0,
      checks: [
        {
          name: "permission decision audit",
          status: "pass",
          details: `Permission ${event.action || "?"} logged for tool ${event.tool || "?"}`,
        },
      ],
      top_actions: [
        "Auto-recorded permission event — no manual remediation needed.",
      ],
      integrity: {
        refs_ok: true,
        provenance_ok: true,
        duplicates_ok: true,
      },
      provenance: {
        session_id: project?.session_id || `session-${Date.now()}`,
        harness_root: root,
        project_root: directory,
      },
      source: "agent",
      // Audits are closed immediately: they record a completed decision.
      status: "closed",
      created_at: ts,
      updated_at: ts,
      project: project?.name || path.basename(directory),
      environment_fingerprint: environmentFingerprint,
    }
    // Sanitize before validating so the validated record is what gets stored.
    const safeRecord = sanitizeRecord(record, { maxStringLength: 4000 })
    if (!validateRecordAgainstSchema(safeRecord)) return
    const dir = path.join(root, "memory", "audits")
    fs.mkdirSync(dir, { recursive: true })
    atomicWriteJson(path.join(dir, `${id}.json`), safeRecord)
    indexEntry(root, "audits", safeRecord)
    curatorLog(`[curator] permission audit logged: ${event.tool} -> ${event.action}`)
  } catch (err) {
    curatorLog(`[curator] handlePermissionReplied error: ${safeLogMessage(err.message)}`)
  }
}
|
|
400
|
+
|
|
401
|
+
// OpenCode plugin entry point. Returns the plugin surface:
//  - `event`: dispatches lifecycle events to the curator handlers above;
//  - `"experimental.session.compacting"`: pre-compaction hook that writes a
//    forced checkpoint and injects a harness-state summary (plus fresh
//    autorecall context, when available) into the compaction buffer.
export const CuratorPlugin = async ({ project, directory }) => {
  return {
    event: async ({ event }) => {
      if (event.type === "session.idle") {
        await handleSessionIdle(directory, project)
      } else if (event.type === "session.compacted") {
        await handleSessionCompacted(directory, project)
      } else if (event.type === "session.error") {
        await handleSessionError(directory, project, event)
      } else if (event.type === "permission.replied") {
        await handlePermissionReplied(directory, project, event)
      } else if (event.type === "command.executed") {
        // Log-only: command executions are not persisted to memory.
        curatorLog(`[curator] command executed: ${event.command || "?"}`)
      }
    },
    "experimental.session.compacting": async (input, output) => {
      try {
        const root = getHarnessRoot(directory)
        const projectKey = project?.name || path.basename(directory)
        // Note: both indexes are read BEFORE the forced checkpoint below, so
        // "Latest checkpoint" reflects state prior to this compaction pass.
        const checkpointIndex = readJson(path.join(root, "memory", "checkpoints", "index.json"), [])
        const constraintsIndex = readJson(path.join(root, "memory", "constraints", "index.json"), [])
        const environmentFingerprint = buildEnvironmentFingerprint(root, directory, project)
        // force: true bypasses the checkpoint debounce — compaction must
        // always capture state.
        const preCompactionCheckpointId = await writeCheckpoint(root, project, directory, "experimental.session.compacting", `Pre-compaction checkpoint for ${projectKey}`, { force: true })

        // sort() mutates the local array copy; newest updated_at first.
        const latestCheckpoint = Array.isArray(checkpointIndex) && checkpointIndex.length > 0
          ? checkpointIndex.sort((a, b) => new Date(b.updated_at) - new Date(a.updated_at))[0]
          : null

        const activeConstraints = Array.isArray(constraintsIndex)
          ? constraintsIndex.filter(c => c.status === "active")
          : []

        // Markdown summary injected into the compaction context buffer;
        // null entries (conditional lines) are dropped by filter(Boolean).
        const inject = [
          `## OpenHermes State`,
          `- Project: ${projectKey}`,
          `- Latest checkpoint: ${latestCheckpoint ? latestCheckpoint.summary : "none"}`,
          preCompactionCheckpointId ? `- Pre-compaction checkpoint: ${preCompactionCheckpointId}` : null,
          `- Active constraints: ${activeConstraints.length}`,
          `- Memory writes this session: ${writtenThisSession.length}`,
          writtenThisSession.length > 0 ? `- Recent writes: ${writtenThisSession.slice(-3).join(", ")}` : null,
          `- Session hook: experimental.session.compacting`,
        ].filter(Boolean).join("\n")

        // The autorecall cache is only trusted when its environment
        // fingerprint matches AND its freshness TTL has not expired.
        // NOTE(review): assumes fingerprintEnvironment exposes a `sha256`
        // field — confirm against lib/hardening.mjs.
        const recallCache = readJson(path.join(root, "memory", "recall", "cache.json"), null)
        const cacheMatches = recallCache && recallCache.fingerprint && recallCache.fingerprint.sha256 === environmentFingerprint.sha256
        const cacheFresh = cacheMatches && recallCache.freshness_marker && recallCache.freshness_marker.updated_at
          ? (Date.now() - Date.parse(recallCache.freshness_marker.updated_at) <= (recallCache.freshness_marker.ttl_ms || 0))
          : false
        // output.context may be missing; create it lazily.
        const contextSink = Array.isArray(output.context) ? output.context : (output.context = [])
        if (recallCache && recallCache.context && cacheFresh) {
          const merged = truncateText(`${inject}\n\n${recallCache.context}`, COMPACTION_CONTEXT_LIMIT)
          contextSink.push(merged)
          curatorLog(`[curator] compaction injected harness state + autorecall (${merged.length} chars)`)
        } else {
          // Stale/missing cache: inject harness state only, never stale recall.
          contextSink.push(truncateText(inject, COMPACTION_CONTEXT_LIMIT))
          curatorLog(`[curator] compaction injected harness state (stale or missing autorecall cache)`)
        }
        updateLoopState(root, {
          phase: "compress",
          heartbeat_at: new Date().toISOString(),
          status: "active",
        })
      } catch (err) {
        // Even on failure, inject a minimal state header so the compacted
        // context records that the hook ran in an error state.
        curatorLog(`[curator] compaction error: ${safeLogMessage(err.message)}`)
        const contextSink = Array.isArray(output.context) ? output.context : (output.context = [])
        contextSink.push(truncateText(`## OpenHermes State\n- Project: ${project?.name || path.basename(directory)}\n- Hook: experimental.session.compacting (error state)\n`, COMPACTION_CONTEXT_LIMIT))
      }
    },
  }
}
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Fix build and TypeScript errors with minimal changes
|
|
3
|
+
agent: build-error-resolver
|
|
4
|
+
subtask: true
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# Build Fix Command
|
|
8
|
+
|
|
9
|
+
Fix build and TypeScript errors with minimal changes: $ARGUMENTS
|
|
10
|
+
|
|
11
|
+
## Current type errors
|
|
12
|
+
|
|
13
|
+
!npx tsc --noEmit 2>&1 || echo "tsc not available or no errors"
|
|
14
|
+
|
|
15
|
+
## Your Task
|
|
16
|
+
|
|
17
|
+
1. **Run type check**: `npx tsc --noEmit`
|
|
18
|
+
2. **Collect all errors**
|
|
19
|
+
3. **Fix errors one by one** with minimal changes
|
|
20
|
+
4. **Verify each fix** doesn't introduce new errors
|
|
21
|
+
5. **Run final check** to confirm all errors resolved
|
|
22
|
+
|
|
23
|
+
## Approach
|
|
24
|
+
|
|
25
|
+
### DO:
|
|
26
|
+
- PASS: Fix type errors with correct types
|
|
27
|
+
- PASS: Add missing imports
|
|
28
|
+
- PASS: Fix syntax errors
|
|
29
|
+
- PASS: Make minimal changes
|
|
30
|
+
- PASS: Preserve existing behavior
|
|
31
|
+
- PASS: Run `tsc --noEmit` after each change
|
|
32
|
+
|
|
33
|
+
### DON'T:
|
|
34
|
+
- FAIL: Refactor code
|
|
35
|
+
- FAIL: Add new features
|
|
36
|
+
- FAIL: Change architecture
|
|
37
|
+
- FAIL: Use `any` type (unless absolutely necessary)
|
|
38
|
+
- FAIL: Add `@ts-ignore` comments
|
|
39
|
+
- FAIL: Change business logic
|
|
40
|
+
|
|
41
|
+
## Common Error Fixes
|
|
42
|
+
|
|
43
|
+
| Error | Fix |
|
|
44
|
+
|-------|-----|
|
|
45
|
+
| Type 'X' is not assignable to type 'Y' | Add correct type annotation |
|
|
46
|
+
| Property 'X' does not exist | Add property to interface or fix property name |
|
|
47
|
+
| Cannot find module 'X' | Install package or fix import path |
|
|
48
|
+
| Argument of type 'X' is not assignable | Cast or fix function signature |
|
|
49
|
+
| Object is possibly 'undefined' | Add null check or optional chaining |
|
|
50
|
+
|
|
51
|
+
## Verification Steps
|
|
52
|
+
|
|
53
|
+
After fixes:
|
|
54
|
+
1. `npx tsc --noEmit` - should show 0 errors
|
|
55
|
+
2. `npm run build` - should succeed
|
|
56
|
+
3. `npm test` - tests should still pass
|
|
57
|
+
|
|
58
|
+
---
|
|
59
|
+
|
|
60
|
+
**IMPORTANT**: Focus on fixing errors only. No refactoring, no improvements, no architectural changes. Get the build green with minimal diff.
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Review code for quality, security, and maintainability
|
|
3
|
+
agent: code-reviewer
|
|
4
|
+
subtask: true
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# Code Review Command
|
|
8
|
+
|
|
9
|
+
Review code changes for quality, security, and maintainability: $ARGUMENTS
|
|
10
|
+
|
|
11
|
+
## Changed files
|
|
12
|
+
|
|
13
|
+
!git diff --name-only HEAD 2>&1 || echo "No git repo or no changes"
|
|
14
|
+
|
|
15
|
+
## Your Task
|
|
16
|
+
|
|
17
|
+
1. **Analyze each changed file** for issues
2. **Generate structured report**
3. **Provide actionable recommendations**
|
|
20
|
+
|
|
21
|
+
## Check Categories
|
|
22
|
+
|
|
23
|
+
### Security Issues (CRITICAL)
|
|
24
|
+
- [ ] Hardcoded credentials, API keys, tokens
|
|
25
|
+
- [ ] SQL injection vulnerabilities
|
|
26
|
+
- [ ] XSS vulnerabilities
|
|
27
|
+
- [ ] Missing input validation
|
|
28
|
+
- [ ] Insecure dependencies
|
|
29
|
+
- [ ] Path traversal risks
|
|
30
|
+
- [ ] Authentication/authorization flaws
|
|
31
|
+
|
|
32
|
+
### Code Quality (HIGH)
|
|
33
|
+
- [ ] Functions > 50 lines
|
|
34
|
+
- [ ] Files > 800 lines
|
|
35
|
+
- [ ] Nesting depth > 4 levels
|
|
36
|
+
- [ ] Missing error handling
|
|
37
|
+
- [ ] console.log statements
|
|
38
|
+
- [ ] TODO/FIXME comments
|
|
39
|
+
- [ ] Missing JSDoc for public APIs
|
|
40
|
+
|
|
41
|
+
### Best Practices (MEDIUM)
|
|
42
|
+
- [ ] Mutation patterns (use immutable instead)
|
|
43
|
+
- [ ] Unnecessary complexity
|
|
44
|
+
- [ ] Missing tests for new code
|
|
45
|
+
- [ ] Accessibility issues (a11y)
|
|
46
|
+
- [ ] Performance concerns
|
|
47
|
+
|
|
48
|
+
### Style (LOW)
|
|
49
|
+
- [ ] Inconsistent naming
|
|
50
|
+
- [ ] Missing type annotations
|
|
51
|
+
- [ ] Formatting issues
|
|
52
|
+
|
|
53
|
+
## Report Format
|
|
54
|
+
|
|
55
|
+
For each issue found:
|
|
56
|
+
|
|
57
|
+
```
|
|
58
|
+
**[SEVERITY]** file.ts:123
|
|
59
|
+
Issue: [Description]
|
|
60
|
+
Fix: [How to fix]
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
## Decision
|
|
64
|
+
|
|
65
|
+
- **CRITICAL or HIGH issues**: Block commit, require fixes
|
|
66
|
+
- **MEDIUM issues**: Recommend fixes before merge
|
|
67
|
+
- **LOW issues**: Optional improvements
|
|
68
|
+
|
|
69
|
+
---
|
|
70
|
+
|
|
71
|
+
**IMPORTANT**: Never approve code with security vulnerabilities!
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Run OpenCode openhermes health diagnostics
|
|
3
|
+
agent: OpenHermes
|
|
4
|
+
subtask: true
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# Doctor Command
|
|
8
|
+
|
|
9
|
+
Run full OpenCode openhermes diagnostics. $ARGUMENTS
|
|
10
|
+
|
|
11
|
+
## Your Task
|
|
12
|
+
|
|
13
|
+
1. Load the opencode-doctor skill: `skill({ name: "opencode-doctor" })`
|
|
14
|
+
2. Follow its instructions to validate:
|
|
15
|
+
- Config syntax (opencode.json valid JSON)
|
|
16
|
+
- Provider connectivity (LM Studio at http://127.0.0.1:1234/v1)
|
|
17
|
+
- Cache state (memory records, recall cache)
|
|
18
|
+
- Auth file integrity
|
|
19
|
+
3. Report results with any fix suggestions
|
|
20
|
+
|
|
21
|
+
## Automated Checks
|
|
22
|
+
|
|
23
|
+
Run these commands and report results:
|
|
24
|
+
|
|
25
|
+
!opencode debug config
|
|
26
|
+
!opencode debug info
|
|
27
|
+
!opencode debug paths
|
|
28
|
+
|
|
29
|
+
## Report Format
|
|
30
|
+
|
|
31
|
+
| Check | Result | Issue |
|
|
32
|
+
|-------|--------|-------|
|
|
33
|
+
| Config JSON | PASS/FAIL | |
|
|
34
|
+
| Provider | PASS/FAIL | |
|
|
35
|
+
| Memory MCP | PASS/FAIL | |
|
|
36
|
+
| Plugins | PASS/FAIL | |
|
|
37
|
+
| Skills | PASS/FAIL | |
|
|
38
|
+
|
|
39
|
+
## After Diagnosis
|
|
40
|
+
|
|
41
|
+
If issues found: propose fixes, wait for approval, apply.
|
|
42
|
+
If clean: report "OpenHermes: HEALTHY."
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Create a new skill from recent work patterns
|
|
3
|
+
agent: OpenHermes
|
|
4
|
+
subtask: true
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# Learn Command
|
|
8
|
+
|
|
9
|
+
Create a new reusable skill from recent work patterns. $ARGUMENTS
|
|
10
|
+
|
|
11
|
+
## Your Task
|
|
12
|
+
|
|
13
|
+
1. **Search backlog** for pending skill candidates:
|
|
14
|
+
- Use `hm_search` with query="skill-candidate" classes=["backlog"]
|
|
15
|
+
- If $ARGUMENTS is non-empty, narrow search to that topic
|
|
16
|
+
2. **Analyze the candidate** — what pattern did the session reveal?
|
|
17
|
+
3. **Create the skill**:
|
|
18
|
+
- Load the skill-creator skill: `skill({ name: "skill-creator" })`
|
|
19
|
+
- Follow its instructions to create a new SKILL.md
|
|
20
|
+
- Target: `%USERPROFILE%\.config\opencode\skills\<name>\SKILL.md`
|
|
21
|
+
- Naming: lowercase, hyphenated, descriptive
|
|
22
|
+
4. **Close the backlog entry**: `hm_put(class="backlog", id="<candidate-id>", data={..., status:"closed"})`
|
|
23
|
+
5. **Report**: What skill was created, where, and what it does
|
|
24
|
+
|
|
25
|
+
## Skill Requirements
|
|
26
|
+
|
|
27
|
+
- name: lowercase-hyphenated, 1-64 chars
|
|
28
|
+
- description: 1-1024 chars, specific enough for agent to know when to load
|
|
29
|
+
- Must include frontmatter with name + description
|
|
30
|
+
- Must include: what it does, when to use it, step-by-step workflow
|
|
31
|
+
|
|
32
|
+
## Report Format
|
|
33
|
+
|
|
34
|
+
**Skill Created**: `<name>`
|
|
35
|
+
**Path**: `%USERPROFILE%\.config\opencode\skills\<name>\SKILL.md`
|
|
36
|
+
**Purpose**: [one-line summary]
|
|
37
|
+
**Trigger words**: [when agent should load this skill]
|