@joshuaswarren/openclaw-engram 9.0.39 → 9.0.41
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -1
- package/dist/index.js +306 -0
- package/dist/index.js.map +1 -1
- package/openclaw.plugin.json +12 -2
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -41,6 +41,8 @@ AI agents forget everything between conversations. Engram fixes that.
|
|
|
41
41
|
- **Cue-anchor index foundation** — Engram can now, when `harmonicRetrievalEnabled` and `abstractionAnchorsEnabled` are enabled, persist typed cue anchors for entities, files, tools, outcomes, constraints, and dates, inspect them with `openclaw engram cue-anchor-status`, and keep harmonic retrieval grounded in explicit abstraction-to-cue links before blending logic lands.
|
|
42
42
|
- **Harmonic retrieval diagnostics** — Engram can now, when `harmonicRetrievalEnabled` is enabled, blend abstraction-node evidence with cue-anchor matches into a dedicated `Harmonic Retrieval` recall section and inspect those blended results with `openclaw engram harmonic-search`.
|
|
43
43
|
- **Verified episodic recall** — Engram can now, when `verifiedRecallEnabled` is enabled, inject a dedicated `Verified Episodes` recall section that reuses memory boxes but only surfaces boxes whose cited source memories still verify as non-archived episodes.
|
|
44
|
+
- **Semantic rule promotion** — Engram can now, when `semanticRulePromotionEnabled` is enabled, promote explicit `IF ... THEN ...` rules from verified episodic memories into durable `rule` memories with lineage, source-memory provenance, duplicate suppression, and the operator-facing `openclaw engram semantic-rule-promote` CLI.
|
|
45
|
+
- **Verified rule recall** — Engram can now, when `semanticRuleVerificationEnabled` is enabled, inject a dedicated `Verified Rules` recall section that re-checks promoted rule memories against their cited source episodes at recall time and downgrades stale provenance before the rule can surface.
|
|
44
46
|
- **Zero-config start** — Install, add an API key, restart. Engram works out of the box with sensible defaults and progressively unlocks advanced features as you enable them.
|
|
45
47
|
|
|
46
48
|
## Quick Start
|
|
@@ -209,7 +211,8 @@ Key settings:
|
|
|
209
211
|
| `harmonicRetrievalEnabled` | `false` | Enable harmonic retrieval blending over abstraction nodes and cue anchors, including the dedicated recall section and `harmonic-search` diagnostics |
|
|
210
212
|
| `abstractionAnchorsEnabled` | `false` | Enable typed cue-anchor indexing for abstraction nodes and expose the anchor store through status tooling |
|
|
211
213
|
| `verifiedRecallEnabled` | `false` | Inject prompt-relevant memory boxes only when their cited source memories verify as non-archived episodes |
|
|
212
|
-
| `semanticRulePromotionEnabled` | `false` |
|
|
214
|
+
| `semanticRulePromotionEnabled` | `false` | Enable deterministic promotion of explicit `IF ... THEN ...` rules from verified episodic memories via `openclaw engram semantic-rule-promote` |
|
|
215
|
+
| `semanticRuleVerificationEnabled` | `false` | Verify promoted semantic rules against their cited source episodes at recall time and inject a dedicated `Verified Rules` section via `openclaw engram semantic-rule-verify` |
|
|
213
216
|
|
|
214
217
|
Full reference: [Config Reference](docs/config-reference.md)
|
|
215
218
|
|
package/dist/index.js
CHANGED
|
@@ -306,6 +306,7 @@ function parseConfig(raw) {
|
|
|
306
306
|
abstractionAnchorsEnabled: cfg.abstractionAnchorsEnabled === true,
|
|
307
307
|
verifiedRecallEnabled: cfg.verifiedRecallEnabled === true,
|
|
308
308
|
semanticRulePromotionEnabled: cfg.semanticRulePromotionEnabled === true,
|
|
309
|
+
semanticRuleVerificationEnabled: cfg.semanticRuleVerificationEnabled === true,
|
|
309
310
|
abstractionNodeStoreDir: typeof cfg.abstractionNodeStoreDir === "string" && cfg.abstractionNodeStoreDir.trim().length > 0 ? cfg.abstractionNodeStoreDir.trim() : path.join(memoryDir, "state", "abstraction-nodes"),
|
|
310
311
|
// Local LLM Provider (v2.1)
|
|
311
312
|
localLlmEnabled: cfg.localLlmEnabled === true || cfg.localLlmEnabled === "true",
|
|
@@ -595,6 +596,12 @@ function buildDefaultRecallPipeline(cfg) {
|
|
|
595
596
|
maxResults: 3,
|
|
596
597
|
maxChars: 1800
|
|
597
598
|
},
|
|
599
|
+
{
|
|
600
|
+
id: "verified-rules",
|
|
601
|
+
enabled: cfg.semanticRuleVerificationEnabled === true,
|
|
602
|
+
maxResults: 3,
|
|
603
|
+
maxChars: 1800
|
|
604
|
+
},
|
|
598
605
|
{
|
|
599
606
|
id: "memories",
|
|
600
607
|
enabled: true,
|
|
@@ -15430,6 +15437,100 @@ async function searchVerifiedEpisodes(options) {
|
|
|
15430
15437
|
).slice(0, options.maxResults);
|
|
15431
15438
|
}
|
|
15432
15439
|
|
|
15440
|
+
// src/semantic-rule-verifier.ts

// Floor below which a provenance-downgraded rule is dropped from recall.
var DEFAULT_MIN_EFFECTIVE_CONFIDENCE = 0.45;

/**
 * Map a verification status to the multiplier applied to a promoted rule's
 * anchored confidence. A fully verified source keeps confidence intact;
 * progressively worse provenance applies progressively harsher discounts,
 * and any unrecognized status falls back to the harshest one.
 */
function verificationConfidenceMultiplier(status) {
  const multipliers = /* @__PURE__ */ new Map([
    ["verified", 1],
    ["source-memory-not-episode", 0.45],
    ["source-memory-archived", 0.4],
    ["source-memory-missing", 0.35]
  ]);
  return multipliers.get(status) ?? 0.35;
}
|
|
15456
|
+
/**
 * Classify the provenance of a promoted rule's cited source memory.
 * Checks run in severity order: a missing memory beats an archived one,
 * which beats a non-episode kind; only an active episode is "verified".
 */
function resolveVerificationStatus(sourceMemory) {
  if (!sourceMemory) return "source-memory-missing";
  const { status, memoryKind } = sourceMemory.frontmatter;
  if (status === "archived") return "source-memory-archived";
  return memoryKind === "episode" ? "verified" : "source-memory-not-episode";
}
|
|
15462
|
+
/**
 * Compute a promoted rule's recall-time confidence, anchored to its source
 * episode. The rule's and source's confidences each default (rule → 0.8,
 * source → the rule's value) when non-finite; the lower of the two is then
 * discounted by the verification-status multiplier and clamped to [0, 1].
 * Returns both the resolved status and the effective confidence.
 */
function resolveEffectiveConfidence(rule, sourceMemory) {
  const status = resolveVerificationStatus(sourceMemory);
  let ruleConfidence = rule.frontmatter.confidence;
  if (!Number.isFinite(ruleConfidence)) ruleConfidence = 0.8;
  let sourceConfidence = sourceMemory?.frontmatter.confidence;
  if (!Number.isFinite(sourceConfidence)) sourceConfidence = ruleConfidence;
  const anchored = Math.min(ruleConfidence, sourceConfidence);
  const scaled = anchored * verificationConfidenceMultiplier(status);
  const effectiveConfidence = Math.min(1, Math.max(0, scaled));
  return { status, effectiveConfidence };
}
|
|
15473
|
+
/**
 * Score a promoted rule against the query tokens across three weighted
 * fields: rule content (x5), tags (x2), and the source episode's content
 * (x2). Any field with overlap is recorded in `matchedFields`. When at
 * least one field matched, the effective confidence is added as a
 * tie-breaking bonus so better-verified rules rank first at equal overlap.
 */
function scoreVerifiedSemanticRuleCandidate(rule, sourceMemory, queryTokens, effectiveConfidence) {
  const weightedFields = [
    ["ruleContent", 5, rule.content],
    ["tags", 2, rule.frontmatter.tags?.join(" ")],
    ["sourceContent", 2, sourceMemory?.content]
  ];
  const matchedFields = /* @__PURE__ */ new Set();
  let score = 0;
  for (const [field, weight, text] of weightedFields) {
    const matches = countRecallTokenOverlap(queryTokens, text);
    if (matches > 0) {
      score += matches * weight;
      matchedFields.add(field);
    }
  }
  if (score > 0) {
    score += effectiveConfidence;
  }
  return { score, matchedFields };
}
|
|
15496
|
+
/**
 * Search promoted semantic-rule memories and return only those whose cited
 * source episode still verifies, ranked by token overlap with the query.
 *
 * Candidates must be active `rule` memories produced by semantic-rule
 * promotion that record a source memory id. Each candidate's confidence is
 * re-anchored against its source episode's verification status and dropped
 * when it falls below `minEffectiveConfidence` (default
 * DEFAULT_MIN_EFFECTIVE_CONFIDENCE). Results are sorted by score, then
 * effective confidence, then most-recent update, and truncated to
 * `maxResults`.
 */
async function searchVerifiedSemanticRules(options) {
  const queryTokens = new Set(normalizeRecallTokens(options.query, ["what", "which"]));
  if (queryTokens.size === 0 || options.maxResults <= 0) return [];
  const storage = new StorageManager(options.memoryDir);
  const allMemories = await storage.readAllMemories();
  const memoryById = new Map(allMemories.map((entry) => [entry.frontmatter.id, entry]));
  const minEffectiveConfidence = options.minEffectiveConfidence ?? DEFAULT_MIN_EFFECTIVE_CONFIDENCE;
  const candidates = [];
  for (const candidateRule of allMemories) {
    const frontmatter = candidateRule.frontmatter;
    if (frontmatter.category !== "rule" || frontmatter.status === "archived") continue;
    if (frontmatter.source !== "semantic-rule-promotion") continue;
    const sourceMemoryId = frontmatter.sourceMemoryId;
    if (!sourceMemoryId) continue;
    const sourceMemory = memoryById.get(sourceMemoryId);
    const { status, effectiveConfidence } = resolveEffectiveConfidence(candidateRule, sourceMemory);
    if (effectiveConfidence < minEffectiveConfidence) continue;
    const { score, matchedFields } = scoreVerifiedSemanticRuleCandidate(
      candidateRule,
      sourceMemory,
      queryTokens,
      effectiveConfidence
    );
    if (score <= 0) continue;
    candidates.push({
      rule: candidateRule,
      score,
      sourceMemoryId,
      verificationStatus: status,
      effectiveConfidence,
      matchedFields: [...matchedFields].sort()
    });
  }
  const byRelevance = (left, right) => right.score - left.score || right.effectiveConfidence - left.effectiveConfidence || right.rule.frontmatter.updated.localeCompare(left.rule.frontmatter.updated);
  return candidates.sort(byRelevance).slice(0, options.maxResults);
}
|
|
15533
|
+
|
|
15433
15534
|
// src/replay/types.ts
|
|
15434
15535
|
var VALID_SOURCES = /* @__PURE__ */ new Set(["openclaw", "claude", "chatgpt"]);
|
|
15435
15536
|
var VALID_ROLES = /* @__PURE__ */ new Set(["user", "assistant"]);
|
|
@@ -19234,6 +19335,25 @@ ${r.snippet.trim()}
|
|
|
19234
19335
|
timings.verifiedRecall = `${Date.now() - t0}ms`;
|
|
19235
19336
|
return results.length > 0 ? this.formatVerifiedEpisodeResults(results) : null;
|
|
19236
19337
|
})();
|
|
19338
|
+
const verifiedRulesPromise = (async () => {
|
|
19339
|
+
const t0 = Date.now();
|
|
19340
|
+
if (!this.config.semanticRuleVerificationEnabled || !this.isRecallSectionEnabled("verified-rules", this.config.semanticRuleVerificationEnabled === true)) {
|
|
19341
|
+
timings.verifiedRules = "skip";
|
|
19342
|
+
return null;
|
|
19343
|
+
}
|
|
19344
|
+
const maxResults = this.getRecallSectionNumber("verified-rules", "maxResults") ?? 3;
|
|
19345
|
+
if (maxResults <= 0) {
|
|
19346
|
+
timings.verifiedRules = "skip(limit=0)";
|
|
19347
|
+
return null;
|
|
19348
|
+
}
|
|
19349
|
+
const results = await searchVerifiedSemanticRules({
|
|
19350
|
+
memoryDir: this.config.memoryDir,
|
|
19351
|
+
query: retrievalQuery,
|
|
19352
|
+
maxResults
|
|
19353
|
+
});
|
|
19354
|
+
timings.verifiedRules = `${Date.now() - t0}ms`;
|
|
19355
|
+
return results.length > 0 ? this.formatVerifiedSemanticRuleResults(results) : null;
|
|
19356
|
+
})();
|
|
19237
19357
|
const qmdPromise = (async () => {
|
|
19238
19358
|
if (recallResultLimit <= 0) {
|
|
19239
19359
|
timings.qmd = "skip(limit=0)";
|
|
@@ -19442,6 +19562,7 @@ ${formatted}`;
|
|
|
19442
19562
|
trustZoneSection,
|
|
19443
19563
|
harmonicRetrievalSection,
|
|
19444
19564
|
verifiedRecallSection,
|
|
19565
|
+
verifiedRulesSection,
|
|
19445
19566
|
qmdResult,
|
|
19446
19567
|
transcriptSection,
|
|
19447
19568
|
compactionSection,
|
|
@@ -19459,6 +19580,7 @@ ${formatted}`;
|
|
|
19459
19580
|
trustZonePromise,
|
|
19460
19581
|
harmonicRetrievalPromise,
|
|
19461
19582
|
verifiedRecallPromise,
|
|
19583
|
+
verifiedRulesPromise,
|
|
19462
19584
|
qmdPromise,
|
|
19463
19585
|
transcriptPromise,
|
|
19464
19586
|
compactionPromise,
|
|
@@ -19528,6 +19650,9 @@ ${tmtNode.summary}`);
|
|
|
19528
19650
|
if (verifiedRecallSection) {
|
|
19529
19651
|
this.appendRecallSection(sectionBuckets, "verified-episodes", verifiedRecallSection);
|
|
19530
19652
|
}
|
|
19653
|
+
if (verifiedRulesSection) {
|
|
19654
|
+
this.appendRecallSection(sectionBuckets, "verified-rules", verifiedRulesSection);
|
|
19655
|
+
}
|
|
19531
19656
|
if (qmdResult) {
|
|
19532
19657
|
const t0 = Date.now();
|
|
19533
19658
|
const { memoryResultsLists, globalResults } = qmdResult;
|
|
@@ -21751,6 +21876,27 @@ ${details.join("\n")}`;
|
|
|
21751
21876
|
});
|
|
21752
21877
|
return `## Verified Episodes
|
|
21753
21878
|
|
|
21879
|
+
${lines.join("\n\n")}`;
|
|
21880
|
+
}
|
|
21881
|
+
formatVerifiedSemanticRuleResults(results) {
|
|
21882
|
+
const lines = results.map(({ rule, sourceMemoryId, verificationStatus, effectiveConfidence, matchedFields }, index) => {
|
|
21883
|
+
const header = [
|
|
21884
|
+
`[${index + 1}] ${rule.frontmatter.updated.replace("T", " ").slice(0, 16)}`,
|
|
21885
|
+
verificationStatus,
|
|
21886
|
+
`confidence:${effectiveConfidence.toFixed(2)}`
|
|
21887
|
+
].join(" | ");
|
|
21888
|
+
const details = [
|
|
21889
|
+
rule.content,
|
|
21890
|
+
`source memory: ${sourceMemoryId}`
|
|
21891
|
+
];
|
|
21892
|
+
if (matchedFields.length > 0) {
|
|
21893
|
+
details.push(`matched: ${matchedFields.join(", ")}`);
|
|
21894
|
+
}
|
|
21895
|
+
return `${header}
|
|
21896
|
+
${details.join("\n")}`;
|
|
21897
|
+
});
|
|
21898
|
+
return `## Verified Rules
|
|
21899
|
+
|
|
21754
21900
|
${lines.join("\n\n")}`;
|
|
21755
21901
|
}
|
|
21756
21902
|
summarizeIdentityText(raw, maxLines, maxChars) {
|
|
@@ -27248,6 +27394,126 @@ async function runCompatChecks(options) {
|
|
|
27248
27394
|
};
|
|
27249
27395
|
}
|
|
27250
27396
|
|
|
27397
|
+
// src/semantic-rule-promotion.ts

/** Collapse every whitespace run to a single space and trim both ends. */
function normalizeRuleWhitespace(value) {
  const collapsed = value.replace(/\s+/g, " ");
  return collapsed.trim();
}

/** Drop a trailing run of clause punctuation (, : ;) and trim the result. */
function stripTrailingClausePunctuation(value) {
  let end = value.length;
  while (end > 0 && ",:;".includes(value[end - 1])) {
    end -= 1;
  }
  return value.slice(0, end).trim();
}

/**
 * Canonical text form used for duplicate detection: the extracted
 * IF/THEN rule when one exists, otherwise the whitespace-normalized text.
 */
function canonicalizeRuleContent(value) {
  const explicitRule = extractExplicitIfThenRule(value);
  return explicitRule !== null ? explicitRule : normalizeRuleWhitespace(value);
}

/** Case-insensitive duplicate-detection key for a rule's content. */
function canonicalizeRuleKey(value) {
  return canonicalizeRuleContent(value).toLowerCase();
}

// Lazily captures the condition between "if" and "then" and the outcome up
// to sentence-ending punctuation (or end of input); case-insensitive, no /g
// so exec carries no lastIndex state between calls.
const EXPLICIT_IF_THEN_PATTERN = /\bif\b([\s\S]+?)\bthen\b([\s\S]+?)(?:[.!?](?:\s|$)|$)/i;

/**
 * Extract the first explicit "IF <condition> THEN <outcome>." rule from
 * free text, normalized to that canonical uppercase form, or null when no
 * well-formed rule (non-empty condition and outcome) is present.
 */
function extractExplicitIfThenRule(content) {
  const match = EXPLICIT_IF_THEN_PATTERN.exec(content);
  if (match === null) return null;
  const condition = stripTrailingClausePunctuation(normalizeRuleWhitespace(match[1] ?? ""));
  const outcome = stripTrailingClausePunctuation(normalizeRuleWhitespace(match[2] ?? ""));
  if (condition.length === 0 || outcome.length === 0) return null;
  return `IF ${condition} THEN ${outcome}.`;
}
|
|
27418
|
+
/**
 * Confidence assigned to a promoted rule: the source memory's confidence
 * (default 0.8 when non-finite) clamped into [0.6, 0.98] so promoted rules
 * are never near-zero nor treated as certainties.
 */
function promotionConfidence(memory) {
  const raw = memory.frontmatter.confidence;
  const base = Number.isFinite(raw) ? raw : 0.8;
  return Math.min(0.98, Math.max(0.6, base));
}
|
|
27422
|
+
/**
 * Tags for a promoted rule: the source memory's tags (in order) plus the
 * "semantic-rule" and "promoted-rule" markers, deduplicated.
 */
function promotionTags(memory) {
  const tags = new Set(memory.frontmatter.tags ?? []);
  tags.add("semantic-rule");
  tags.add("promoted-rule");
  return [...tags];
}
|
|
27425
|
+
/**
 * Build the single "supports" provenance link from a promoted rule back to
 * its source episodic memory, with link strength equal to the promotion
 * confidence.
 */
function buildSupportLinks(sourceMemoryId, confidence) {
  const supportLink = {
    targetId: sourceMemoryId,
    linkType: "supports",
    strength: confidence,
    reason: "Promoted from verified episodic memory"
  };
  return [supportLink];
}
|
|
27435
|
+
/**
 * Promote an explicit "IF ... THEN ..." rule from a verified episodic memory
 * into a durable `rule` memory, with provenance and duplicate suppression.
 *
 * Guard order: feature flag → source exists → source is an active episode →
 * source contains an explicit IF/THEN rule → no active duplicate rule.
 * Each failed guard records a `skipped` entry with a reason and returns the
 * report early. On success (or dry run) a `promoted` entry is recorded.
 *
 * @param options - { memoryDir, enabled, sourceMemoryId, dryRun? }
 * @returns report: { enabled, dryRun, promoted: [...], skipped: [...] }
 */
async function promoteSemanticRuleFromMemory(options) {
  const report = {
    enabled: options.enabled,
    // Normalize to a strict boolean; any non-true value means a real write.
    dryRun: options.dryRun === true,
    promoted: [],
    skipped: []
  };
  // Feature flag off: report the skip rather than silently doing nothing.
  if (!options.enabled) {
    report.skipped.push({
      sourceMemoryId: options.sourceMemoryId,
      reason: "disabled"
    });
    return report;
  }
  const storage = new StorageManager(options.memoryDir);
  const sourceMemory = await storage.getMemoryById(options.sourceMemoryId);
  if (!sourceMemory) {
    report.skipped.push({
      sourceMemoryId: options.sourceMemoryId,
      reason: "source-memory-missing"
    });
    return report;
  }
  // Only active episodic memories may be promoted.
  // NOTE(review): an archived episode is also reported as
  // "source-memory-not-episode" here, while the recall-time verifier
  // distinguishes "source-memory-archived" — confirm the shared reason
  // label is intentional.
  if (sourceMemory.frontmatter.status === "archived" || sourceMemory.frontmatter.memoryKind !== "episode") {
    report.skipped.push({
      sourceMemoryId: options.sourceMemoryId,
      reason: "source-memory-not-episode"
    });
    return report;
  }
  // The promoted content is the canonical "IF ... THEN ..." sentence, not
  // the raw episode text.
  const content = extractExplicitIfThenRule(sourceMemory.content);
  if (!content) {
    report.skipped.push({
      sourceMemoryId: options.sourceMemoryId,
      reason: "no-explicit-rule"
    });
    return report;
  }
  // Duplicate suppression: compare canonical, case-insensitive rule keys
  // against every active rule memory (full scan of the store).
  const ruleKey = canonicalizeRuleKey(content);
  const existingRule = (await storage.readAllMemories()).find(
    (memory) => memory.frontmatter.category === "rule" && memory.frontmatter.status !== "archived" && canonicalizeRuleKey(memory.content) === ruleKey
  );
  if (existingRule) {
    report.skipped.push({
      sourceMemoryId: options.sourceMemoryId,
      reason: "duplicate-rule",
      existingRuleId: existingRule.frontmatter.id
    });
    return report;
  }
  const confidence = promotionConfidence(sourceMemory);
  // Shared fields for both the dry-run preview and the real promoted entry.
  const candidateBase = {
    sourceMemoryId: options.sourceMemoryId,
    content,
    confidence,
    tags: promotionTags(sourceMemory),
    memoryKind: "note",
    lineage: [options.sourceMemoryId]
  };
  // Dry run: report what would be written, using a sentinel id, no write.
  if (options.dryRun === true) {
    report.promoted.push({
      id: `dry-run:${options.sourceMemoryId}`,
      ...candidateBase
    });
    return report;
  }
  // Persist the rule with full provenance: source id, lineage, and a
  // "supports" link back to the source episode.
  const id = await storage.writeMemory("rule", content, {
    confidence,
    tags: candidateBase.tags,
    source: "semantic-rule-promotion",
    lineage: candidateBase.lineage,
    sourceMemoryId: options.sourceMemoryId,
    memoryKind: "note",
    links: buildSupportLinks(options.sourceMemoryId, confidence)
  });
  report.promoted.push({
    id,
    ...candidateBase
  });
  return report;
}
|
|
27516
|
+
|
|
27251
27517
|
// src/cli.ts
|
|
27252
27518
|
function rankCandidateForKeep(a, b) {
|
|
27253
27519
|
const aConfidence = typeof a.frontmatter.confidence === "number" ? a.frontmatter.confidence : 0;
|
|
@@ -27493,6 +27759,22 @@ async function runVerifiedRecallSearchCliCommand(options) {
|
|
|
27493
27759
|
boxRecallDays: options.boxRecallDays
|
|
27494
27760
|
});
|
|
27495
27761
|
}
|
|
27762
|
+
/**
 * CLI adapter for `semantic-rule-promote`: translate CLI option names onto
 * the promotion API (the feature flag becomes `enabled`) and return its
 * report unchanged.
 */
async function runSemanticRulePromoteCliCommand(options) {
  const { memoryDir, semanticRulePromotionEnabled, sourceMemoryId, dryRun } = options;
  return promoteSemanticRuleFromMemory({
    memoryDir,
    enabled: semanticRulePromotionEnabled,
    sourceMemoryId,
    dryRun
  });
}
|
|
27770
|
+
/**
 * CLI adapter for `semantic-rule-verify`: return an empty result set when
 * the verification feature is disabled, otherwise run the verified
 * semantic-rule search with `maxResults` floored (default 3) and raised to
 * at least 1.
 */
async function runSemanticRuleVerifyCliCommand(options) {
  if (!options.semanticRuleVerificationEnabled) return [];
  const requested = Math.floor(options.maxResults ?? 3);
  const maxResults = requested < 1 ? 1 : requested;
  return searchVerifiedSemanticRules({
    memoryDir: options.memoryDir,
    query: options.query,
    maxResults
  });
}
|
|
27496
27778
|
async function runTrustZonePromoteCliCommand(options) {
|
|
27497
27779
|
const result = await promoteTrustZoneRecord({
|
|
27498
27780
|
memoryDir: options.memoryDir,
|
|
@@ -28752,6 +29034,30 @@ function registerCli(api, orchestrator) {
|
|
|
28752
29034
|
console.log(JSON.stringify(results, null, 2));
|
|
28753
29035
|
console.log("OK");
|
|
28754
29036
|
});
|
|
29037
|
+
cmd.command("semantic-rule-promote").description("Promote an explicit IF/THEN rule from a verified episodic memory").requiredOption("--memory-id <memoryId>", "Verified episodic memory id to promote from").option("--dry-run", "Preview the promoted semantic rule without writing it").action(async (...args) => {
|
|
29038
|
+
const options = args[0] ?? {};
|
|
29039
|
+
const result = await runSemanticRulePromoteCliCommand({
|
|
29040
|
+
memoryDir: orchestrator.config.memoryDir,
|
|
29041
|
+
semanticRulePromotionEnabled: orchestrator.config.semanticRulePromotionEnabled,
|
|
29042
|
+
sourceMemoryId: String(options.memoryId ?? ""),
|
|
29043
|
+
dryRun: options.dryRun === true
|
|
29044
|
+
});
|
|
29045
|
+
console.log(JSON.stringify(result, null, 2));
|
|
29046
|
+
console.log("OK");
|
|
29047
|
+
});
|
|
29048
|
+
cmd.command("semantic-rule-verify").description("Preview verified semantic-rule recall with provenance-aware confidence downgrades").argument("<query>", "Prompt-like query to evaluate against verified semantic-rule recall").option("--max-results <count>", "Maximum number of verified semantic rules to return", "3").action(async (...args) => {
|
|
29049
|
+
const query = typeof args[0] === "string" ? args[0] : "";
|
|
29050
|
+
const options = args[1] ?? {};
|
|
29051
|
+
const maxResults = typeof options.maxResults === "string" ? Number.parseInt(options.maxResults, 10) : 3;
|
|
29052
|
+
const results = await runSemanticRuleVerifyCliCommand({
|
|
29053
|
+
memoryDir: orchestrator.config.memoryDir,
|
|
29054
|
+
semanticRuleVerificationEnabled: orchestrator.config.semanticRuleVerificationEnabled,
|
|
29055
|
+
query,
|
|
29056
|
+
maxResults: Number.isFinite(maxResults) ? maxResults : 3
|
|
29057
|
+
});
|
|
29058
|
+
console.log(JSON.stringify(results, null, 2));
|
|
29059
|
+
console.log("OK");
|
|
29060
|
+
});
|
|
28755
29061
|
cmd.command("conversation-index-health").description("Show conversation index backend health and index stats").action(async () => {
|
|
28756
29062
|
const health = await runConversationIndexHealthCliCommand(orchestrator);
|
|
28757
29063
|
console.log(JSON.stringify(health, null, 2));
|