@totalreclaw/totalreclaw 3.3.1-rc.8 → 3.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +268 -1
- package/SKILL.md +29 -23
- package/api-client.ts +18 -11
- package/claims-helper.ts +47 -1
- package/config.ts +108 -4
- package/confirm-indexed.ts +191 -0
- package/crypto.ts +10 -2
- package/dist/api-client.js +226 -0
- package/dist/billing-cache.js +100 -0
- package/dist/claims-helper.js +624 -0
- package/dist/config.js +297 -0
- package/dist/confirm-indexed.js +127 -0
- package/dist/consolidation.js +258 -0
- package/dist/contradiction-sync.js +1034 -0
- package/dist/crypto.js +138 -0
- package/dist/digest-sync.js +361 -0
- package/dist/download-ux.js +63 -0
- package/dist/embedder-cache.js +185 -0
- package/dist/embedder-loader.js +121 -0
- package/dist/embedder-network.js +301 -0
- package/dist/embedding.js +141 -0
- package/dist/extractor.js +1225 -0
- package/dist/first-run.js +103 -0
- package/dist/fs-helpers.js +725 -0
- package/dist/gateway-url.js +197 -0
- package/dist/generate-mnemonic.js +13 -0
- package/dist/hot-cache-wrapper.js +101 -0
- package/dist/import-adapters/base-adapter.js +64 -0
- package/dist/import-adapters/chatgpt-adapter.js +238 -0
- package/dist/import-adapters/claude-adapter.js +114 -0
- package/dist/import-adapters/gemini-adapter.js +201 -0
- package/dist/import-adapters/index.js +26 -0
- package/dist/import-adapters/mcp-memory-adapter.js +219 -0
- package/dist/import-adapters/mem0-adapter.js +158 -0
- package/dist/import-adapters/types.js +1 -0
- package/dist/index.js +5388 -0
- package/dist/llm-client.js +687 -0
- package/dist/llm-profile-reader.js +346 -0
- package/dist/lsh.js +62 -0
- package/dist/onboarding-cli.js +750 -0
- package/dist/pair-cli.js +344 -0
- package/dist/pair-crypto.js +359 -0
- package/dist/pair-http.js +404 -0
- package/dist/pair-page.js +826 -0
- package/dist/pair-qr.js +107 -0
- package/dist/pair-remote-client.js +410 -0
- package/dist/pair-session-store.js +566 -0
- package/dist/pin.js +556 -0
- package/dist/qa-bug-report.js +301 -0
- package/dist/relay-headers.js +44 -0
- package/dist/reranker.js +409 -0
- package/dist/retype-setscope.js +368 -0
- package/dist/semantic-dedup.js +75 -0
- package/dist/subgraph-search.js +289 -0
- package/dist/subgraph-store.js +694 -0
- package/dist/tool-gating.js +58 -0
- package/download-ux.ts +91 -0
- package/embedder-cache.ts +230 -0
- package/embedder-loader.ts +189 -0
- package/embedder-network.ts +350 -0
- package/embedding.ts +118 -27
- package/fs-helpers.ts +277 -0
- package/gateway-url.ts +57 -9
- package/index.ts +469 -250
- package/llm-client.ts +4 -3
- package/lsh.ts +7 -2
- package/onboarding-cli.ts +114 -1
- package/package.json +24 -5
- package/pair-cli.ts +76 -8
- package/pair-crypto.ts +34 -24
- package/pair-page.ts +28 -17
- package/pair-qr.ts +152 -0
- package/pair-remote-client.ts +540 -0
- package/pin.ts +31 -0
- package/qa-bug-report.ts +84 -2
- package/relay-headers.ts +50 -0
- package/reranker.ts +40 -0
- package/retype-setscope.ts +69 -8
- package/skill.json +1 -1
- package/subgraph-search.ts +4 -3
- package/subgraph-store.ts +15 -10
package/dist/crypto.js
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TotalReclaw Plugin - Crypto Operations (WASM-backed)
|
|
3
|
+
*
|
|
4
|
+
* Thin re-exports over `@totalreclaw/core` WASM module. Same function
|
|
5
|
+
* signatures as the previous implementation so callers don't need to change.
|
|
6
|
+
*
|
|
7
|
+
* The WASM module handles BIP-39 key derivation, XChaCha20-Poly1305 encrypt/
|
|
8
|
+
* decrypt, SHA-256 blind indices, HMAC-SHA256 content fingerprints,
|
|
9
|
+
* and LSH seed derivation.
|
|
10
|
+
*
|
|
11
|
+
* Key derivation chain (BIP-39 — handled by WASM):
|
|
12
|
+
* mnemonic -> BIP-39 PBKDF2 -> 512-bit seed
|
|
13
|
+
* -> HKDF-SHA256(seed, salt, "totalreclaw-auth-key-v1", 32) -> authKey
|
|
14
|
+
* -> HKDF-SHA256(seed, salt, "totalreclaw-encryption-key-v1", 32) -> encryptionKey
|
|
15
|
+
* -> HKDF-SHA256(seed, salt, "openmemory-dedup-v1", 32) -> dedupKey
|
|
16
|
+
*/
|
|
17
|
+
// Lazy-load WASM. Uses createRequire so this module loads cleanly under bare
|
|
18
|
+
// Node ESM — the shipped `dist/index.js` declares `"type":"module"`, where
|
|
19
|
+
// the CJS `require` global is undefined at runtime. Prior to the rc.21 fix
|
|
20
|
+
// this file called bare `require('@totalreclaw/core')` and every consumer
|
|
21
|
+
// died with `require is not defined`. Matches the pattern already used by
|
|
22
|
+
// claims-helper / consolidation / contradiction-sync / digest-sync / pin /
|
|
23
|
+
// retype-setscope. See issue #124.
|
|
24
|
+
import { createRequire } from 'node:module';
|
|
25
|
+
const requireWasm = createRequire(import.meta.url);
|
|
26
|
+
let _wasm = null;
|
|
27
|
+
function getWasm() {
|
|
28
|
+
if (!_wasm)
|
|
29
|
+
_wasm = requireWasm('@totalreclaw/core');
|
|
30
|
+
return _wasm;
|
|
31
|
+
}
|
|
32
|
+
// ---------------------------------------------------------------------------
// BIP-39 Validation
// ---------------------------------------------------------------------------
/**
 * Heuristic check: does the input have the shape of a BIP-39 mnemonic?
 *
 * Counts whitespace-separated words and accepts exactly 12 or 24. This is
 * deliberately shallow — it does NOT verify wordlist membership or the
 * checksum (LLMs sometimes emit checksum-invalid phrases); strict
 * validation happens later in the WASM core via `deriveKeys`.
 */
export function isBip39Mnemonic(input) {
    const wordCount = input.trim().split(/\s+/).length;
    return wordCount === 12 || wordCount === 24;
}
/** Backward-compatible alias kept for older callers. */
export const validateMnemonic = isBip39Mnemonic;
|
|
47
|
+
// ---------------------------------------------------------------------------
// Key Derivation
// ---------------------------------------------------------------------------
/**
 * Derive auth, encryption, and dedup keys from a recovery phrase.
 *
 * Delegates BIP-39 seed derivation and HKDF key separation to the WASM
 * core. Strict mnemonic validation is tried first; if the core rejects the
 * phrase (e.g. wordlist-valid words with a bad checksum), the lenient
 * variant is used instead.
 *
 * @param password - BIP-39 12/24-word mnemonic
 * @param existingSalt - Ignored for the BIP-39 path (salt is deterministic)
 * @returns `{ authKey, encryptionKey, dedupKey, salt }` as Buffers
 */
export function deriveKeys(password, existingSalt) {
    const phrase = password.trim();
    const wasm = getWasm();
    let derived;
    try {
        derived = wasm.deriveKeysFromMnemonic(phrase);
    }
    catch {
        derived = wasm.deriveKeysFromMnemonicLenient(phrase);
    }
    const fromHex = (hex) => Buffer.from(hex, 'hex');
    return {
        authKey: fromHex(derived.auth_key),
        encryptionKey: fromHex(derived.encryption_key),
        dedupKey: fromHex(derived.dedup_key),
        salt: fromHex(derived.salt),
    };
}
|
|
77
|
+
// ---------------------------------------------------------------------------
// LSH Seed Derivation
// ---------------------------------------------------------------------------
/**
 * Derive a 32-byte seed for the LSH hasher. Delegates to the WASM core.
 *
 * @param password - recovery phrase (trimmed before use)
 * @param salt - Buffer, hex-encoded for the WASM call
 * @returns seed bytes as a Uint8Array
 */
export function deriveLshSeed(password, salt) {
    const hex = getWasm().deriveLshSeed(password.trim(), salt.toString('hex'));
    return new Uint8Array(Buffer.from(hex, 'hex'));
}
// ---------------------------------------------------------------------------
// Auth Key Hash
// ---------------------------------------------------------------------------
/**
 * Compute the SHA-256 hash of the auth key (WASM core; key passed as hex).
 */
export function computeAuthKeyHash(authKey) {
    const wasm = getWasm();
    return wasm.computeAuthKeyHash(authKey.toString('hex'));
}
|
|
98
|
+
// ---------------------------------------------------------------------------
// XChaCha20-Poly1305 Encrypt / Decrypt
// ---------------------------------------------------------------------------
/**
 * Encrypt a UTF-8 plaintext string with XChaCha20-Poly1305 (WASM core).
 *
 * Wire format (base64-encoded):
 *   [nonce: 24 bytes][tag: 16 bytes][ciphertext: variable]
 */
export function encrypt(plaintext, encryptionKey) {
    const keyHex = encryptionKey.toString('hex');
    return getWasm().encrypt(plaintext, keyHex);
}
/**
 * Decrypt a base64-encoded XChaCha20-Poly1305 blob back to a UTF-8 string.
 */
export function decrypt(encryptedBase64, encryptionKey) {
    const keyHex = encryptionKey.toString('hex');
    return getWasm().decrypt(encryptedBase64, keyHex);
}
|
|
116
|
+
// ---------------------------------------------------------------------------
// Blind Indices
// ---------------------------------------------------------------------------
/**
 * Generate blind indices (SHA-256 hashes of tokens) for a text string.
 * Tokenization, stemming, and hashing all happen inside the WASM core.
 */
export function generateBlindIndices(text) {
    const wasm = getWasm();
    return wasm.generateBlindIndices(text);
}
// ---------------------------------------------------------------------------
// Content Fingerprint (Dedup)
// ---------------------------------------------------------------------------
/**
 * Compute an HMAC-SHA256 content fingerprint for exact-duplicate detection.
 *
 * @returns 64-character hex string.
 */
export function generateContentFingerprint(plaintext, dedupKey) {
    const wasm = getWasm();
    return wasm.generateContentFingerprint(plaintext, dedupKey.toString('hex'));
}
|
|
@@ -0,0 +1,361 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TotalReclaw Plugin — digest read path (Stage 3b).
|
|
3
|
+
*
|
|
4
|
+
* Loads the latest digest claim from the subgraph, checks staleness, and
|
|
5
|
+
* returns the pre-compiled `promptText` for injection into before_agent_start.
|
|
6
|
+
* Triggers background recompilation (non-blocking) when the digest is stale
|
|
7
|
+
* and the guard conditions (>=10 new claims OR >=24h) are met.
|
|
8
|
+
*
|
|
9
|
+
* The digest is stored on-chain as a regular encrypted fact where the
|
|
10
|
+
* decrypted content is a canonical Claim with category="dig" and a
|
|
11
|
+
* distinctive blind-index marker `DIGEST_TRAPDOOR`.
|
|
12
|
+
*/
|
|
13
|
+
import { createRequire } from 'node:module';
|
|
14
|
+
import { DIGEST_CLAIM_CAP, DIGEST_TRAPDOOR, buildDigestClaim, extractDigestFromClaim, hoursSince, isDigestStale, shouldRecompile, } from './claims-helper.js';
|
|
15
|
+
const requireWasm = createRequire(import.meta.url);
|
|
16
|
+
let _wasm = null;
|
|
17
|
+
function getWasm() {
|
|
18
|
+
if (!_wasm)
|
|
19
|
+
_wasm = requireWasm('@totalreclaw/core');
|
|
20
|
+
return _wasm;
|
|
21
|
+
}
|
|
22
|
+
// ---------------------------------------------------------------------------
// Stub / tombstone blob detection
// ---------------------------------------------------------------------------
/**
 * Is this subgraph-stored blob a supersede tombstone or other non-content
 * stub?
 *
 * Background: 3.0.7-rc.1 QA found 7 of 25 facts on the QA wallet carrying
 * `encryptedBlob == "0x00"` — a 1-byte stub written as a supersede
 * tombstone. Decrypting those unconditionally produced repeated
 * `Digest: decrypt failed … Encrypted data too short` warnings per window.
 *
 * We only short-circuit shapes that cannot plausibly hold a real
 * XChaCha20-Poly1305 payload (a valid ciphertext needs at least 40 bytes:
 * 24B nonce + 16B tag). Short-but-nonzero blobs are NOT treated as stubs:
 * if a wire-format change ever yields, say, a 30-byte blob, that should
 * surface as a decrypt-failure WARN rather than be silently skipped.
 *
 * Stub shapes:
 *   - non-string input
 *   - empty string
 *   - just the `0x` / `0X` prefix
 *   - all-zero hex of any length (e.g. "0x00", "00") — the explicit
 *     tombstone shape the relay emits when superseding a fact
 *
 * Anything else falls through to the decrypt attempt. Used by both
 * `loadLatestDigest` (read path) and `fetchAllActiveClaims` (recompile
 * path).
 */
export function isStubBlob(hex) {
    if (typeof hex !== 'string') {
        return true;
    }
    let body = hex;
    if (body.startsWith('0x') || body.startsWith('0X')) {
        body = body.slice(2);
    }
    if (body.length === 0) {
        return true;
    }
    return /^0+$/.test(body);
}
|
|
60
|
+
// ---------------------------------------------------------------------------
// Recompile-in-progress guard (in-memory, per-process)
// ---------------------------------------------------------------------------
// Single-flight flag: at most one digest recompilation per process.
let _recompileInProgress = false;
/** Is a digest recompilation currently running for this process? */
export function isRecompileInProgress() {
    return _recompileInProgress;
}
/** Attempt to acquire the recompile lock. Returns true on success. */
export function tryBeginRecompile() {
    const acquired = !_recompileInProgress;
    if (acquired) {
        _recompileInProgress = true;
    }
    return acquired;
}
/** Release the recompile lock — always call in a finally block. */
export function endRecompile() {
    _recompileInProgress = false;
}
/** Test-only helper to reset module state between cases. */
export function __resetDigestSyncState() {
    _recompileInProgress = false;
}
|
|
83
|
+
// ---------------------------------------------------------------------------
// Pure staleness + guard evaluation
// ---------------------------------------------------------------------------
/**
 * Combine the staleness check and the recompile guard into one decision.
 *
 * A fresh digest short-circuits to `{ stale: false, recompile: false }`.
 * The caller must still consult `isRecompileInProgress()` before firing a
 * background task — this function only reasons about the digest's age.
 */
export function evaluateDigestState(input) {
    if (!isDigestStale(input.digestVersion, input.currentMaxCreatedAt)) {
        return { stale: false, recompile: false };
    }
    return {
        stale: true,
        recompile: shouldRecompile({
            countNewClaims: input.countNewClaims,
            hoursSinceCompilation: input.hoursSinceCompilation,
        }),
    };
}
|
|
102
|
+
// ---------------------------------------------------------------------------
// Core compilation (pure, no I/O)
// ---------------------------------------------------------------------------
/**
 * Compile a Digest JSON from an array of Claim JSON.
 *
 * - `mode === 'template'` or `llmFn === null` → template path.
 * - `mode === 'on'` with a non-null `llmFn` → LLM path with template
 *   fallback: any parse/assembly/LLM failure logs a warning and falls
 *   back silently.
 * - A claim count above DIGEST_CLAIM_CAP forces the template path
 *   regardless of mode, keeping LLM token cost bounded.
 *
 * @returns the Digest JSON as produced by the WASM core.
 */
export async function compileDigestCore(input) {
    const { claimsJson, nowUnixSeconds, mode, llmFn, logger } = input;
    const core = getWasm();
    const nowBig = BigInt(Math.floor(nowUnixSeconds));
    // Decide up front whether the LLM path is even worth attempting.
    let attemptLlm = mode === 'on' && llmFn !== null;
    if (attemptLlm) {
        try {
            const claims = JSON.parse(claimsJson);
            if (!Array.isArray(claims) || claims.length === 0) {
                attemptLlm = false;
            }
            else if (claims.length > DIGEST_CLAIM_CAP) {
                logger.info(`Digest: ${claims.length} active claims > cap ${DIGEST_CLAIM_CAP}; using template path`);
                attemptLlm = false;
            }
        }
        catch {
            attemptLlm = false;
        }
    }
    if (attemptLlm && llmFn) {
        try {
            const raw = await llmFn(core.buildDigestPrompt(claimsJson));
            const usable = typeof raw === 'string' && raw.trim().length > 0;
            if (!usable) {
                throw new Error('LLM returned empty response');
            }
            return core.assembleDigestFromLlm(core.parseDigestResponse(raw), claimsJson, nowBig);
        }
        catch (err) {
            const msg = err instanceof Error ? err.message : String(err);
            logger.warn(`Digest LLM compilation failed, falling back to template: ${msg}`);
            // fall through to the template path below
        }
    }
    return core.buildTemplateDigest(claimsJson, nowBig);
}
|
|
155
|
+
/**
 * Load the most recent digest claim for `owner` from the subgraph.
 *
 * Queries by the DIGEST_TRAPDOOR blind-index marker, filters out tombstone
 * stubs, picks the newest surviving candidate by `createdAt`, then decrypts
 * and parses it into a Digest. Never throws: every failure path logs and
 * returns null so the caller can fall back to the legacy search path.
 *
 * @param owner - account address the facts belong to
 * @param authKeyHex - hex-encoded auth key for the subgraph query
 * @param encryptionKey - key passed through to `deps.decryptFromHex`
 * @param deps - injected I/O: `searchSubgraph`, `decryptFromHex`
 * @param logger - `warn`/`info` sink
 * @returns `{ digest, claimId, createdAt }` or `null` when unavailable
 */
export async function loadLatestDigest(owner, authKeyHex, encryptionKey, deps, logger) {
    let results;
    try {
        results = await deps.searchSubgraph(owner, [DIGEST_TRAPDOOR], 10, authKeyHex);
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        logger.warn(`Digest: subgraph query failed: ${msg}`);
        return null;
    }
    if (!results || results.length === 0)
        return null;
    // Pick the highest createdAt (client-generated Unix seconds) among rows
    // with a real (non-stub) blob. Stub blobs are supersede tombstones —
    // see `isStubBlob` above; attempting to decrypt one produces a noisy
    // `Digest: decrypt failed … Encrypted data too short` WARN. We filter
    // them out pre-ranking so we prefer a slightly-older real digest over
    // a newer tombstone. If EVERY candidate is a stub, return null quietly.
    let best = null;
    let stubCount = 0;
    for (const r of results) {
        if (isStubBlob(r.encryptedBlob)) {
            stubCount++;
            continue;
        }
        // Rows may expose the timestamp under either key; unparseable → 0.
        const createdAt = parseInt(r.createdAt ?? r.timestamp ?? '0', 10) || 0;
        if (!best || createdAt > best.createdAt) {
            best = { id: r.id, encryptedBlob: r.encryptedBlob, createdAt };
        }
    }
    if (!best) {
        if (stubCount > 0) {
            logger.info(`Digest: all ${stubCount} candidates were tombstone stubs — no digest available`);
        }
        return null;
    }
    try {
        const decrypted = deps.decryptFromHex(best.encryptedBlob, encryptionKey);
        // Normalize legacy plaintext shapes into canonical Claim JSON first.
        const canonical = getWasm().parseClaimOrLegacy(decrypted);
        const digest = extractDigestFromClaim(canonical);
        if (!digest) {
            logger.warn(`Digest: blob ${best.id.slice(0, 10)}… did not parse as a digest claim`);
            return null;
        }
        return { digest, claimId: best.id, createdAt: best.createdAt };
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        logger.warn(`Digest: decrypt failed for ${best.id.slice(0, 10)}…: ${msg}`);
        return null;
    }
}
|
|
207
|
+
/** How many recent facts we fetch to drive the recompile guard. */
export const DIGEST_RECENCY_PROBE_LIMIT = 10;
/**
 * Probe the most recent facts for `owner` and summarize their timestamps.
 *
 * Returns `maxCreatedAt` (largest valid createdAt observed, 0 when none)
 * plus `countNewerThan(digestVersion)` (how many probed facts post-date
 * the digest). Any query failure degrades to an empty probe — never throws.
 */
export async function getDigestRecencyProbe(owner, authKeyHex, deps) {
    const emptyProbe = () => ({ maxCreatedAt: 0, countNewerThan: () => 0 });
    let rows = [];
    try {
        rows = await deps.searchSubgraphBroadened(owner, DIGEST_RECENCY_PROBE_LIMIT, authKeyHex);
    }
    catch {
        return emptyProbe();
    }
    if (!rows || rows.length === 0) {
        return emptyProbe();
    }
    // Collect only parseable, positive timestamps; rows may carry the value
    // under either `createdAt` or `timestamp`.
    const timestamps = [];
    for (const row of rows) {
        const ts = parseInt(row.createdAt ?? row.timestamp ?? '0', 10);
        if (!Number.isNaN(ts) && ts > 0) {
            timestamps.push(ts);
        }
    }
    return {
        maxCreatedAt: timestamps.length > 0 ? Math.max(...timestamps) : 0,
        countNewerThan: (digestVersion) => timestamps.filter((ts) => ts > digestVersion).length,
    };
}
|
|
238
|
+
/**
 * Fetch, decrypt, and filter all active user-facing claims for `owner`.
 *
 * Skips inactive rows, stub/tombstone blobs, infrastructure claims
 * (`dig` digests, `ent` entity records), and any row that fails
 * decrypt/parse — a single bad row never fails the whole compilation.
 *
 * @returns a JSON array string of canonical Claim objects ('[]' on failure)
 */
export async function fetchAllActiveClaims(owner, authKeyHex, encryptionKey, limit, deps, logger) {
    let rows;
    try {
        rows = await deps.searchSubgraphBroadened(owner, limit, authKeyHex);
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        logger.warn(`Digest: fetchAllActiveClaims subgraph query failed: ${msg}`);
        return '[]';
    }
    if (!rows || rows.length === 0) {
        return '[]';
    }
    const claims = [];
    for (const row of rows) {
        if (row.isActive === false) {
            continue;
        }
        // Stub / tombstone blobs (encryptedBlob == "0x00") always fail
        // decrypt with `Encrypted data too short`. Skipping pre-decrypt
        // avoids a WASM call per stub — the QA wallet had 7 of 25 facts
        // as stubs, so this matters for recompile cost too.
        if (isStubBlob(row.encryptedBlob)) {
            continue;
        }
        try {
            const plaintext = deps.decryptFromHex(row.encryptedBlob, encryptionKey);
            const claim = JSON.parse(getWasm().parseClaimOrLegacy(plaintext));
            // Infrastructure claims — digests and entity records — are not
            // user memories; keep them out of the compiled digest input.
            if (claim.c === 'dig' || claim.c === 'ent') {
                continue;
            }
            claims.push(claim);
        }
        catch {
            // Best-effort: skip un-decryptable / un-parseable rows.
        }
    }
    return JSON.stringify(claims);
}
|
|
275
|
+
/**
 * Full recompile pipeline. Safe to fire-and-forget (never throws).
 *
 * Steps:
 *  1. Fetch all active claims (decrypted, filtered to user-facing categories)
 *  2. Compile via template or LLM (with template fallback)
 *  3. Wrap as a canonical Claim and encrypt + store on-chain
 *  4. Tombstone the previous digest (if any) so only one stays indexed
 *
 * The caller should call `tryBeginRecompile` before scheduling and
 * `endRecompile` in a finally — this function does not touch the guard.
 */
export async function recompileDigest(input) {
    const { mode, previousClaimId, nowUnixSeconds, deps, logger } = input;
    try {
        const claimsJson = await deps.fetchAllActiveClaimsFn();
        const digestJson = await compileDigestCore({ claimsJson, nowUnixSeconds, mode, llmFn: deps.llmFn, logger });
        const compiledAt = new Date(nowUnixSeconds * 1000).toISOString();
        const canonical = buildDigestClaim({ digestJson, compiledAt });
        await deps.storeDigestClaim(canonical, compiledAt);
        if (previousClaimId) {
            // A failed tombstone is non-fatal: the new digest is already
            // stored, so just warn and move on.
            try {
                await deps.tombstoneDigest(previousClaimId);
            }
            catch (err) {
                const msg = err instanceof Error ? err.message : String(err);
                logger.warn(`Digest: tombstone of previous ${previousClaimId.slice(0, 10)}… failed: ${msg}`);
            }
        }
        logger.info(`Digest: recompiled and stored (compiledAt=${compiledAt})`);
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        logger.warn(`Digest: recompile failed: ${msg}`);
    }
}
|
|
317
|
+
/**
 * Top-level read path helper. Decides whether to return a promptText from
 * the latest digest, kicks off a background recompile when appropriate,
 * and never throws.
 *
 * If this returns `{ promptText: null }`, the caller must fall back to the
 * legacy individual-fact search path — digest injection is a fast path,
 * not a replacement.
 */
export async function maybeInjectDigest(input) {
    const { owner, authKeyHex, encryptionKey, mode, nowMs, loadDeps, probeDeps, recompileFn, logger, } = input;
    if (mode === 'off') {
        return { promptText: null, state: 'off' };
    }
    // The latest digest and the recency probe are independent reads.
    const [loaded, probe] = await Promise.all([
        loadLatestDigest(owner, authKeyHex, encryptionKey, loadDeps, logger),
        getDigestRecencyProbe(owner, authKeyHex, probeDeps),
    ]);
    if (!loaded) {
        // No digest yet — schedule a first compile, fall back to legacy search.
        if (!isRecompileInProgress()) {
            recompileFn(null);
        }
        return { promptText: null, state: 'first-compile' };
    }
    // Coerce version to a number; non-numeric / missing values become 0.
    const rawVersion = loaded.digest.version;
    let digestVersion;
    if (typeof rawVersion === 'number') {
        digestVersion = rawVersion;
    }
    else {
        digestVersion = parseInt(String(rawVersion ?? 0), 10) || 0;
    }
    const rawCompiledAt = loaded.digest.compiled_at;
    const compiledAt = typeof rawCompiledAt === 'string' ? rawCompiledAt : '';
    const verdict = evaluateDigestState({
        digestVersion,
        currentMaxCreatedAt: probe.maxCreatedAt,
        countNewClaims: probe.countNewerThan(digestVersion),
        hoursSinceCompilation: hoursSince(compiledAt, nowMs),
    });
    const shouldFire = verdict.stale && verdict.recompile && !isRecompileInProgress();
    if (shouldFire) {
        recompileFn(loaded.claimId);
    }
    const rawPrompt = loaded.digest.prompt_text;
    return {
        promptText: typeof rawPrompt === 'string' ? rawPrompt : null,
        state: verdict.stale ? 'stale' : 'fresh',
    };
}
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
/**
 * download-ux.ts — Wrapper for heavy first-call downloads (rc.16, fixes #92).
 *
 * Wraps a download promise with:
 *   - a per-attempt timeout (default 600s; override via
 *     TOTALRECLAW_ONNX_INSTALL_TIMEOUT, expressed in seconds)
 *   - a 60s keep-alive log so slow-bandwidth users don't think it froze
 *   - a 3-attempt exponential-backoff retry (per-attempt timeout 1x/2x/4x)
 *   - a loud, actionable error once attempts are exhausted
 *
 * Pure stdlib on purpose — the unit test can exercise this without pulling
 * the heavy `@huggingface/transformers` chain.
 */
const DEFAULT_DOWNLOAD_TIMEOUT_MS = 600_000;
const KEEPALIVE_INTERVAL_MS = 60_000;
const MAX_DOWNLOAD_ATTEMPTS = 3;
/**
 * Resolve the per-attempt download timeout in milliseconds.
 *
 * Reads TOTALRECLAW_ONNX_INSTALL_TIMEOUT (seconds); non-numeric,
 * non-finite, or non-positive values fall back to the 600s default.
 */
export function getDownloadTimeoutMs() {
    const override = process.env.TOTALRECLAW_ONNX_INSTALL_TIMEOUT;
    if (!override) {
        return DEFAULT_DOWNLOAD_TIMEOUT_MS;
    }
    const seconds = Number(override);
    if (!Number.isFinite(seconds) || seconds <= 0) {
        return DEFAULT_DOWNLOAD_TIMEOUT_MS;
    }
    // The env var is specified in seconds; callers want milliseconds.
    return Math.floor(seconds * 1000);
}
|
|
26
|
+
/**
 * Run `download()` with keep-alive logging, a per-attempt timeout, and
 * exponential-backoff retries.
 *
 * @param label - human-readable name used in log lines (e.g. "model")
 * @param download - zero-arg function returning the download promise
 * @param opts - optional overrides: `timeoutMs`, `keepaliveMs`,
 *   `maxAttempts`, `log(msg)`, `sleep(ms)` (the last two are injectable
 *   for tests)
 * @returns whatever `download()` resolves to
 * @throws Error after `maxAttempts` failures, with an actionable message
 *
 * BUG FIX: the timeout `setTimeout` inside the Promise.race was never
 * cleared, so after a successful (or quickly-failed) attempt a pending
 * timer of up to `attemptTimeoutMs` (default 600s, doubling per attempt)
 * kept the Node event loop alive and delayed process exit. Both the
 * keep-alive interval and the timeout timer are now cleared in a
 * `finally` as soon as the race settles.
 */
export async function downloadWithUX(label, download, opts) {
    const baseTimeoutMs = opts?.timeoutMs ?? getDownloadTimeoutMs();
    const keepaliveMs = opts?.keepaliveMs ?? KEEPALIVE_INTERVAL_MS;
    const maxAttempts = opts?.maxAttempts ?? MAX_DOWNLOAD_ATTEMPTS;
    const log = opts?.log ?? ((msg) => console.error(msg));
    const sleep = opts?.sleep ?? ((ms) => new Promise(r => setTimeout(r, ms)));
    let lastErr = null;
    for (let attempt = 1; attempt <= maxAttempts; attempt++) {
        // Per-attempt timeout grows 1x / 2x / 4x across attempts.
        const attemptTimeoutMs = baseTimeoutMs * Math.pow(2, attempt - 1);
        const startedAt = Date.now();
        const keepaliveTimer = setInterval(() => {
            const elapsedSec = Math.floor((Date.now() - startedAt) / 1000);
            log(`[TotalReclaw] ${label}: still downloading… (${elapsedSec}s elapsed, attempt ${attempt}/${maxAttempts})`);
        }, keepaliveMs);
        let timeoutTimer = null;
        let result;
        let succeeded = false;
        let raceErr = null;
        try {
            result = await Promise.race([
                download(),
                new Promise((_, reject) => {
                    timeoutTimer = setTimeout(() => reject(new Error(`Download timeout after ${Math.floor(attemptTimeoutMs / 1000)}s (attempt ${attempt}/${maxAttempts})`)), attemptTimeoutMs);
                }),
            ]);
            succeeded = true;
        }
        catch (err) {
            raceErr = err;
        }
        finally {
            // Clear BOTH timers as soon as the race settles, so no stray
            // timer holds the event loop open after success.
            clearInterval(keepaliveTimer);
            if (timeoutTimer !== null) {
                clearTimeout(timeoutTimer);
            }
        }
        if (succeeded) {
            return result;
        }
        lastErr = raceErr;
        const msg = raceErr instanceof Error ? raceErr.message : String(raceErr);
        if (attempt < maxAttempts) {
            // Backoff between attempts: 5s / 10s / 20s, capped at 30s.
            const backoffMs = Math.min(5_000 * Math.pow(2, attempt - 1), 30_000);
            log(`[TotalReclaw] ${label}: attempt ${attempt} failed (${msg}). Retrying in ${Math.floor(backoffMs / 1000)}s…`);
            await sleep(backoffMs);
        }
    }
    const finalMsg = lastErr instanceof Error ? lastErr.message : String(lastErr);
    throw new Error(`[TotalReclaw] Embedding model download failed after ${maxAttempts} attempts (last error: ${finalMsg}). ` +
        `Check your network connection and retry: \`openclaw plugins install totalreclaw\`. ` +
        `On slow connections, set TOTALRECLAW_ONNX_INSTALL_TIMEOUT=1200 (in seconds) to extend the per-attempt timeout.`);
}
|