@totalreclaw/totalreclaw 1.4.0 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.ts +316 -0
- package/package.json +1 -1
package/index.ts
CHANGED
|
@@ -10,6 +10,7 @@
|
|
|
10
10
|
* - totalreclaw_consolidate -- scan and merge near-duplicate memories
|
|
11
11
|
* - totalreclaw_import_from -- import memories from other tools (Mem0, MCP Memory, etc.)
|
|
12
12
|
* - totalreclaw_upgrade -- create Stripe checkout for Pro upgrade
|
|
13
|
+
* - totalreclaw_migrate -- migrate testnet memories to mainnet after Pro upgrade
|
|
13
14
|
*
|
|
14
15
|
* Also registers a `before_agent_start` hook that automatically injects
|
|
15
16
|
* relevant memories into the agent's context.
|
|
@@ -725,6 +726,138 @@ function decryptFromHex(hexBlob: string, key: Buffer): string {
|
|
|
725
726
|
return decrypt(b64, key);
|
|
726
727
|
}
|
|
727
728
|
|
|
729
|
+
// ---------------------------------------------------------------------------
|
|
730
|
+
// Migration GraphQL helpers
|
|
731
|
+
// ---------------------------------------------------------------------------
|
|
732
|
+
|
|
733
|
+
/** Shape of a fact entity as returned by the subgraph migration queries. */
interface MigrationFact {
  id: string;                        // subgraph entity id; also used as the pagination cursor
  owner: string;                     // owner wallet address (Bytes in the subgraph schema)
  encryptedBlob: string;             // encrypted payload as hex; may carry a '0x' prefix (stripped before re-submission)
  encryptedEmbedding: string | null; // encrypted embedding, or null when none was stored
  decayScore: string;                // decimal score serialized as a string by the subgraph (parsed with parseFloat)
  isActive: boolean;                 // queries filter on isActive:true, so this is true for fetched facts
  contentFp: string;                 // content fingerprint; used for cross-chain dedup / idempotency
  source: string;                    // originating tool/source label
  agentId: string;                   // id of the agent that created the fact
  version: number;
  timestamp: string;                 // creation timestamp — presumably ISO-8601; TODO confirm against subgraph schema
}
|
|
746
|
+
|
|
747
|
+
/** Facts requested per subgraph page when paginating by id cursor. */
const MIGRATION_PAGE_SIZE = 1000;
|
|
748
|
+
|
|
749
|
+
/** Execute a GraphQL query against a subgraph endpoint. Returns null on error. */
|
|
750
|
+
async function migrationGqlQuery<T>(
|
|
751
|
+
endpoint: string,
|
|
752
|
+
query: string,
|
|
753
|
+
variables: Record<string, unknown>,
|
|
754
|
+
authKey?: string,
|
|
755
|
+
): Promise<T | null> {
|
|
756
|
+
try {
|
|
757
|
+
const headers: Record<string, string> = {
|
|
758
|
+
'Content-Type': 'application/json',
|
|
759
|
+
'X-TotalReclaw-Client': 'openclaw-plugin',
|
|
760
|
+
};
|
|
761
|
+
if (authKey) headers['Authorization'] = `Bearer ${authKey}`;
|
|
762
|
+
const response = await fetch(endpoint, {
|
|
763
|
+
method: 'POST',
|
|
764
|
+
headers,
|
|
765
|
+
body: JSON.stringify({ query, variables }),
|
|
766
|
+
});
|
|
767
|
+
if (!response.ok) return null;
|
|
768
|
+
const json = await response.json() as { data?: T; errors?: unknown[] };
|
|
769
|
+
return json.data ?? null;
|
|
770
|
+
} catch {
|
|
771
|
+
return null;
|
|
772
|
+
}
|
|
773
|
+
}
|
|
774
|
+
|
|
775
|
+
/** Fetch all active facts by owner from a subgraph, paginated. */
|
|
776
|
+
async function fetchAllFactsByOwner(
|
|
777
|
+
subgraphUrl: string,
|
|
778
|
+
owner: string,
|
|
779
|
+
authKey: string,
|
|
780
|
+
): Promise<MigrationFact[]> {
|
|
781
|
+
const allFacts: MigrationFact[] = [];
|
|
782
|
+
let lastId = '';
|
|
783
|
+
|
|
784
|
+
while (true) {
|
|
785
|
+
const hasLastId = lastId !== '';
|
|
786
|
+
const query = hasLastId
|
|
787
|
+
? `query($owner:Bytes!,$first:Int!,$lastId:String!){facts(where:{owner:$owner,isActive:true,id_gt:$lastId},first:$first,orderBy:id,orderDirection:asc){id owner encryptedBlob encryptedEmbedding decayScore isActive contentFp source agentId version timestamp}}`
|
|
788
|
+
: `query($owner:Bytes!,$first:Int!){facts(where:{owner:$owner,isActive:true},first:$first,orderBy:id,orderDirection:asc){id owner encryptedBlob encryptedEmbedding decayScore isActive contentFp source agentId version timestamp}}`;
|
|
789
|
+
const vars: Record<string, unknown> = hasLastId
|
|
790
|
+
? { owner, first: MIGRATION_PAGE_SIZE, lastId }
|
|
791
|
+
: { owner, first: MIGRATION_PAGE_SIZE };
|
|
792
|
+
|
|
793
|
+
const data = await migrationGqlQuery<{ facts?: MigrationFact[] }>(subgraphUrl, query, vars, authKey);
|
|
794
|
+
const facts = data?.facts ?? [];
|
|
795
|
+
if (facts.length === 0) break;
|
|
796
|
+
allFacts.push(...facts);
|
|
797
|
+
if (facts.length < MIGRATION_PAGE_SIZE) break;
|
|
798
|
+
lastId = facts[facts.length - 1].id;
|
|
799
|
+
}
|
|
800
|
+
|
|
801
|
+
return allFacts;
|
|
802
|
+
}
|
|
803
|
+
|
|
804
|
+
/** Fetch content fingerprints from a subgraph for idempotency. */
|
|
805
|
+
async function fetchContentFingerprintsByOwner(
|
|
806
|
+
subgraphUrl: string,
|
|
807
|
+
owner: string,
|
|
808
|
+
authKey: string,
|
|
809
|
+
): Promise<Set<string>> {
|
|
810
|
+
const fps = new Set<string>();
|
|
811
|
+
let lastId = '';
|
|
812
|
+
|
|
813
|
+
while (true) {
|
|
814
|
+
const hasLastId = lastId !== '';
|
|
815
|
+
const query = hasLastId
|
|
816
|
+
? `query($owner:Bytes!,$first:Int!,$lastId:String!){facts(where:{owner:$owner,isActive:true,id_gt:$lastId},first:$first,orderBy:id,orderDirection:asc){id contentFp}}`
|
|
817
|
+
: `query($owner:Bytes!,$first:Int!){facts(where:{owner:$owner,isActive:true},first:$first,orderBy:id,orderDirection:asc){id contentFp}}`;
|
|
818
|
+
const vars: Record<string, unknown> = hasLastId
|
|
819
|
+
? { owner, first: MIGRATION_PAGE_SIZE, lastId }
|
|
820
|
+
: { owner, first: MIGRATION_PAGE_SIZE };
|
|
821
|
+
|
|
822
|
+
const data = await migrationGqlQuery<{ facts?: Array<{ id: string; contentFp: string }> }>(subgraphUrl, query, vars, authKey);
|
|
823
|
+
const facts = data?.facts ?? [];
|
|
824
|
+
if (facts.length === 0) break;
|
|
825
|
+
for (const f of facts) {
|
|
826
|
+
if (f.contentFp) fps.add(f.contentFp);
|
|
827
|
+
}
|
|
828
|
+
if (facts.length < MIGRATION_PAGE_SIZE) break;
|
|
829
|
+
lastId = facts[facts.length - 1].id;
|
|
830
|
+
}
|
|
831
|
+
|
|
832
|
+
return fps;
|
|
833
|
+
}
|
|
834
|
+
|
|
835
|
+
/** Fetch blind index hashes for given fact IDs. */
async function fetchBlindIndicesByFactIds(
  subgraphUrl: string,
  factIds: string[],
  authKey: string,
): Promise<Map<string, string[]>> {
  // fact id -> list of blind-index hashes attached to that fact
  const result = new Map<string, string[]>();
  // Query ids in chunks of 50 so each serialized `fact_in` filter stays small.
  const CHUNK = 50;

  for (let i = 0; i < factIds.length; i += CHUNK) {
    const chunk = factIds.slice(i, i + CHUNK);
    const query = `query($factIds:[String!]!,$first:Int!){blindIndexes(where:{fact_in:$factIds},first:$first){hash fact{id}}}`;
    // NOTE(review): `first` is capped at 1000 with no pagination inside a
    // chunk — if the 50 facts in a chunk ever carry more than 1000 blind
    // indexes combined, the excess is silently dropped. Confirm the
    // per-fact blind-index count bound before relying on this for large vaults.
    const data = await migrationGqlQuery<{
      blindIndexes?: Array<{ hash: string; fact: { id: string } }>;
    }>(subgraphUrl, query, { factIds: chunk, first: 1000 }, authKey);

    // A null response (transport/HTTP failure) is treated as an empty page;
    // migration proceeds with whatever indices were recovered.
    for (const entry of data?.blindIndexes ?? []) {
      const existing = result.get(entry.fact.id) || [];
      existing.push(entry.hash);
      result.set(entry.fact.id, existing);
    }
  }

  return result;
}
|
|
860
|
+
|
|
728
861
|
/**
|
|
729
862
|
* Fetch existing memories from the vault to provide dedup context for extraction.
|
|
730
863
|
* Returns a lightweight list of {id, text} pairs for the LLM prompt.
|
|
@@ -2305,6 +2438,189 @@ const plugin = {
|
|
|
2305
2438
|
{ name: 'totalreclaw_upgrade' },
|
|
2306
2439
|
);
|
|
2307
2440
|
|
|
2441
|
+
// ---------------------------------------------------------------
|
|
2442
|
+
// Tool: totalreclaw_migrate
|
|
2443
|
+
// ---------------------------------------------------------------
|
|
2444
|
+
|
|
2445
|
+
api.registerTool(
  {
    name: 'totalreclaw_migrate',
    label: 'Migrate Testnet to Mainnet',
    description:
      'Migrate memories from testnet (Base Sepolia) to mainnet (Gnosis) after upgrading to Pro. ' +
      'Dry-run by default — set confirm=true to execute. Idempotent: re-running skips already-migrated facts.',
    parameters: {
      type: 'object',
      properties: {
        confirm: {
          type: 'boolean',
          description: 'Set to true to execute the migration. Without it, returns a dry-run preview.',
          default: false,
        },
      },
      additionalProperties: false,
    },
    // Orchestrates the migration: tier check -> fetch testnet facts ->
    // fingerprint dedup against mainnet -> (confirm only) re-encode and
    // batch-submit on chain.
    async execute(_params: { confirm?: boolean }) {
      try {
        await requireFullSetup(api.logger);

        // authKeyHex / subgraphOwner are closure state populated during
        // plugin initialization.
        if (!authKeyHex || !subgraphOwner) {
          return {
            content: [{ type: 'text', text: 'Plugin not fully initialized. Ensure TOTALRECLAW_RECOVERY_PHRASE is set.' }],
          };
        }

        if (!isSubgraphMode()) {
          return {
            content: [{ type: 'text', text: 'Migration is only available with the managed service (subgraph mode).' }],
          };
        }

        const confirm = _params?.confirm === true;
        // Strip trailing slashes so the path concatenations below stay clean.
        const serverUrl = (process.env.TOTALRECLAW_SERVER_URL || 'https://api.totalreclaw.xyz').replace(/\/+$/, '');

        // 1. Check billing tier — migration is gated on Pro.
        const billingResp = await fetch(
          `${serverUrl}/v1/billing/status?wallet_address=${encodeURIComponent(subgraphOwner)}`,
          {
            method: 'GET',
            headers: {
              'Authorization': `Bearer ${authKeyHex}`,
              'Content-Type': 'application/json',
              'X-TotalReclaw-Client': 'openclaw-plugin',
            },
          },
        );
        if (!billingResp.ok) {
          return { content: [{ type: 'text', text: `Failed to check billing tier (HTTP ${billingResp.status}).` }] };
        }
        // NOTE(review): body is cast, not validated — assumes the server
        // always returns { tier: string }; verify against the API contract.
        const billingData = await billingResp.json() as { tier: string };
        if (billingData.tier !== 'pro') {
          return {
            content: [{ type: 'text', text: 'Migration requires Pro tier. Use totalreclaw_upgrade to upgrade first.' }],
          };
        }

        // 2. Fetch testnet facts via relay (chain=testnet query param)
        const testnetSubgraphUrl = `${serverUrl}/v1/subgraph?chain=testnet`;
        const mainnetSubgraphUrl = `${serverUrl}/v1/subgraph`;

        api.logger.info('Fetching testnet facts...');
        const testnetFacts = await fetchAllFactsByOwner(testnetSubgraphUrl, subgraphOwner, authKeyHex);

        if (testnetFacts.length === 0) {
          return {
            content: [{ type: 'text', text: 'No facts found on testnet. Nothing to migrate.' }],
          };
        }

        // 3. Check mainnet for existing facts (idempotency). Facts with an
        // empty fingerprint cannot be deduped and are always (re)migrated.
        api.logger.info('Checking mainnet for existing facts...');
        const mainnetFps = await fetchContentFingerprintsByOwner(mainnetSubgraphUrl, subgraphOwner, authKeyHex);
        const factsToMigrate = testnetFacts.filter(f => !f.contentFp || !mainnetFps.has(f.contentFp));
        const alreadyOnMainnet = testnetFacts.length - factsToMigrate.length;

        // 4. Dry-run: report counts only, no chain writes.
        if (!confirm) {
          const msg = factsToMigrate.length === 0
            ? `All ${testnetFacts.length} testnet facts already exist on mainnet. Nothing to migrate.`
            : `Found ${factsToMigrate.length} facts to migrate from testnet to Gnosis mainnet (${alreadyOnMainnet} already on mainnet). Call with confirm=true to proceed.`;
          return {
            content: [{ type: 'text', text: msg }],
            details: {
              mode: 'dry_run',
              testnet_facts: testnetFacts.length,
              already_on_mainnet: alreadyOnMainnet,
              to_migrate: factsToMigrate.length,
            },
          };
        }

        // 5. Execute migration
        if (factsToMigrate.length === 0) {
          return {
            content: [{ type: 'text', text: `All ${testnetFacts.length} testnet facts already exist on mainnet. Nothing to migrate.` }],
          };
        }

        // Fetch blind indices (from testnet) so they can travel with each fact.
        api.logger.info(`Fetching blind indices for ${factsToMigrate.length} facts...`);
        const factIds = factsToMigrate.map(f => f.id);
        const blindIndicesMap = await fetchBlindIndicesByFactIds(testnetSubgraphUrl, factIds, authKeyHex);

        // Build protobuf payloads
        const payloads: Buffer[] = [];
        for (const fact of factsToMigrate) {
          // Strip an optional '0x' prefix from the hex blob before re-encoding.
          const blobHex = fact.encryptedBlob.startsWith('0x') ? fact.encryptedBlob.slice(2) : fact.encryptedBlob;
          const indices = blindIndicesMap.get(fact.id) || [];
          const factPayload: FactPayload = {
            id: fact.id,
            // NOTE(review): stamps the migration time instead of carrying
            // over fact.timestamp — confirm losing the original timestamp
            // is intended.
            timestamp: new Date().toISOString(),
            owner: subgraphOwner,
            encryptedBlob: blobHex,
            blindIndices: indices,
            // NOTE(review): `|| 0.5` rewrites NaN to 0.5 but also clobbers a
            // legitimate stored score of 0 — confirm 0 is not a valid value.
            decayScore: parseFloat(fact.decayScore) || 0.5,
            source: fact.source || 'migration',
            contentFp: fact.contentFp || '',
            agentId: fact.agentId || 'openclaw-plugin',
            encryptedEmbedding: fact.encryptedEmbedding || undefined,
          };
          payloads.push(encodeFactProtobuf(factPayload));
        }

        // Batch submit (15 per UserOp)
        const BATCH_SIZE = 15;
        const batchConfig = { ...getSubgraphConfig(), authKeyHex: authKeyHex!, walletAddress: subgraphOwner ?? undefined };
        let migrated = 0;
        let failedBatches = 0;

        for (let i = 0; i < payloads.length; i += BATCH_SIZE) {
          const batch = payloads.slice(i, i + BATCH_SIZE);
          const batchNum = Math.floor(i / BATCH_SIZE) + 1;
          const totalBatches = Math.ceil(payloads.length / BATCH_SIZE);
          api.logger.info(`Migrating batch ${batchNum}/${totalBatches} (${batch.length} facts)...`);

          // A failed batch does not abort the run; remaining batches are
          // still attempted and a re-run retries the failures (idempotent
          // thanks to the fingerprint dedup in step 3).
          try {
            const result = await submitFactBatchOnChain(batch, batchConfig);
            if (result.success) {
              migrated += batch.length;
            } else {
              failedBatches++;
            }
          } catch (err: unknown) {
            const msg = err instanceof Error ? err.message : String(err);
            api.logger.error(`Migration batch ${batchNum} failed: ${msg}`);
            failedBatches++;
          }
        }

        const resultMsg = failedBatches === 0
          ? `Successfully migrated ${migrated} memories from testnet to Gnosis mainnet.`
          : `Migrated ${migrated}/${factsToMigrate.length} memories. ${failedBatches} batch(es) failed — re-run to retry (idempotent).`;

        return {
          content: [{ type: 'text', text: resultMsg }],
          details: {
            mode: 'executed',
            testnet_facts: testnetFacts.length,
            already_on_mainnet: alreadyOnMainnet,
            to_migrate: factsToMigrate.length,
            migrated,
            failed_batches: failedBatches,
          },
        };
      } catch (err: unknown) {
        const message = err instanceof Error ? err.message : String(err);
        api.logger.error(`totalreclaw_migrate failed: ${message}`);
        return {
          content: [{ type: 'text', text: `Migration failed: ${message}` }],
        };
      }
    },
  },
  { name: 'totalreclaw_migrate' },
);
|
|
2623
|
+
|
|
2308
2624
|
// ---------------------------------------------------------------
|
|
2309
2625
|
// Hook: before_agent_start
|
|
2310
2626
|
// ---------------------------------------------------------------
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@totalreclaw/totalreclaw",
|
|
3
|
-
"version": "1.4.0",
|
|
3
|
+
"version": "1.5.0",
|
|
4
4
|
"description": "End-to-end encrypted memory for AI agents — portable, yours forever. Automatic extraction, semantic search, and on-chain storage",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"keywords": [
|