@totalreclaw/totalreclaw 1.4.0 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,7 +19,7 @@ export interface NormalizedFact {
19
19
  tags?: string[];
20
20
  }
21
21
 
22
- export type ImportSource = 'mem0' | 'mcp-memory' | 'memoclaw' | 'generic-json' | 'generic-csv';
22
+ export type ImportSource = 'mem0' | 'mcp-memory' | 'chatgpt' | 'claude' | 'memoclaw' | 'generic-json' | 'generic-csv';
23
23
 
24
24
  /**
25
25
  * What the user passes to the import tool.
@@ -73,15 +73,38 @@ export interface ImportResult {
73
73
  export type ProgressCallback = (progress: {
74
74
  current: number;
75
75
  total: number;
76
- phase: 'fetching' | 'parsing' | 'storing';
76
+ phase: 'fetching' | 'parsing' | 'storing' | 'extracting';
77
77
  message: string;
78
78
  }) => void;
79
79
 
80
+ /**
81
+ * A chunk of conversation messages for LLM-based fact extraction.
82
+ * Adapters that parse conversation data (ChatGPT, Claude) return these
83
+ * instead of pre-extracted facts, delegating extraction to the LLM.
84
+ */
85
+ export interface ConversationChunk {
86
+ /** Human-readable title for progress reporting */
87
+ title: string;
88
+ /** Ordered messages in this chunk */
89
+ messages: Array<{ role: 'user' | 'assistant'; text: string }>;
90
+ /** Original timestamp (ISO 8601) if available */
91
+ timestamp?: string;
92
+ }
93
+
80
94
  /**
81
95
  * Adapter parse result — returned by each adapter's parse method.
96
+ *
97
+ * Adapters return EITHER `facts` (pre-structured sources like Mem0, MCP Memory)
98
+ * OR `chunks` (conversation-based sources like ChatGPT, Claude) that need
99
+ * LLM extraction. The caller checks which field is populated.
82
100
  */
83
101
  export interface AdapterParseResult {
102
+ /** Pre-structured facts (Mem0, MCP Memory adapters) */
84
103
  facts: NormalizedFact[];
104
+ /** Conversation chunks needing LLM extraction (ChatGPT, Claude adapters) */
105
+ chunks: ConversationChunk[];
106
+ /** Total message count across all chunks */
107
+ totalMessages: number;
85
108
  warnings: string[];
86
109
  errors: string[];
87
110
  /** Metadata about the source (for logging) */
package/index.ts CHANGED
@@ -10,6 +10,7 @@
10
10
  * - totalreclaw_consolidate -- scan and merge near-duplicate memories
11
11
  * - totalreclaw_import_from -- import memories from other tools (Mem0, MCP Memory, etc.)
12
12
  * - totalreclaw_upgrade -- create Stripe checkout for Pro upgrade
13
+ * - totalreclaw_migrate -- migrate testnet memories to mainnet after Pro upgrade
13
14
  *
14
15
  * Also registers a `before_agent_start` hook that automatically injects
15
16
  * relevant memories into the agent's context.
@@ -331,6 +332,9 @@ async function getFactCount(logger: OpenClawPluginApi['logger']): Promise<number
331
332
  /** True when recovery phrase is missing — tools return setup instructions. */
332
333
  let needsSetup = false;
333
334
 
335
+ /** True on first before_agent_start after successful init — show welcome message once. */
336
+ let firstRunAfterInit = true;
337
+
334
338
  /**
335
339
  * Derive keys from the recovery phrase, load or create credentials, and
336
340
  * register with the server if this is the first run.
@@ -725,6 +729,138 @@ function decryptFromHex(hexBlob: string, key: Buffer): string {
725
729
  return decrypt(b64, key);
726
730
  }
727
731
 
732
+ // ---------------------------------------------------------------------------
733
+ // Migration GraphQL helpers
734
+ // ---------------------------------------------------------------------------
735
+
736
+ interface MigrationFact {
737
+ id: string;
738
+ owner: string;
739
+ encryptedBlob: string;
740
+ encryptedEmbedding: string | null;
741
+ decayScore: string;
742
+ isActive: boolean;
743
+ contentFp: string;
744
+ source: string;
745
+ agentId: string;
746
+ version: number;
747
+ timestamp: string;
748
+ }
749
+
750
+ const MIGRATION_PAGE_SIZE = 1000;
751
+
752
+ /** Execute a GraphQL query against a subgraph endpoint. Returns null on error. */
753
+ async function migrationGqlQuery<T>(
754
+ endpoint: string,
755
+ query: string,
756
+ variables: Record<string, unknown>,
757
+ authKey?: string,
758
+ ): Promise<T | null> {
759
+ try {
760
+ const headers: Record<string, string> = {
761
+ 'Content-Type': 'application/json',
762
+ 'X-TotalReclaw-Client': 'openclaw-plugin',
763
+ };
764
+ if (authKey) headers['Authorization'] = `Bearer ${authKey}`;
765
+ const response = await fetch(endpoint, {
766
+ method: 'POST',
767
+ headers,
768
+ body: JSON.stringify({ query, variables }),
769
+ });
770
+ if (!response.ok) return null;
771
+ const json = await response.json() as { data?: T; errors?: unknown[] };
772
+ return json.data ?? null;
773
+ } catch {
774
+ return null;
775
+ }
776
+ }
777
+
778
+ /** Fetch all active facts by owner from a subgraph, paginated. */
779
+ async function fetchAllFactsByOwner(
780
+ subgraphUrl: string,
781
+ owner: string,
782
+ authKey: string,
783
+ ): Promise<MigrationFact[]> {
784
+ const allFacts: MigrationFact[] = [];
785
+ let lastId = '';
786
+
787
+ while (true) {
788
+ const hasLastId = lastId !== '';
789
+ const query = hasLastId
790
+ ? `query($owner:Bytes!,$first:Int!,$lastId:String!){facts(where:{owner:$owner,isActive:true,id_gt:$lastId},first:$first,orderBy:id,orderDirection:asc){id owner encryptedBlob encryptedEmbedding decayScore isActive contentFp source agentId version timestamp}}`
791
+ : `query($owner:Bytes!,$first:Int!){facts(where:{owner:$owner,isActive:true},first:$first,orderBy:id,orderDirection:asc){id owner encryptedBlob encryptedEmbedding decayScore isActive contentFp source agentId version timestamp}}`;
792
+ const vars: Record<string, unknown> = hasLastId
793
+ ? { owner, first: MIGRATION_PAGE_SIZE, lastId }
794
+ : { owner, first: MIGRATION_PAGE_SIZE };
795
+
796
+ const data = await migrationGqlQuery<{ facts?: MigrationFact[] }>(subgraphUrl, query, vars, authKey);
797
+ const facts = data?.facts ?? [];
798
+ if (facts.length === 0) break;
799
+ allFacts.push(...facts);
800
+ if (facts.length < MIGRATION_PAGE_SIZE) break;
801
+ lastId = facts[facts.length - 1].id;
802
+ }
803
+
804
+ return allFacts;
805
+ }
806
+
807
+ /** Fetch content fingerprints from a subgraph for idempotency. */
808
+ async function fetchContentFingerprintsByOwner(
809
+ subgraphUrl: string,
810
+ owner: string,
811
+ authKey: string,
812
+ ): Promise<Set<string>> {
813
+ const fps = new Set<string>();
814
+ let lastId = '';
815
+
816
+ while (true) {
817
+ const hasLastId = lastId !== '';
818
+ const query = hasLastId
819
+ ? `query($owner:Bytes!,$first:Int!,$lastId:String!){facts(where:{owner:$owner,isActive:true,id_gt:$lastId},first:$first,orderBy:id,orderDirection:asc){id contentFp}}`
820
+ : `query($owner:Bytes!,$first:Int!){facts(where:{owner:$owner,isActive:true},first:$first,orderBy:id,orderDirection:asc){id contentFp}}`;
821
+ const vars: Record<string, unknown> = hasLastId
822
+ ? { owner, first: MIGRATION_PAGE_SIZE, lastId }
823
+ : { owner, first: MIGRATION_PAGE_SIZE };
824
+
825
+ const data = await migrationGqlQuery<{ facts?: Array<{ id: string; contentFp: string }> }>(subgraphUrl, query, vars, authKey);
826
+ const facts = data?.facts ?? [];
827
+ if (facts.length === 0) break;
828
+ for (const f of facts) {
829
+ if (f.contentFp) fps.add(f.contentFp);
830
+ }
831
+ if (facts.length < MIGRATION_PAGE_SIZE) break;
832
+ lastId = facts[facts.length - 1].id;
833
+ }
834
+
835
+ return fps;
836
+ }
837
+
838
+ /** Fetch blind index hashes for given fact IDs. */
839
+ async function fetchBlindIndicesByFactIds(
840
+ subgraphUrl: string,
841
+ factIds: string[],
842
+ authKey: string,
843
+ ): Promise<Map<string, string[]>> {
844
+ const result = new Map<string, string[]>();
845
+ const CHUNK = 50;
846
+
847
+ for (let i = 0; i < factIds.length; i += CHUNK) {
848
+ const chunk = factIds.slice(i, i + CHUNK);
849
+ const query = `query($factIds:[String!]!,$first:Int!){blindIndexes(where:{fact_in:$factIds},first:$first){hash fact{id}}}`;
850
+ const data = await migrationGqlQuery<{
851
+ blindIndexes?: Array<{ hash: string; fact: { id: string } }>;
852
+ }>(subgraphUrl, query, { factIds: chunk, first: 1000 }, authKey);
853
+
854
+ for (const entry of data?.blindIndexes ?? []) {
855
+ const existing = result.get(entry.fact.id) || [];
856
+ existing.push(entry.hash);
857
+ result.set(entry.fact.id, existing);
858
+ }
859
+ }
860
+
861
+ return result;
862
+ }
863
+
728
864
  /**
729
865
  * Fetch existing memories from the vault to provide dedup context for extraction.
730
866
  * Returns a lightweight list of {id, text} pairs for the LLM prompt.
@@ -1154,7 +1290,12 @@ async function storeExtractedFacts(
1154
1290
  /**
1155
1291
  * Handle import_from tool calls in the plugin context.
1156
1292
  *
1157
- * Uses the shared adapters to parse, then stores via storeExtractedFacts().
1293
+ * Two paths:
1294
+ * 1. Pre-structured sources (Mem0, MCP Memory) — adapter returns facts directly,
1295
+ * stored via storeExtractedFacts().
1296
+ * 2. Conversation-based sources (ChatGPT, Claude) — adapter returns conversation
1297
+ * chunks, each chunk is passed through extractFacts() (the same LLM extraction
1298
+ * pipeline used for auto-extraction), then stored via storeExtractedFacts().
1158
1299
  */
1159
1300
  async function handlePluginImportFrom(
1160
1301
  params: Record<string, unknown>,
@@ -1163,7 +1304,7 @@ async function handlePluginImportFrom(
1163
1304
  const startTime = Date.now();
1164
1305
 
1165
1306
  const source = params.source as string;
1166
- const validSources = ['mem0', 'mcp-memory', 'memoclaw', 'generic-json', 'generic-csv'];
1307
+ const validSources = ['mem0', 'mcp-memory', 'chatgpt', 'claude', 'memoclaw', 'generic-json', 'generic-csv'];
1167
1308
 
1168
1309
  if (!source || !validSources.includes(source)) {
1169
1310
  return { success: false, error: `Invalid source. Must be one of: ${validSources.join(', ')}` };
@@ -1181,7 +1322,10 @@ async function handlePluginImportFrom(
1181
1322
  file_path: params.file_path as string | undefined,
1182
1323
  });
1183
1324
 
1184
- if (parseResult.errors.length > 0 && parseResult.facts.length === 0) {
1325
+ const hasChunks = parseResult.chunks && parseResult.chunks.length > 0;
1326
+ const hasFacts = parseResult.facts && parseResult.facts.length > 0;
1327
+
1328
+ if (parseResult.errors.length > 0 && !hasFacts && !hasChunks) {
1185
1329
  return {
1186
1330
  success: false,
1187
1331
  error: `Failed to parse ${adapter.displayName} data`,
@@ -1189,7 +1333,24 @@ async function handlePluginImportFrom(
1189
1333
  };
1190
1334
  }
1191
1335
 
1336
+ // Dry run: report what was parsed (chunks or facts)
1192
1337
  if (params.dry_run) {
1338
+ if (hasChunks) {
1339
+ return {
1340
+ success: true,
1341
+ dry_run: true,
1342
+ source,
1343
+ total_chunks: parseResult.chunks.length,
1344
+ total_messages: parseResult.totalMessages,
1345
+ preview: parseResult.chunks.slice(0, 5).map((c) => ({
1346
+ title: c.title,
1347
+ messages: c.messages.length,
1348
+ first_message: c.messages[0]?.text.slice(0, 100),
1349
+ })),
1350
+ note: 'Chunks will be processed through LLM extraction (same quality as auto-extraction).',
1351
+ warnings: parseResult.warnings,
1352
+ };
1353
+ }
1193
1354
  return {
1194
1355
  success: true,
1195
1356
  dry_run: true,
@@ -1204,7 +1365,12 @@ async function handlePluginImportFrom(
1204
1365
  };
1205
1366
  }
1206
1367
 
1207
- // Convert NormalizedFact[] to ExtractedFact[] for storeExtractedFacts()
1368
+ // ── Path 1: Conversation chunks (ChatGPT, Claude) — LLM extraction ──
1369
+ if (hasChunks) {
1370
+ return handleChunkImport(parseResult.chunks, parseResult.totalMessages, source, logger, startTime, parseResult.warnings);
1371
+ }
1372
+
1373
+ // ── Path 2: Pre-structured facts (Mem0, MCP Memory) — direct store ──
1208
1374
  const extractedFacts: ExtractedFact[] = parseResult.facts.map((f) => ({
1209
1375
  text: f.text,
1210
1376
  type: f.type,
@@ -1243,6 +1409,77 @@ async function handlePluginImportFrom(
1243
1409
  }
1244
1410
  }
1245
1411
 
1412
+ /**
1413
+ * Process conversation chunks through LLM extraction and store results.
1414
+ *
1415
+ * Each chunk is passed to extractFacts() — the same extraction pipeline used
1416
+ * for auto-extraction during live conversations. This ensures import quality
1417
+ * matches conversation extraction quality.
1418
+ */
1419
+ async function handleChunkImport(
1420
+ chunks: import('./import-adapters/types.js').ConversationChunk[],
1421
+ totalMessages: number,
1422
+ source: string,
1423
+ logger: OpenClawPluginApi['logger'],
1424
+ startTime: number,
1425
+ warnings: string[],
1426
+ ): Promise<Record<string, unknown>> {
1427
+ let totalExtracted = 0;
1428
+ let totalStored = 0;
1429
+ let chunksProcessed = 0;
1430
+
1431
+ for (const chunk of chunks) {
1432
+ chunksProcessed++;
1433
+ logger.info(
1434
+ `Import: extracting facts from chunk ${chunksProcessed}/${chunks.length}: "${chunk.title}"`,
1435
+ );
1436
+
1437
+ // Convert chunk messages to the format extractFacts() expects.
1438
+ // extractFacts() takes an array of message-like objects with { role, content }.
1439
+ const messages = chunk.messages.map((m) => ({
1440
+ role: m.role,
1441
+ content: m.text,
1442
+ }));
1443
+
1444
+ // Use 'full' mode to extract ALL valuable memories from the chunk
1445
+ // (not just the last few messages like 'turn' mode does).
1446
+ const facts = await extractFacts(messages, 'full');
1447
+
1448
+ if (facts.length > 0) {
1449
+ totalExtracted += facts.length;
1450
+
1451
+ // Store through the normal pipeline (dedup, encrypt, store)
1452
+ const stored = await storeExtractedFacts(facts, logger);
1453
+ totalStored += stored;
1454
+
1455
+ logger.info(
1456
+ `Import chunk ${chunksProcessed}/${chunks.length}: extracted ${facts.length} facts, stored ${stored}`,
1457
+ );
1458
+ }
1459
+ }
1460
+
1461
+ if (totalExtracted === 0 && chunks.length > 0) {
1462
+ warnings.push(
1463
+ `Processed ${chunks.length} conversation chunks (${totalMessages} messages) but the LLM ` +
1464
+ `did not extract any facts worth storing. This can happen if the conversations are mostly ` +
1465
+ `generic/ephemeral content without personal facts, preferences, or decisions.`,
1466
+ );
1467
+ }
1468
+
1469
+ return {
1470
+ success: totalStored > 0 || totalExtracted > 0,
1471
+ source,
1472
+ import_id: crypto.randomUUID(),
1473
+ total_chunks: chunks.length,
1474
+ total_messages: totalMessages,
1475
+ facts_extracted: totalExtracted,
1476
+ imported: totalStored,
1477
+ skipped: totalExtracted - totalStored,
1478
+ warnings,
1479
+ duration_ms: Date.now() - startTime,
1480
+ };
1481
+ }
1482
+
1246
1483
  // ---------------------------------------------------------------------------
1247
1484
  // Plugin definition
1248
1485
  // ---------------------------------------------------------------------------
@@ -2175,16 +2412,16 @@ const plugin = {
2175
2412
  name: 'totalreclaw_import_from',
2176
2413
  label: 'Import From',
2177
2414
  description:
2178
- 'Import memories from other AI memory tools (Mem0, MCP Memory Server, MemoClaw, or generic JSON/CSV). ' +
2179
- 'Provide the source name and either an API key or file content. ' +
2415
+ 'Import memories from other AI memory tools (Mem0, MCP Memory Server, ChatGPT, Claude, MemoClaw, or generic JSON/CSV). ' +
2416
+ 'Provide the source name and either an API key, file content, or file path. ' +
2180
2417
  'Use dry_run=true to preview before importing. Idempotent — safe to run multiple times.',
2181
2418
  parameters: {
2182
2419
  type: 'object',
2183
2420
  properties: {
2184
2421
  source: {
2185
2422
  type: 'string',
2186
- enum: ['mem0', 'mcp-memory', 'memoclaw', 'generic-json', 'generic-csv'],
2187
- description: 'The source system to import from',
2423
+ enum: ['mem0', 'mcp-memory', 'chatgpt', 'claude', 'memoclaw', 'generic-json', 'generic-csv'],
2424
+ description: 'The source system to import from (chatgpt: conversations.json or memory text; claude: memory text)',
2188
2425
  },
2189
2426
  api_key: {
2190
2427
  type: 'string',
@@ -2305,6 +2542,189 @@ const plugin = {
2305
2542
  { name: 'totalreclaw_upgrade' },
2306
2543
  );
2307
2544
 
2545
+ // ---------------------------------------------------------------
2546
+ // Tool: totalreclaw_migrate
2547
+ // ---------------------------------------------------------------
2548
+
2549
+ api.registerTool(
2550
+ {
2551
+ name: 'totalreclaw_migrate',
2552
+ label: 'Migrate Testnet to Mainnet',
2553
+ description:
2554
+ 'Migrate memories from testnet (Base Sepolia) to mainnet (Gnosis) after upgrading to Pro. ' +
2555
+ 'Dry-run by default — set confirm=true to execute. Idempotent: re-running skips already-migrated facts.',
2556
+ parameters: {
2557
+ type: 'object',
2558
+ properties: {
2559
+ confirm: {
2560
+ type: 'boolean',
2561
+ description: 'Set to true to execute the migration. Without it, returns a dry-run preview.',
2562
+ default: false,
2563
+ },
2564
+ },
2565
+ additionalProperties: false,
2566
+ },
2567
+ async execute(_params: { confirm?: boolean }) {
2568
+ try {
2569
+ await requireFullSetup(api.logger);
2570
+
2571
+ if (!authKeyHex || !subgraphOwner) {
2572
+ return {
2573
+ content: [{ type: 'text', text: 'Plugin not fully initialized. Ensure TOTALRECLAW_RECOVERY_PHRASE is set.' }],
2574
+ };
2575
+ }
2576
+
2577
+ if (!isSubgraphMode()) {
2578
+ return {
2579
+ content: [{ type: 'text', text: 'Migration is only available with the managed service (subgraph mode).' }],
2580
+ };
2581
+ }
2582
+
2583
+ const confirm = _params?.confirm === true;
2584
+ const serverUrl = (process.env.TOTALRECLAW_SERVER_URL || 'https://api.totalreclaw.xyz').replace(/\/+$/, '');
2585
+
2586
+ // 1. Check billing tier
2587
+ const billingResp = await fetch(
2588
+ `${serverUrl}/v1/billing/status?wallet_address=${encodeURIComponent(subgraphOwner)}`,
2589
+ {
2590
+ method: 'GET',
2591
+ headers: {
2592
+ 'Authorization': `Bearer ${authKeyHex}`,
2593
+ 'Content-Type': 'application/json',
2594
+ 'X-TotalReclaw-Client': 'openclaw-plugin',
2595
+ },
2596
+ },
2597
+ );
2598
+ if (!billingResp.ok) {
2599
+ return { content: [{ type: 'text', text: `Failed to check billing tier (HTTP ${billingResp.status}).` }] };
2600
+ }
2601
+ const billingData = await billingResp.json() as { tier: string };
2602
+ if (billingData.tier !== 'pro') {
2603
+ return {
2604
+ content: [{ type: 'text', text: 'Migration requires Pro tier. Use totalreclaw_upgrade to upgrade first.' }],
2605
+ };
2606
+ }
2607
+
2608
+ // 2. Fetch testnet facts via relay (chain=testnet query param)
2609
+ const testnetSubgraphUrl = `${serverUrl}/v1/subgraph?chain=testnet`;
2610
+ const mainnetSubgraphUrl = `${serverUrl}/v1/subgraph`;
2611
+
2612
+ api.logger.info('Fetching testnet facts...');
2613
+ const testnetFacts = await fetchAllFactsByOwner(testnetSubgraphUrl, subgraphOwner, authKeyHex);
2614
+
2615
+ if (testnetFacts.length === 0) {
2616
+ return {
2617
+ content: [{ type: 'text', text: 'No facts found on testnet. Nothing to migrate.' }],
2618
+ };
2619
+ }
2620
+
2621
+ // 3. Check mainnet for existing facts (idempotency)
2622
+ api.logger.info('Checking mainnet for existing facts...');
2623
+ const mainnetFps = await fetchContentFingerprintsByOwner(mainnetSubgraphUrl, subgraphOwner, authKeyHex);
2624
+ const factsToMigrate = testnetFacts.filter(f => !f.contentFp || !mainnetFps.has(f.contentFp));
2625
+ const alreadyOnMainnet = testnetFacts.length - factsToMigrate.length;
2626
+
2627
+ // 4. Dry-run
2628
+ if (!confirm) {
2629
+ const msg = factsToMigrate.length === 0
2630
+ ? `All ${testnetFacts.length} testnet facts already exist on mainnet. Nothing to migrate.`
2631
+ : `Found ${factsToMigrate.length} facts to migrate from testnet to Gnosis mainnet (${alreadyOnMainnet} already on mainnet). Call with confirm=true to proceed.`;
2632
+ return {
2633
+ content: [{ type: 'text', text: msg }],
2634
+ details: {
2635
+ mode: 'dry_run',
2636
+ testnet_facts: testnetFacts.length,
2637
+ already_on_mainnet: alreadyOnMainnet,
2638
+ to_migrate: factsToMigrate.length,
2639
+ },
2640
+ };
2641
+ }
2642
+
2643
+ // 5. Execute migration
2644
+ if (factsToMigrate.length === 0) {
2645
+ return {
2646
+ content: [{ type: 'text', text: `All ${testnetFacts.length} testnet facts already exist on mainnet. Nothing to migrate.` }],
2647
+ };
2648
+ }
2649
+
2650
+ // Fetch blind indices
2651
+ api.logger.info(`Fetching blind indices for ${factsToMigrate.length} facts...`);
2652
+ const factIds = factsToMigrate.map(f => f.id);
2653
+ const blindIndicesMap = await fetchBlindIndicesByFactIds(testnetSubgraphUrl, factIds, authKeyHex);
2654
+
2655
+ // Build protobuf payloads
2656
+ const payloads: Buffer[] = [];
2657
+ for (const fact of factsToMigrate) {
2658
+ const blobHex = fact.encryptedBlob.startsWith('0x') ? fact.encryptedBlob.slice(2) : fact.encryptedBlob;
2659
+ const indices = blindIndicesMap.get(fact.id) || [];
2660
+ const factPayload: FactPayload = {
2661
+ id: fact.id,
2662
+ timestamp: new Date().toISOString(),
2663
+ owner: subgraphOwner,
2664
+ encryptedBlob: blobHex,
2665
+ blindIndices: indices,
2666
+ decayScore: parseFloat(fact.decayScore) || 0.5,
2667
+ source: fact.source || 'migration',
2668
+ contentFp: fact.contentFp || '',
2669
+ agentId: fact.agentId || 'openclaw-plugin',
2670
+ encryptedEmbedding: fact.encryptedEmbedding || undefined,
2671
+ };
2672
+ payloads.push(encodeFactProtobuf(factPayload));
2673
+ }
2674
+
2675
+ // Batch submit (15 per UserOp)
2676
+ const BATCH_SIZE = 15;
2677
+ const batchConfig = { ...getSubgraphConfig(), authKeyHex: authKeyHex!, walletAddress: subgraphOwner ?? undefined };
2678
+ let migrated = 0;
2679
+ let failedBatches = 0;
2680
+
2681
+ for (let i = 0; i < payloads.length; i += BATCH_SIZE) {
2682
+ const batch = payloads.slice(i, i + BATCH_SIZE);
2683
+ const batchNum = Math.floor(i / BATCH_SIZE) + 1;
2684
+ const totalBatches = Math.ceil(payloads.length / BATCH_SIZE);
2685
+ api.logger.info(`Migrating batch ${batchNum}/${totalBatches} (${batch.length} facts)...`);
2686
+
2687
+ try {
2688
+ const result = await submitFactBatchOnChain(batch, batchConfig);
2689
+ if (result.success) {
2690
+ migrated += batch.length;
2691
+ } else {
2692
+ failedBatches++;
2693
+ }
2694
+ } catch (err: unknown) {
2695
+ const msg = err instanceof Error ? err.message : String(err);
2696
+ api.logger.error(`Migration batch ${batchNum} failed: ${msg}`);
2697
+ failedBatches++;
2698
+ }
2699
+ }
2700
+
2701
+ const resultMsg = failedBatches === 0
2702
+ ? `Successfully migrated ${migrated} memories from testnet to Gnosis mainnet.`
2703
+ : `Migrated ${migrated}/${factsToMigrate.length} memories. ${failedBatches} batch(es) failed — re-run to retry (idempotent).`;
2704
+
2705
+ return {
2706
+ content: [{ type: 'text', text: resultMsg }],
2707
+ details: {
2708
+ mode: 'executed',
2709
+ testnet_facts: testnetFacts.length,
2710
+ already_on_mainnet: alreadyOnMainnet,
2711
+ to_migrate: factsToMigrate.length,
2712
+ migrated,
2713
+ failed_batches: failedBatches,
2714
+ },
2715
+ };
2716
+ } catch (err: unknown) {
2717
+ const message = err instanceof Error ? err.message : String(err);
2718
+ api.logger.error(`totalreclaw_migrate failed: ${message}`);
2719
+ return {
2720
+ content: [{ type: 'text', text: `Migration failed: ${message}` }],
2721
+ };
2722
+ }
2723
+ },
2724
+ },
2725
+ { name: 'totalreclaw_migrate' },
2726
+ );
2727
+
2308
2728
  // ---------------------------------------------------------------
2309
2729
  // Hook: before_agent_start
2310
2730
  // ---------------------------------------------------------------
@@ -2325,21 +2745,36 @@ const plugin = {
2325
2745
 
2326
2746
  await ensureInitialized(api.logger);
2327
2747
 
2328
- // If setup is still needed after init, inject setup instructions
2748
+ // If setup is still needed after init, inject guided onboarding
2329
2749
  if (needsSetup) {
2330
2750
  return {
2331
2751
  prependContext:
2332
- '## TotalReclaw Setup Required\n\n' +
2333
- 'TotalReclaw is installed but needs configuration. Follow the "Post-Install Setup" steps in SKILL.md to complete setup.\n' +
2334
- 'Ask the user: "Do you have an existing TotalReclaw recovery phrase, or should I generate a new one?"',
2752
+ '## TotalReclaw — First-Time Setup\n\n' +
2753
+ 'TotalReclaw is an end-to-end encrypted memory vault. It remembers things across conversations — preferences, decisions, project context — all encrypted so no one (not even the server) can read your memories.\n\n' +
2754
+ '**To get started, ask the user:** "Do you have an existing TotalReclaw recovery phrase you\'d like to restore, or should I generate a new one?"\n\n' +
2755
+ '**If new user:** Generate a random 12-word BIP-39 mnemonic and set it as the TOTALRECLAW_RECOVERY_PHRASE environment variable. Display it prominently and warn:\n' +
2756
+ '- "This is your ONLY way to access your memories. Write it down and store it securely."\n' +
2757
+ '- "NEVER use a recovery phrase from an existing crypto wallet — keep this separate from any wallet that holds funds."\n' +
2758
+ '- "If you lose this phrase, your memories are gone forever — there is no recovery."\n\n' +
2759
+ '**If returning user:** Ask them to provide their 12-word phrase, validate it, and set it as TOTALRECLAW_RECOVERY_PHRASE.\n\n' +
2760
+ '**After setup:** Explain that from now on, you will automatically remember important things from conversations and recall relevant memories at the start of each session. The user can also explicitly ask you to remember, recall, forget, or export memories.',
2335
2761
  };
2336
2762
  }
2337
2763
 
2338
- // One-time welcome-back message for returning Pro users.
2764
+ // One-time welcome message (first conversation after setup or returning user)
2339
2765
  let welcomeBack = '';
2340
2766
  if (welcomeBackMessage) {
2341
2767
  welcomeBack = `\n\n${welcomeBackMessage}`;
2342
2768
  welcomeBackMessage = null; // Consume — only show once
2769
+ } else if (firstRunAfterInit) {
2770
+ // First conversation with a configured user — explain what's happening
2771
+ firstRunAfterInit = false;
2772
+ const cache = readBillingCache();
2773
+ const tier = cache?.tier || 'free';
2774
+ const tierInfo = tier === 'pro'
2775
+ ? 'You are on the **Pro** tier — unlimited memories, permanently stored on Gnosis mainnet.'
2776
+ : 'You are on the **Free** tier — memories stored on testnet. Use the totalreclaw_upgrade tool to upgrade to Pro for permanent on-chain storage.';
2777
+ welcomeBack = `\n\nTotalReclaw is active. I will automatically remember important things from our conversations and recall relevant context at the start of each session. ${tierInfo}`;
2343
2778
  }
2344
2779
 
2345
2780
  // Billing cache check — warn if quota is approaching limit.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@totalreclaw/totalreclaw",
3
- "version": "1.4.0",
3
+ "version": "1.6.0",
4
4
  "description": "End-to-end encrypted memory for AI agents — portable, yours forever. Automatic extraction, semantic search, and on-chain storage",
5
5
  "type": "module",
6
6
  "keywords": [