@totalreclaw/totalreclaw 1.5.0 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,7 +19,7 @@ export interface NormalizedFact {
19
19
  tags?: string[];
20
20
  }
21
21
 
22
- export type ImportSource = 'mem0' | 'mcp-memory' | 'memoclaw' | 'generic-json' | 'generic-csv';
22
+ export type ImportSource = 'mem0' | 'mcp-memory' | 'chatgpt' | 'claude' | 'memoclaw' | 'generic-json' | 'generic-csv';
23
23
 
24
24
  /**
25
25
  * What the user passes to the import tool.
@@ -73,15 +73,38 @@ export interface ImportResult {
73
73
  export type ProgressCallback = (progress: {
74
74
  current: number;
75
75
  total: number;
76
- phase: 'fetching' | 'parsing' | 'storing';
76
+ phase: 'fetching' | 'parsing' | 'storing' | 'extracting';
77
77
  message: string;
78
78
  }) => void;
79
79
 
80
+ /**
81
+ * A chunk of conversation messages for LLM-based fact extraction.
82
+ * Adapters that parse conversation data (ChatGPT, Claude) return these
83
+ * instead of pre-extracted facts, delegating extraction to the LLM.
84
+ */
85
+ export interface ConversationChunk {
86
+ /** Human-readable title for progress reporting */
87
+ title: string;
88
+ /** Ordered messages in this chunk */
89
+ messages: Array<{ role: 'user' | 'assistant'; text: string }>;
90
+ /** Original timestamp (ISO 8601) if available */
91
+ timestamp?: string;
92
+ }
93
+
80
94
  /**
81
95
  * Adapter parse result — returned by each adapter's parse method.
96
+ *
97
+ * Adapters return EITHER `facts` (pre-structured sources like Mem0, MCP Memory)
98
+ * OR `chunks` (conversation-based sources like ChatGPT, Claude) that need
99
+ * LLM extraction. The caller checks which field is populated.
82
100
  */
83
101
  export interface AdapterParseResult {
102
+ /** Pre-structured facts (Mem0, MCP Memory adapters) */
84
103
  facts: NormalizedFact[];
104
+ /** Conversation chunks needing LLM extraction (ChatGPT, Claude adapters) */
105
+ chunks: ConversationChunk[];
106
+ /** Total message count across all chunks */
107
+ totalMessages: number;
85
108
  warnings: string[];
86
109
  errors: string[];
87
110
  /** Metadata about the source (for logging) */
package/index.ts CHANGED
@@ -332,6 +332,9 @@ async function getFactCount(logger: OpenClawPluginApi['logger']): Promise<number
332
332
  /** True when recovery phrase is missing — tools return setup instructions. */
333
333
  let needsSetup = false;
334
334
 
335
+ /** True on first before_agent_start after successful init — show welcome message once. */
336
+ let firstRunAfterInit = true;
337
+
335
338
  /**
336
339
  * Derive keys from the recovery phrase, load or create credentials, and
337
340
  * register with the server if this is the first run.
@@ -1287,7 +1290,12 @@ async function storeExtractedFacts(
1287
1290
  /**
1288
1291
  * Handle import_from tool calls in the plugin context.
1289
1292
  *
1290
- * Uses the shared adapters to parse, then stores via storeExtractedFacts().
1293
+ * Two paths:
1294
+ * 1. Pre-structured sources (Mem0, MCP Memory) — adapter returns facts directly,
1295
+ * stored via storeExtractedFacts().
1296
+ * 2. Conversation-based sources (ChatGPT, Claude) — adapter returns conversation
1297
+ * chunks, each chunk is passed through extractFacts() (the same LLM extraction
1298
+ * pipeline used for auto-extraction), then stored via storeExtractedFacts().
1291
1299
  */
1292
1300
  async function handlePluginImportFrom(
1293
1301
  params: Record<string, unknown>,
@@ -1296,7 +1304,7 @@ async function handlePluginImportFrom(
1296
1304
  const startTime = Date.now();
1297
1305
 
1298
1306
  const source = params.source as string;
1299
- const validSources = ['mem0', 'mcp-memory', 'memoclaw', 'generic-json', 'generic-csv'];
1307
+ const validSources = ['mem0', 'mcp-memory', 'chatgpt', 'claude', 'memoclaw', 'generic-json', 'generic-csv'];
1300
1308
 
1301
1309
  if (!source || !validSources.includes(source)) {
1302
1310
  return { success: false, error: `Invalid source. Must be one of: ${validSources.join(', ')}` };
@@ -1314,7 +1322,10 @@ async function handlePluginImportFrom(
1314
1322
  file_path: params.file_path as string | undefined,
1315
1323
  });
1316
1324
 
1317
- if (parseResult.errors.length > 0 && parseResult.facts.length === 0) {
1325
+ const hasChunks = parseResult.chunks && parseResult.chunks.length > 0;
1326
+ const hasFacts = parseResult.facts && parseResult.facts.length > 0;
1327
+
1328
+ if (parseResult.errors.length > 0 && !hasFacts && !hasChunks) {
1318
1329
  return {
1319
1330
  success: false,
1320
1331
  error: `Failed to parse ${adapter.displayName} data`,
@@ -1322,7 +1333,24 @@ async function handlePluginImportFrom(
1322
1333
  };
1323
1334
  }
1324
1335
 
1336
+ // Dry run: report what was parsed (chunks or facts)
1325
1337
  if (params.dry_run) {
1338
+ if (hasChunks) {
1339
+ return {
1340
+ success: true,
1341
+ dry_run: true,
1342
+ source,
1343
+ total_chunks: parseResult.chunks.length,
1344
+ total_messages: parseResult.totalMessages,
1345
+ preview: parseResult.chunks.slice(0, 5).map((c) => ({
1346
+ title: c.title,
1347
+ messages: c.messages.length,
1348
+ first_message: c.messages[0]?.text.slice(0, 100),
1349
+ })),
1350
+ note: 'Chunks will be processed through LLM extraction (same quality as auto-extraction).',
1351
+ warnings: parseResult.warnings,
1352
+ };
1353
+ }
1326
1354
  return {
1327
1355
  success: true,
1328
1356
  dry_run: true,
@@ -1337,7 +1365,12 @@ async function handlePluginImportFrom(
1337
1365
  };
1338
1366
  }
1339
1367
 
1340
- // Convert NormalizedFact[] to ExtractedFact[] for storeExtractedFacts()
1368
+ // ── Path 1: Conversation chunks (ChatGPT, Claude) — LLM extraction ──
1369
+ if (hasChunks) {
1370
+ return handleChunkImport(parseResult.chunks, parseResult.totalMessages, source, logger, startTime, parseResult.warnings);
1371
+ }
1372
+
1373
+ // ── Path 2: Pre-structured facts (Mem0, MCP Memory) — direct store ──
1341
1374
  const extractedFacts: ExtractedFact[] = parseResult.facts.map((f) => ({
1342
1375
  text: f.text,
1343
1376
  type: f.type,
@@ -1376,6 +1409,77 @@ async function handlePluginImportFrom(
1376
1409
  }
1377
1410
  }
1378
1411
 
1412
+ /**
1413
+ * Process conversation chunks through LLM extraction and store results.
1414
+ *
1415
+ * Each chunk is passed to extractFacts() — the same extraction pipeline used
1416
+ * for auto-extraction during live conversations. This ensures import quality
1417
+ * matches conversation extraction quality.
1418
+ */
1419
+ async function handleChunkImport(
1420
+ chunks: import('./import-adapters/types.js').ConversationChunk[],
1421
+ totalMessages: number,
1422
+ source: string,
1423
+ logger: OpenClawPluginApi['logger'],
1424
+ startTime: number,
1425
+ warnings: string[],
1426
+ ): Promise<Record<string, unknown>> {
1427
+ let totalExtracted = 0;
1428
+ let totalStored = 0;
1429
+ let chunksProcessed = 0;
1430
+
1431
+ for (const chunk of chunks) {
1432
+ chunksProcessed++;
1433
+ logger.info(
1434
+ `Import: extracting facts from chunk ${chunksProcessed}/${chunks.length}: "${chunk.title}"`,
1435
+ );
1436
+
1437
+ // Convert chunk messages to the format extractFacts() expects.
1438
+ // extractFacts() takes an array of message-like objects with { role, content }.
1439
+ const messages = chunk.messages.map((m) => ({
1440
+ role: m.role,
1441
+ content: m.text,
1442
+ }));
1443
+
1444
+ // Use 'full' mode to extract ALL valuable memories from the chunk
1445
+ // (not just the last few messages like 'turn' mode does).
1446
+ const facts = await extractFacts(messages, 'full');
1447
+
1448
+ if (facts.length > 0) {
1449
+ totalExtracted += facts.length;
1450
+
1451
+ // Store through the normal pipeline (dedup, encrypt, store)
1452
+ const stored = await storeExtractedFacts(facts, logger);
1453
+ totalStored += stored;
1454
+
1455
+ logger.info(
1456
+ `Import chunk ${chunksProcessed}/${chunks.length}: extracted ${facts.length} facts, stored ${stored}`,
1457
+ );
1458
+ }
1459
+ }
1460
+
1461
+ if (totalExtracted === 0 && chunks.length > 0) {
1462
+ warnings.push(
1463
+ `Processed ${chunks.length} conversation chunks (${totalMessages} messages) but the LLM ` +
1464
+ `did not extract any facts worth storing. This can happen if the conversations are mostly ` +
1465
+ `generic/ephemeral content without personal facts, preferences, or decisions.`,
1466
+ );
1467
+ }
1468
+
1469
+ return {
1470
+ success: totalStored > 0 || totalExtracted > 0,
1471
+ source,
1472
+ import_id: crypto.randomUUID(),
1473
+ total_chunks: chunks.length,
1474
+ total_messages: totalMessages,
1475
+ facts_extracted: totalExtracted,
1476
+ imported: totalStored,
1477
+ skipped: totalExtracted - totalStored,
1478
+ warnings,
1479
+ duration_ms: Date.now() - startTime,
1480
+ };
1481
+ }
1482
+
1379
1483
  // ---------------------------------------------------------------------------
1380
1484
  // Plugin definition
1381
1485
  // ---------------------------------------------------------------------------
@@ -2308,16 +2412,16 @@ const plugin = {
2308
2412
  name: 'totalreclaw_import_from',
2309
2413
  label: 'Import From',
2310
2414
  description:
2311
- 'Import memories from other AI memory tools (Mem0, MCP Memory Server, MemoClaw, or generic JSON/CSV). ' +
2312
- 'Provide the source name and either an API key or file content. ' +
2415
+ 'Import memories from other AI memory tools (Mem0, MCP Memory Server, ChatGPT, Claude, MemoClaw, or generic JSON/CSV). ' +
2416
+ 'Provide the source name and either an API key, file content, or file path. ' +
2313
2417
  'Use dry_run=true to preview before importing. Idempotent — safe to run multiple times.',
2314
2418
  parameters: {
2315
2419
  type: 'object',
2316
2420
  properties: {
2317
2421
  source: {
2318
2422
  type: 'string',
2319
- enum: ['mem0', 'mcp-memory', 'memoclaw', 'generic-json', 'generic-csv'],
2320
- description: 'The source system to import from',
2423
+ enum: ['mem0', 'mcp-memory', 'chatgpt', 'claude', 'memoclaw', 'generic-json', 'generic-csv'],
2424
+ description: 'The source system to import from (chatgpt: conversations.json or memory text; claude: memory text)',
2321
2425
  },
2322
2426
  api_key: {
2323
2427
  type: 'string',
@@ -2641,21 +2745,36 @@ const plugin = {
2641
2745
 
2642
2746
  await ensureInitialized(api.logger);
2643
2747
 
2644
- // If setup is still needed after init, inject setup instructions
2748
+ // If setup is still needed after init, inject guided onboarding
2645
2749
  if (needsSetup) {
2646
2750
  return {
2647
2751
  prependContext:
2648
- '## TotalReclaw Setup Required\n\n' +
2649
- 'TotalReclaw is installed but needs configuration. Follow the "Post-Install Setup" steps in SKILL.md to complete setup.\n' +
2650
- 'Ask the user: "Do you have an existing TotalReclaw recovery phrase, or should I generate a new one?"',
2752
+ '## TotalReclaw — First-Time Setup\n\n' +
2753
+ 'TotalReclaw is an end-to-end encrypted memory vault. It remembers things across conversations — preferences, decisions, project context — all encrypted so no one (not even the server) can read your memories.\n\n' +
2754
+ '**To get started, ask the user:** "Do you have an existing TotalReclaw recovery phrase you\'d like to restore, or should I generate a new one?"\n\n' +
2755
+ '**If new user:** Generate a random 12-word BIP-39 mnemonic and set it as the TOTALRECLAW_RECOVERY_PHRASE environment variable. Display it prominently and warn:\n' +
2756
+ '- "This is your ONLY way to access your memories. Write it down and store it securely."\n' +
2757
+ '- "NEVER use a recovery phrase from an existing crypto wallet — keep this separate from any wallet that holds funds."\n' +
2758
+ '- "If you lose this phrase, your memories are gone forever — there is no recovery."\n\n' +
2759
+ '**If returning user:** Ask them to provide their 12-word phrase, validate it, and set it as TOTALRECLAW_RECOVERY_PHRASE.\n\n' +
2760
+ '**After setup:** Explain that from now on, you will automatically remember important things from conversations and recall relevant memories at the start of each session. The user can also explicitly ask you to remember, recall, forget, or export memories.',
2651
2761
  };
2652
2762
  }
2653
2763
 
2654
- // One-time welcome-back message for returning Pro users.
2764
+ // One-time welcome message (first conversation after setup or returning user)
2655
2765
  let welcomeBack = '';
2656
2766
  if (welcomeBackMessage) {
2657
2767
  welcomeBack = `\n\n${welcomeBackMessage}`;
2658
2768
  welcomeBackMessage = null; // Consume — only show once
2769
+ } else if (firstRunAfterInit) {
2770
+ // First conversation with a configured user — explain what's happening
2771
+ firstRunAfterInit = false;
2772
+ const cache = readBillingCache();
2773
+ const tier = cache?.tier || 'free';
2774
+ const tierInfo = tier === 'pro'
2775
+ ? 'You are on the **Pro** tier — unlimited memories, permanently stored on Gnosis mainnet.'
2776
+ : 'You are on the **Free** tier — memories stored on testnet. Use the totalreclaw_upgrade tool to upgrade to Pro for permanent on-chain storage.';
2777
+ welcomeBack = `\n\nTotalReclaw is active. I will automatically remember important things from our conversations and recall relevant context at the start of each session. ${tierInfo}`;
2659
2778
  }
2660
2779
 
2661
2780
  // Billing cache check — warn if quota is approaching limit.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@totalreclaw/totalreclaw",
3
- "version": "1.5.0",
3
+ "version": "1.6.0",
4
4
  "description": "End-to-end encrypted memory for AI agents — portable, yours forever. Automatic extraction, semantic search, and on-chain storage",
5
5
  "type": "module",
6
6
  "keywords": [