mengram-ai 2.7.2 → 2.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +24 -0
- package/index.d.ts +13 -0
- package/index.js +273 -0
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -65,6 +65,27 @@ const procs: Procedure[] = await m.procedures({ query: 'release' });
|
|
|
65
65
|
const all: UnifiedSearchResult = await m.searchAll('issues');
|
|
66
66
|
```
|
|
67
67
|
|
|
68
|
+
## Import Existing Data
|
|
69
|
+
|
|
70
|
+
Kill the cold-start problem — import ChatGPT, Obsidian, or text files (Node.js only, requires `jszip` for ChatGPT):
|
|
71
|
+
|
|
72
|
+
```javascript
|
|
73
|
+
// ChatGPT export
|
|
74
|
+
// Requires: npm install jszip  (one-time dependency — run in your shell)
|
|
75
|
+
await m.importChatgpt('~/Downloads/chatgpt-export.zip');
|
|
76
|
+
|
|
77
|
+
// Obsidian vault
|
|
78
|
+
await m.importObsidian('~/Documents/MyVault');
|
|
79
|
+
|
|
80
|
+
// Text/markdown files
|
|
81
|
+
await m.importFiles(['notes.md', 'journal.txt']);
|
|
82
|
+
|
|
83
|
+
// With progress callback
|
|
84
|
+
await m.importChatgpt('export.zip', {
|
|
85
|
+
onProgress: (current, total, title) => console.log(`${current}/${total} ${title}`)
|
|
86
|
+
});
|
|
87
|
+
```
|
|
88
|
+
|
|
68
89
|
## API
|
|
69
90
|
|
|
70
91
|
| Method | Description |
|
|
@@ -83,6 +104,9 @@ const all: UnifiedSearchResult = await m.searchAll('issues');
|
|
|
83
104
|
| `get(name)` | Get specific entity |
|
|
84
105
|
| `delete(name)` | Delete entity |
|
|
85
106
|
| `runAgents(options?)` | Run memory agents |
|
|
107
|
+
| `importChatgpt(zipPath, options?)` | **Import ChatGPT export ZIP** |
|
|
108
|
+
| `importObsidian(vaultPath, options?)` | **Import Obsidian vault** |
|
|
109
|
+
| `importFiles(paths, options?)` | **Import text/markdown files** |
|
|
86
110
|
| `insights()` | AI reflections |
|
|
87
111
|
| `createTeam(name)` | Create shared team |
|
|
88
112
|
| `joinTeam(code)` | Join team |
|
package/index.d.ts
CHANGED
|
@@ -219,6 +219,19 @@ export declare class MengramClient {
|
|
|
219
219
|
getTriggers(userId?: string, options?: { includeFired?: boolean; limit?: number }): Promise<SmartTrigger[]>;
|
|
220
220
|
processTriggers(): Promise<{ processed: number; fired: number; errors: number }>;
|
|
221
221
|
dismissTrigger(triggerId: number): Promise<{ status: string; id: number }>;
|
|
222
|
+
|
|
223
|
+
// Import (v2.9)
|
|
224
|
+
importChatgpt(zipPath: string, options?: { chunkSize?: number; onProgress?: (current: number, total: number, title: string) => void }): Promise<ImportResult>;
|
|
225
|
+
importObsidian(vaultPath: string, options?: { chunkChars?: number; onProgress?: (current: number, total: number, title: string) => void }): Promise<ImportResult>;
|
|
226
|
+
importFiles(paths: string[], options?: { chunkChars?: number; onProgress?: (current: number, total: number, title: string) => void }): Promise<ImportResult>;
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
export interface ImportResult {
|
|
230
|
+
conversations_found: number;
|
|
231
|
+
chunks_sent: number;
|
|
232
|
+
entities_created: string[];
|
|
233
|
+
errors: string[];
|
|
234
|
+
duration_seconds: number;
|
|
222
235
|
}
|
|
223
236
|
|
|
224
237
|
export interface SmartTrigger {
|
package/index.js
CHANGED
|
@@ -572,6 +572,279 @@ class MengramClient {
|
|
|
572
572
|
async dismissTrigger(triggerId) {
|
|
573
573
|
return this._request('DELETE', `/v1/triggers/${triggerId}`);
|
|
574
574
|
}
|
|
575
|
+
|
|
576
|
+
// ---- Import ----
|
|
577
|
+
|
|
578
|
+
/**
|
|
579
|
+
* Import ChatGPT export ZIP into memory.
|
|
580
|
+
* Node.js only — reads file from disk.
|
|
581
|
+
* @param {string} zipPath - Path to ChatGPT export ZIP file
|
|
582
|
+
* @param {object} [options]
|
|
583
|
+
* @param {number} [options.chunkSize] - Max messages per chunk (default 20)
|
|
584
|
+
* @param {function} [options.onProgress] - Callback(current, total, title)
|
|
585
|
+
* @returns {Promise<{conversations_found: number, chunks_sent: number, entities_created: string[], errors: string[], duration_seconds: number}>}
|
|
586
|
+
*/
|
|
587
|
+
async importChatgpt(zipPath, options = {}) {
|
|
588
|
+
const fs = await import('fs');
|
|
589
|
+
const path = await import('path');
|
|
590
|
+
const { default: JSZip } = await import('jszip').catch(() => {
|
|
591
|
+
throw new MengramError(
|
|
592
|
+
'jszip is required for ChatGPT import: npm install jszip', 0
|
|
593
|
+
);
|
|
594
|
+
});
|
|
595
|
+
|
|
596
|
+
const start = Date.now();
|
|
597
|
+
const result = { conversations_found: 0, chunks_sent: 0, entities_created: [], errors: [], duration_seconds: 0 };
|
|
598
|
+
const chunkSize = options.chunkSize || 20;
|
|
599
|
+
|
|
600
|
+
try {
|
|
601
|
+
const data = fs.readFileSync(zipPath);
|
|
602
|
+
const zip = await JSZip.loadAsync(data);
|
|
603
|
+
const convFile = Object.keys(zip.files).find(n => n.endsWith('conversations.json'));
|
|
604
|
+
if (!convFile) throw new Error('No conversations.json found in ZIP');
|
|
605
|
+
|
|
606
|
+
const convData = JSON.parse(await zip.files[convFile].async('string'));
|
|
607
|
+
if (!Array.isArray(convData)) throw new Error('conversations.json should contain a list');
|
|
608
|
+
|
|
609
|
+
// Parse conversations from tree structure
|
|
610
|
+
const conversations = [];
|
|
611
|
+
for (const conv of convData) {
|
|
612
|
+
const mapping = conv.mapping || {};
|
|
613
|
+
const messages = this._walkChatgptTree(mapping);
|
|
614
|
+
if (messages.length > 0) conversations.push(messages);
|
|
615
|
+
}
|
|
616
|
+
|
|
617
|
+
result.conversations_found = conversations.length;
|
|
618
|
+
|
|
619
|
+
let chunkIdx = 0;
|
|
620
|
+
const totalChunks = conversations.reduce((sum, conv) =>
|
|
621
|
+
sum + Math.ceil(conv.length / chunkSize), 0);
|
|
622
|
+
|
|
623
|
+
for (let i = 0; i < conversations.length; i++) {
|
|
624
|
+
const conv = conversations[i];
|
|
625
|
+
for (let j = 0; j < conv.length; j += chunkSize) {
|
|
626
|
+
const chunk = conv.slice(j, j + chunkSize);
|
|
627
|
+
try {
|
|
628
|
+
await this.add(chunk);
|
|
629
|
+
result.chunks_sent++;
|
|
630
|
+
chunkIdx++;
|
|
631
|
+
if (options.onProgress) {
|
|
632
|
+
options.onProgress(chunkIdx, totalChunks, `conversation ${i + 1}/${conversations.length}`);
|
|
633
|
+
}
|
|
634
|
+
} catch (e) {
|
|
635
|
+
result.errors.push(`Conversation ${i + 1}: ${e.message}`);
|
|
636
|
+
}
|
|
637
|
+
}
|
|
638
|
+
}
|
|
639
|
+
} catch (e) {
|
|
640
|
+
if (!result.errors.length) result.errors.push(e.message);
|
|
641
|
+
}
|
|
642
|
+
|
|
643
|
+
result.duration_seconds = (Date.now() - start) / 1000;
|
|
644
|
+
return result;
|
|
645
|
+
}
|
|
646
|
+
|
|
647
|
+
/**
|
|
648
|
+
* Import Obsidian vault into memory.
|
|
649
|
+
* Node.js only — reads files from disk.
|
|
650
|
+
* @param {string} vaultPath - Path to Obsidian vault directory
|
|
651
|
+
* @param {object} [options]
|
|
652
|
+
* @param {number} [options.chunkChars] - Max characters per chunk (default 4000)
|
|
653
|
+
* @param {function} [options.onProgress] - Callback(current, total, title)
|
|
654
|
+
* @returns {Promise<{conversations_found: number, chunks_sent: number, entities_created: string[], errors: string[], duration_seconds: number}>}
|
|
655
|
+
*/
|
|
656
|
+
async importObsidian(vaultPath, options = {}) {
|
|
657
|
+
const fs = await import('fs');
|
|
658
|
+
const path = await import('path');
|
|
659
|
+
|
|
660
|
+
const start = Date.now();
|
|
661
|
+
const result = { conversations_found: 0, chunks_sent: 0, entities_created: [], errors: [], duration_seconds: 0 };
|
|
662
|
+
const chunkChars = options.chunkChars || 4000;
|
|
663
|
+
|
|
664
|
+
const mdFiles = this._findMdFiles(fs, path, vaultPath);
|
|
665
|
+
result.conversations_found = mdFiles.length;
|
|
666
|
+
|
|
667
|
+
const fileChunks = mdFiles.map(f => {
|
|
668
|
+
try {
|
|
669
|
+
const content = fs.readFileSync(f, 'utf-8');
|
|
670
|
+
return { file: f, chunks: this._chunkText(content, chunkChars) };
|
|
671
|
+
} catch {
|
|
672
|
+
return { file: f, chunks: [] };
|
|
673
|
+
}
|
|
674
|
+
});
|
|
675
|
+
|
|
676
|
+
const totalChunks = fileChunks.reduce((sum, fc) => sum + Math.max(fc.chunks.length, 1), 0);
|
|
677
|
+
let chunkIdx = 0;
|
|
678
|
+
|
|
679
|
+
for (const { file, chunks } of fileChunks) {
|
|
680
|
+
const title = path.basename(file, '.md');
|
|
681
|
+
if (!chunks.length) { chunkIdx++; continue; }
|
|
682
|
+
|
|
683
|
+
for (const chunk of chunks) {
|
|
684
|
+
try {
|
|
685
|
+
await this.add([{ role: 'user', content: `Note: ${title}\n\n${chunk}` }]);
|
|
686
|
+
result.chunks_sent++;
|
|
687
|
+
chunkIdx++;
|
|
688
|
+
if (options.onProgress) options.onProgress(chunkIdx, totalChunks, title);
|
|
689
|
+
} catch (e) {
|
|
690
|
+
result.errors.push(`${title}: ${e.message}`);
|
|
691
|
+
chunkIdx++;
|
|
692
|
+
}
|
|
693
|
+
}
|
|
694
|
+
}
|
|
695
|
+
|
|
696
|
+
result.duration_seconds = (Date.now() - start) / 1000;
|
|
697
|
+
return result;
|
|
698
|
+
}
|
|
699
|
+
|
|
700
|
+
/**
|
|
701
|
+
* Import text/markdown files into memory.
|
|
702
|
+
* Node.js only — reads files from disk.
|
|
703
|
+
* @param {string[]} paths - File paths
|
|
704
|
+
* @param {object} [options]
|
|
705
|
+
* @param {number} [options.chunkChars] - Max characters per chunk (default 4000)
|
|
706
|
+
* @param {function} [options.onProgress] - Callback(current, total, title)
|
|
707
|
+
* @returns {Promise<{conversations_found: number, chunks_sent: number, entities_created: string[], errors: string[], duration_seconds: number}>}
|
|
708
|
+
*/
|
|
709
|
+
async importFiles(paths, options = {}) {
|
|
710
|
+
const fs = await import('fs');
|
|
711
|
+
const path = await import('path');
|
|
712
|
+
|
|
713
|
+
const start = Date.now();
|
|
714
|
+
const result = { conversations_found: 0, chunks_sent: 0, entities_created: [], errors: [], duration_seconds: 0 };
|
|
715
|
+
const chunkChars = options.chunkChars || 4000;
|
|
716
|
+
|
|
717
|
+
// Resolve paths — expand directories
|
|
718
|
+
const resolved = [];
|
|
719
|
+
for (const p of paths) {
|
|
720
|
+
try {
|
|
721
|
+
const stat = fs.statSync(p);
|
|
722
|
+
if (stat.isFile()) {
|
|
723
|
+
resolved.push(p);
|
|
724
|
+
} else if (stat.isDirectory()) {
|
|
725
|
+
const files = this._findMdFiles(fs, path, p);
|
|
726
|
+
// Also include .txt files
|
|
727
|
+
const txtFiles = fs.readdirSync(p, { recursive: true })
|
|
728
|
+
.filter(f => f.endsWith('.txt'))
|
|
729
|
+
.map(f => path.join(p, f));
|
|
730
|
+
resolved.push(...files, ...txtFiles);
|
|
731
|
+
}
|
|
732
|
+
} catch { /* skip missing */ }
|
|
733
|
+
}
|
|
734
|
+
|
|
735
|
+
result.conversations_found = resolved.length;
|
|
736
|
+
|
|
737
|
+
const fileChunks = resolved.map(f => {
|
|
738
|
+
try {
|
|
739
|
+
const content = fs.readFileSync(f, 'utf-8');
|
|
740
|
+
return { file: f, chunks: this._chunkText(content, chunkChars) };
|
|
741
|
+
} catch {
|
|
742
|
+
return { file: f, chunks: [] };
|
|
743
|
+
}
|
|
744
|
+
});
|
|
745
|
+
|
|
746
|
+
const totalChunks = fileChunks.reduce((sum, fc) => sum + Math.max(fc.chunks.length, 1), 0);
|
|
747
|
+
let chunkIdx = 0;
|
|
748
|
+
|
|
749
|
+
for (const { file, chunks } of fileChunks) {
|
|
750
|
+
const title = path.basename(file, path.extname(file));
|
|
751
|
+
if (!chunks.length) { chunkIdx++; continue; }
|
|
752
|
+
|
|
753
|
+
for (const chunk of chunks) {
|
|
754
|
+
try {
|
|
755
|
+
await this.add([{ role: 'user', content: `Note: ${title}\n\n${chunk}` }]);
|
|
756
|
+
result.chunks_sent++;
|
|
757
|
+
chunkIdx++;
|
|
758
|
+
if (options.onProgress) options.onProgress(chunkIdx, totalChunks, title);
|
|
759
|
+
} catch (e) {
|
|
760
|
+
result.errors.push(`${title}: ${e.message}`);
|
|
761
|
+
chunkIdx++;
|
|
762
|
+
}
|
|
763
|
+
}
|
|
764
|
+
}
|
|
765
|
+
|
|
766
|
+
result.duration_seconds = (Date.now() - start) / 1000;
|
|
767
|
+
return result;
|
|
768
|
+
}
|
|
769
|
+
|
|
770
|
+
// ---- Internal helpers for import ----
|
|
771
|
+
|
|
772
|
+
/** Walk ChatGPT's tree-structured mapping to extract ordered messages. */
|
|
773
|
+
_walkChatgptTree(mapping) {
|
|
774
|
+
if (!mapping || !Object.keys(mapping).length) return [];
|
|
775
|
+
let rootId = null;
|
|
776
|
+
for (const [id, node] of Object.entries(mapping)) {
|
|
777
|
+
if (!node.parent) { rootId = id; break; }
|
|
778
|
+
}
|
|
779
|
+
if (!rootId) return [];
|
|
780
|
+
|
|
781
|
+
const messages = [];
|
|
782
|
+
let currentId = rootId;
|
|
783
|
+
while (currentId) {
|
|
784
|
+
const node = mapping[currentId];
|
|
785
|
+
if (!node) break;
|
|
786
|
+
const msg = node.message;
|
|
787
|
+
if (msg && msg.content) {
|
|
788
|
+
const role = (msg.author || {}).role || '';
|
|
789
|
+
const contentData = msg.content;
|
|
790
|
+
let text = '';
|
|
791
|
+
if (typeof contentData === 'string') {
|
|
792
|
+
text = contentData;
|
|
793
|
+
} else if (contentData.parts) {
|
|
794
|
+
text = contentData.parts
|
|
795
|
+
.map(p => typeof p === 'string' ? p : (p && p.text) || '')
|
|
796
|
+
.join('');
|
|
797
|
+
}
|
|
798
|
+
text = text.trim();
|
|
799
|
+
if (text && (role === 'user' || role === 'assistant')) {
|
|
800
|
+
messages.push({ role, content: text });
|
|
801
|
+
}
|
|
802
|
+
}
|
|
803
|
+
const children = node.children || [];
|
|
804
|
+
currentId = children[0] || null;
|
|
805
|
+
}
|
|
806
|
+
return messages;
|
|
807
|
+
}
|
|
808
|
+
|
|
809
|
+
/** Find .md files recursively, skipping dotfiles and .obsidian/. */
|
|
810
|
+
_findMdFiles(fs, path, dir) {
|
|
811
|
+
const results = [];
|
|
812
|
+
const entries = fs.readdirSync(dir, { withFileTypes: true });
|
|
813
|
+
for (const entry of entries) {
|
|
814
|
+
if (entry.name.startsWith('.')) continue;
|
|
815
|
+
if (entry.name === 'node_modules' || entry.name === '__pycache__') continue;
|
|
816
|
+
const full = path.join(dir, entry.name);
|
|
817
|
+
if (entry.isDirectory()) {
|
|
818
|
+
results.push(...this._findMdFiles(fs, path, full));
|
|
819
|
+
} else if (entry.name.endsWith('.md')) {
|
|
820
|
+
results.push(full);
|
|
821
|
+
}
|
|
822
|
+
}
|
|
823
|
+
return results.sort();
|
|
824
|
+
}
|
|
825
|
+
|
|
826
|
+
/** Split text into chunks at paragraph boundaries. */
|
|
827
|
+
_chunkText(text, chunkChars) {
|
|
828
|
+
text = (text || '').trim();
|
|
829
|
+
if (!text) return [];
|
|
830
|
+
if (text.length <= chunkChars) return [text];
|
|
831
|
+
|
|
832
|
+
const paragraphs = text.split('\n\n');
|
|
833
|
+
const chunks = [];
|
|
834
|
+
let current = '';
|
|
835
|
+
|
|
836
|
+
for (const para of paragraphs) {
|
|
837
|
+
const p = para.trim();
|
|
838
|
+
if (!p) continue;
|
|
839
|
+
if (current.length + p.length + 2 > chunkChars && current) {
|
|
840
|
+
chunks.push(current.trim());
|
|
841
|
+
current = '';
|
|
842
|
+
}
|
|
843
|
+
current += p + '\n\n';
|
|
844
|
+
}
|
|
845
|
+
if (current.trim()) chunks.push(current.trim());
|
|
846
|
+
return chunks;
|
|
847
|
+
}
|
|
575
848
|
}
|
|
576
849
|
|
|
577
850
|
class MengramError extends Error {
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "mengram-ai",
|
|
3
|
-
"version": "2.7.2",
|
|
3
|
+
"version": "2.9.0",
|
|
4
4
|
"description": "Human-like memory for AI — semantic, episodic & procedural memory. Experience-driven procedures, Cognitive Profile, unified search, memory agents. Free Mem0 alternative.",
|
|
5
5
|
"main": "index.js",
|
|
6
6
|
"types": "index.d.ts",
|