@memrosetta/cli 0.5.0 → 0.5.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. package/dist/chunk-47SU2YUJ.js +64 -0
  2. package/dist/chunk-4LNXT25H.js +891 -0
  3. package/dist/chunk-C4ANKSCI.js +151 -0
  4. package/dist/chunk-CEHRM6IW.js +151 -0
  5. package/dist/chunk-G2W4YK2T.js +56 -0
  6. package/dist/chunk-GGXC7TAJ.js +139 -0
  7. package/dist/chunk-GRNZVSAF.js +56 -0
  8. package/dist/chunk-GZINXXM4.js +139 -0
  9. package/dist/chunk-RZFCVYTK.js +71 -0
  10. package/dist/chunk-US6CEDMU.js +66 -0
  11. package/dist/chunk-VMGX5FCY.js +64 -0
  12. package/dist/chunk-WYHEAKPC.js +71 -0
  13. package/dist/clear-32Y3U2WR.js +39 -0
  14. package/dist/clear-AFEJPCDA.js +39 -0
  15. package/dist/compress-CL5D4VVJ.js +33 -0
  16. package/dist/compress-UUEO7WCU.js +33 -0
  17. package/dist/count-U2ML5ZON.js +24 -0
  18. package/dist/count-VVOGYSM7.js +24 -0
  19. package/dist/duplicates-CEJ7WSGW.js +149 -0
  20. package/dist/duplicates-IBUS7CJS.js +149 -0
  21. package/dist/enforce-T7AS4PVD.js +381 -0
  22. package/dist/enforce-TC5SDPEZ.js +381 -0
  23. package/dist/feedback-3PJTTEOD.js +51 -0
  24. package/dist/feedback-IB7BHIRP.js +51 -0
  25. package/dist/get-TQ2U7HCD.js +30 -0
  26. package/dist/get-WPZIHQKW.js +30 -0
  27. package/dist/hooks/enforce-codex.js +88 -0
  28. package/dist/hooks/on-prompt.js +3 -3
  29. package/dist/hooks/on-stop.js +3 -3
  30. package/dist/index.js +30 -20
  31. package/dist/ingest-37UXPVT5.js +97 -0
  32. package/dist/ingest-TPQRH34A.js +97 -0
  33. package/dist/init-6YQL3RCQ.js +210 -0
  34. package/dist/init-ISP73KEC.js +210 -0
  35. package/dist/init-LHXRCCLX.js +210 -0
  36. package/dist/invalidate-ER2TFFWK.js +40 -0
  37. package/dist/invalidate-PVHUGAJ6.js +40 -0
  38. package/dist/maintain-NICAXFK6.js +37 -0
  39. package/dist/maintain-Q553GBSF.js +37 -0
  40. package/dist/migrate-CZL3YNQK.js +255 -0
  41. package/dist/migrate-FI26FSBP.js +255 -0
  42. package/dist/relate-5TN2WEG3.js +57 -0
  43. package/dist/relate-KLBMYWB3.js +57 -0
  44. package/dist/reset-IPOAKTJM.js +132 -0
  45. package/dist/reset-P62B444X.js +132 -0
  46. package/dist/search-AYZBKRXF.js +48 -0
  47. package/dist/search-JQ3MLRKS.js +48 -0
  48. package/dist/status-FWHUUZ4R.js +184 -0
  49. package/dist/status-JF2V7ZBX.js +184 -0
  50. package/dist/status-UV66PWUD.js +184 -0
  51. package/dist/store-AAJCT3PX.js +101 -0
  52. package/dist/store-OVDS57U5.js +101 -0
  53. package/dist/sync-56KJTKE7.js +542 -0
  54. package/dist/sync-BCKBYRXY.js +542 -0
  55. package/dist/working-memory-CJARSGEK.js +53 -0
  56. package/dist/working-memory-Z3RUGSTQ.js +53 -0
  57. package/package.json +6 -5
@@ -0,0 +1,255 @@
1
+ import {
2
+ hasFlag,
3
+ optionalOption
4
+ } from "./chunk-US6CEDMU.js";
5
+ import {
6
+ resolveDbPath
7
+ } from "./chunk-47SU2YUJ.js";
8
+ import {
9
+ output,
10
+ outputError
11
+ } from "./chunk-ET6TNQOJ.js";
12
+ import {
13
+ resolveCanonicalUserId
14
+ } from "./chunk-WYHEAKPC.js";
15
+
16
+ // src/commands/migrate.ts
17
+ import { createInterface } from "readline";
18
+ var MIGRATION_NAME = "legacy-user-id-to-canonical-v1";
19
// Build a read-only impact report for the legacy-user-id migration:
// total/legacy row counts, a per-partition breakdown, pending sync work,
// duplicate content groups that span partitions, and whether the
// migration was already recorded in migration_version.
function scanLegacyImpact(db, canonicalUserId) {
  // Tiny helper: run a COUNT(*) query aliased `AS c` and return the number.
  const countOf = (sql, ...params) => db.prepare(sql).get(...params).c;

  const totalRows = countOf("SELECT COUNT(*) AS c FROM memories");
  const legacyRows = countOf(
    "SELECT COUNT(*) AS c FROM memories WHERE user_id != ?",
    canonicalUserId
  );

  // One row per legacy partition, busiest partition first.
  const breakdown = db
    .prepare(
      `SELECT user_id AS legacyUserId, COUNT(*) AS rows, COUNT(DISTINCT namespace) AS distinctNamespaces
FROM memories
WHERE user_id != ?
GROUP BY user_id
ORDER BY rows DESC`
    )
    .all(canonicalUserId);

  // Pending outbox entries would be invalidated by the partition rewrite.
  const queuePending = hasTable(db, "sync_outbox")
    ? countOf("SELECT COUNT(*) AS c FROM sync_outbox WHERE pushed_at IS NULL")
    : 0;

  // Content bodies that currently exist under more than one user_id.
  const crossPartitionDuplicateGroups = countOf(
    `WITH x AS (
SELECT content, COUNT(DISTINCT user_id) AS u
FROM memories
GROUP BY content
)
SELECT COUNT(*) AS c FROM x WHERE u > 1`
  );

  const alreadyMigrated =
    hasTable(db, "migration_version") &&
    Boolean(
      db.prepare("SELECT 1 FROM migration_version WHERE name = ?").get(MIGRATION_NAME)
    );

  return {
    canonicalUserId,
    totalRows,
    legacyRows,
    distinctLegacyUserIds: breakdown.length,
    breakdown,
    queuePending,
    crossPartitionDuplicateGroups,
    alreadyMigrated
  };
}
52
// True when a table with the given name exists in the SQLite schema.
function hasTable(db, name) {
  const match = db
    .prepare("SELECT name FROM sqlite_master WHERE type='table' AND name = ?")
    .get(name);
  return match != null;
}
56
// Apply the legacy-user-id migration inside a single transaction:
//  1. snapshot each legacy row's (user_id, namespace) into memory_legacy_scope,
//  2. rewrite memories.user_id to the canonical user,
//  3. clear sync queues and cursor state so the hub is re-synced from scratch,
//  4. record the migration name in migration_version (idempotent).
// Returns per-step change counts for the caller's report.
function runLegacyUserIdMigration(db, canonicalUserId) {
  const apply = db.transaction(() => {
    // Non-destructive snapshot of the pre-migration scope of legacy rows.
    const legacyScopeRows = db
      .prepare(
        `INSERT OR IGNORE INTO memory_legacy_scope (
memory_id, legacy_user_id, legacy_namespace, migrated_at
)
SELECT memory_id, user_id, namespace, CURRENT_TIMESTAMP
FROM memories
WHERE user_id != ?`
      )
      .run(canonicalUserId).changes;

    // Collapse every legacy partition onto the canonical user id.
    const movedRows = db
      .prepare("UPDATE memories SET user_id = ? WHERE user_id != ?")
      .run(canonicalUserId, canonicalUserId).changes;

    // Queued sync work referenced the old partitions; drop it wholesale.
    let outboxCleared = 0;
    if (hasTable(db, "sync_outbox")) {
      outboxCleared = db.prepare("DELETE FROM sync_outbox").run().changes;
    }
    let inboxCleared = 0;
    if (hasTable(db, "sync_inbox")) {
      inboxCleared = db.prepare("DELETE FROM sync_inbox").run().changes;
    }

    // Reset push/pull cursors so the next sync starts from a clean slate.
    let cursorReset = false;
    if (hasTable(db, "sync_state")) {
      const wiped = db
        .prepare(
          `DELETE FROM sync_state WHERE key IN (
'last_cursor',
'pull_cursor',
'last_push_attempt_at',
'last_push_success_at',
'last_pull_attempt_at',
'last_pull_success_at'
)`
        )
        .run();
      cursorReset = wiped.changes > 0;
    }

    // Mark the migration as applied; INSERT OR IGNORE keeps this idempotent.
    db.prepare(
      `INSERT OR IGNORE INTO migration_version (name, applied_at)
VALUES (?, CURRENT_TIMESTAMP)`
    ).run(MIGRATION_NAME);

    return { movedRows, legacyScopeRows, outboxCleared, inboxCleared, cursorReset };
  });
  return apply();
}
109
// Ask a yes/no question on the controlling terminal.
// Returns false immediately in non-interactive sessions (no TTY) so the
// caller can direct the user to pass --yes. Only an explicit "y"/"yes"
// answer (case-insensitive) counts as confirmation.
async function confirmInteractive(question) {
  if (!process.stdin.isTTY) return false;
  // Fix: echo the prompt on stderr (matching printImpactPreview) so stdout
  // stays reserved for the command's structured --format output; with the
  // prompt on stdout, piped/JSON output would be polluted by "[y/N]".
  const rl = createInterface({ input: process.stdin, output: process.stderr });
  try {
    const answer = await new Promise((resolve) => {
      rl.question(`${question} [y/N] `, (a) => resolve(a));
    });
    return /^y(es)?$/i.test(answer.trim());
  } finally {
    // Always release the readline interface, even if the prompt throws.
    rl.close();
  }
}
121
// CLI entry point: `memrosetta migrate legacy-user-ids [--dry-run]
// [--canonical <user>] [--yes]`. Scans for legacy user_id partitions,
// previews the impact, confirms with the user (unless --yes), then applies
// runLegacyUserIdMigration and prints follow-up sync steps.
async function run(options) {
  const { args, format, db: dbOverride } = options;
  if (args[0] !== "legacy-user-ids") {
    outputError(
      "Usage: memrosetta migrate legacy-user-ids [--dry-run] [--canonical <user>] [--yes]",
      format
    );
    process.exitCode = 1;
    return;
  }

  const rest = args.slice(1);
  const dryRun = hasFlag(rest, "--dry-run");
  const autoYes = hasFlag(rest, "--yes") || hasFlag(rest, "-y");
  const canonicalUserId = resolveCanonicalUserId(
    optionalOption(rest, "--canonical") ?? null
  );
  const dbPath = resolveDbPath(dbOverride);

  // better-sqlite3 is loaded lazily so unrelated commands never pay for it.
  const { default: Database } = await import("better-sqlite3");
  const db = new Database(dbPath);
  try {
    const { ensureSchema } = await import("@memrosetta/core");
    ensureSchema(db, { vectorEnabled: false });
    const report = scanLegacyImpact(db, canonicalUserId);

    // Nothing to move: either the migration already ran or there simply are
    // no legacy partitions. Same outcome, different reason string.
    if (report.legacyRows === 0) {
      const reason = report.alreadyMigrated
        ? `migration ${MIGRATION_NAME} already applied and no legacy rows remain`
        : "no legacy user_id partitions found";
      output({ status: "noop", reason, report }, format);
      return;
    }

    if (dryRun) {
      output(
        {
          status: "dry-run",
          canonicalUserId,
          report,
          wouldClear: { syncOutbox: true, syncInbox: true, cursorState: true },
          nextSteps: [
            "Run without --dry-run to apply the migration.",
            "After migration: `memrosetta sync backfill` then `memrosetta sync now`."
          ]
        },
        format
      );
      return;
    }

    // Destructive path: show the preview and require explicit consent
    // unless the caller passed --yes / -y.
    if (!autoYes) {
      printImpactPreview(report, canonicalUserId);
      const confirmed = await confirmInteractive(
        `Apply migration and move ${report.legacyRows} row(s) onto '${canonicalUserId}'?`
      );
      if (!confirmed) {
        output(
          {
            status: "aborted",
            reason: "user declined or non-interactive session (pass --yes to skip prompt)",
            report
          },
          format
        );
        return;
      }
    }

    const result = runLegacyUserIdMigration(db, canonicalUserId);
    output(
      {
        status: "applied",
        canonicalUserId,
        migration: MIGRATION_NAME,
        result,
        nextSteps: [
          `Run \`memrosetta sync backfill --user ${canonicalUserId}\` to republish memories onto the canonical partition.`,
          "Then `memrosetta sync now` to push them to the hub.",
          "Run `memrosetta duplicates report` to audit cross-partition duplicates before any future dedupe pass."
        ]
      },
      format
    );
  } finally {
    db.close();
  }
}
222
// Human-readable pre-flight summary, written to stderr so stdout stays
// reserved for the command's structured output.
function printImpactPreview(report, canonicalUserId) {
  const lines = [];
  lines.push("");
  lines.push("Migration impact preview");
  lines.push("------------------------");
  lines.push(` canonical user : ${canonicalUserId}`);
  lines.push(` total memories : ${report.totalRows}`);
  lines.push(` legacy rows to move : ${report.legacyRows}`);
  lines.push(` distinct legacy partitions: ${report.distinctLegacyUserIds}`);
  lines.push(` sync_outbox pending : ${report.queuePending}`);
  lines.push(` cross-partition dup groups: ${report.crossPartitionDuplicateGroups}`);
  lines.push("");
  lines.push("Top legacy partitions:");
  // Cap the breakdown at the ten busiest partitions to keep the preview short.
  for (const r of report.breakdown.slice(0, 10)) {
    lines.push(` - ${r.legacyUserId.padEnd(40)} rows=${r.rows} namespaces=${r.distinctNamespaces}`);
  }
  lines.push("");
  lines.push("This will:");
  lines.push(" * copy legacy rows into memory_legacy_scope (non-destructive)");
  lines.push(" * rewrite memories.user_id to the canonical user");
  lines.push(" * leave memories.namespace untouched");
  lines.push(" * clear sync_outbox / sync_inbox / sync cursor state");
  lines.push("");
  lines.push("Back up ~/.memrosetta/memories.db before continuing if you have not already.");
  lines.push("");
  process.stderr.write(lines.join("\n"));
}
251
+ export {
252
+ run,
253
+ runLegacyUserIdMigration,
254
+ scanLegacyImpact
255
+ };
@@ -0,0 +1,255 @@
1
+ import {
2
+ hasFlag,
3
+ optionalOption
4
+ } from "./chunk-US6CEDMU.js";
5
+ import {
6
+ resolveDbPath
7
+ } from "./chunk-VMGX5FCY.js";
8
+ import {
9
+ output,
10
+ outputError
11
+ } from "./chunk-ET6TNQOJ.js";
12
+ import {
13
+ resolveCanonicalUserId
14
+ } from "./chunk-RZFCVYTK.js";
15
+
16
+ // src/commands/migrate.ts
17
+ import { createInterface } from "readline";
18
+ var MIGRATION_NAME = "legacy-user-id-to-canonical-v1";
19
// Build a read-only impact report for the legacy-user-id migration:
// total/legacy row counts, a per-partition breakdown, pending sync work,
// duplicate content groups that span partitions, and whether the
// migration was already recorded in migration_version.
function scanLegacyImpact(db, canonicalUserId) {
  // Tiny helper: run a COUNT(*) query aliased `AS c` and return the number.
  const countOf = (sql, ...params) => db.prepare(sql).get(...params).c;

  const totalRows = countOf("SELECT COUNT(*) AS c FROM memories");
  const legacyRows = countOf(
    "SELECT COUNT(*) AS c FROM memories WHERE user_id != ?",
    canonicalUserId
  );

  // One row per legacy partition, busiest partition first.
  const breakdown = db
    .prepare(
      `SELECT user_id AS legacyUserId, COUNT(*) AS rows, COUNT(DISTINCT namespace) AS distinctNamespaces
FROM memories
WHERE user_id != ?
GROUP BY user_id
ORDER BY rows DESC`
    )
    .all(canonicalUserId);

  // Pending outbox entries would be invalidated by the partition rewrite.
  const queuePending = hasTable(db, "sync_outbox")
    ? countOf("SELECT COUNT(*) AS c FROM sync_outbox WHERE pushed_at IS NULL")
    : 0;

  // Content bodies that currently exist under more than one user_id.
  const crossPartitionDuplicateGroups = countOf(
    `WITH x AS (
SELECT content, COUNT(DISTINCT user_id) AS u
FROM memories
GROUP BY content
)
SELECT COUNT(*) AS c FROM x WHERE u > 1`
  );

  const alreadyMigrated =
    hasTable(db, "migration_version") &&
    Boolean(
      db.prepare("SELECT 1 FROM migration_version WHERE name = ?").get(MIGRATION_NAME)
    );

  return {
    canonicalUserId,
    totalRows,
    legacyRows,
    distinctLegacyUserIds: breakdown.length,
    breakdown,
    queuePending,
    crossPartitionDuplicateGroups,
    alreadyMigrated
  };
}
52
// True when a table with the given name exists in the SQLite schema.
function hasTable(db, name) {
  const match = db
    .prepare("SELECT name FROM sqlite_master WHERE type='table' AND name = ?")
    .get(name);
  return match != null;
}
56
// Apply the legacy-user-id migration inside a single transaction:
//  1. snapshot each legacy row's (user_id, namespace) into memory_legacy_scope,
//  2. rewrite memories.user_id to the canonical user,
//  3. clear sync queues and cursor state so the hub is re-synced from scratch,
//  4. record the migration name in migration_version (idempotent).
// Returns per-step change counts for the caller's report.
function runLegacyUserIdMigration(db, canonicalUserId) {
  const apply = db.transaction(() => {
    // Non-destructive snapshot of the pre-migration scope of legacy rows.
    const legacyScopeRows = db
      .prepare(
        `INSERT OR IGNORE INTO memory_legacy_scope (
memory_id, legacy_user_id, legacy_namespace, migrated_at
)
SELECT memory_id, user_id, namespace, CURRENT_TIMESTAMP
FROM memories
WHERE user_id != ?`
      )
      .run(canonicalUserId).changes;

    // Collapse every legacy partition onto the canonical user id.
    const movedRows = db
      .prepare("UPDATE memories SET user_id = ? WHERE user_id != ?")
      .run(canonicalUserId, canonicalUserId).changes;

    // Queued sync work referenced the old partitions; drop it wholesale.
    let outboxCleared = 0;
    if (hasTable(db, "sync_outbox")) {
      outboxCleared = db.prepare("DELETE FROM sync_outbox").run().changes;
    }
    let inboxCleared = 0;
    if (hasTable(db, "sync_inbox")) {
      inboxCleared = db.prepare("DELETE FROM sync_inbox").run().changes;
    }

    // Reset push/pull cursors so the next sync starts from a clean slate.
    let cursorReset = false;
    if (hasTable(db, "sync_state")) {
      const wiped = db
        .prepare(
          `DELETE FROM sync_state WHERE key IN (
'last_cursor',
'pull_cursor',
'last_push_attempt_at',
'last_push_success_at',
'last_pull_attempt_at',
'last_pull_success_at'
)`
        )
        .run();
      cursorReset = wiped.changes > 0;
    }

    // Mark the migration as applied; INSERT OR IGNORE keeps this idempotent.
    db.prepare(
      `INSERT OR IGNORE INTO migration_version (name, applied_at)
VALUES (?, CURRENT_TIMESTAMP)`
    ).run(MIGRATION_NAME);

    return { movedRows, legacyScopeRows, outboxCleared, inboxCleared, cursorReset };
  });
  return apply();
}
109
// Ask a yes/no question on the controlling terminal.
// Returns false immediately in non-interactive sessions (no TTY) so the
// caller can direct the user to pass --yes. Only an explicit "y"/"yes"
// answer (case-insensitive) counts as confirmation.
async function confirmInteractive(question) {
  if (!process.stdin.isTTY) return false;
  // Fix: echo the prompt on stderr (matching printImpactPreview) so stdout
  // stays reserved for the command's structured --format output; with the
  // prompt on stdout, piped/JSON output would be polluted by "[y/N]".
  const rl = createInterface({ input: process.stdin, output: process.stderr });
  try {
    const answer = await new Promise((resolve) => {
      rl.question(`${question} [y/N] `, (a) => resolve(a));
    });
    return /^y(es)?$/i.test(answer.trim());
  } finally {
    // Always release the readline interface, even if the prompt throws.
    rl.close();
  }
}
121
// CLI entry point: `memrosetta migrate legacy-user-ids [--dry-run]
// [--canonical <user>] [--yes]`. Scans for legacy user_id partitions,
// previews the impact, confirms with the user (unless --yes), then applies
// runLegacyUserIdMigration and prints follow-up sync steps.
async function run(options) {
  const { args, format, db: dbOverride } = options;
  if (args[0] !== "legacy-user-ids") {
    outputError(
      "Usage: memrosetta migrate legacy-user-ids [--dry-run] [--canonical <user>] [--yes]",
      format
    );
    process.exitCode = 1;
    return;
  }

  const rest = args.slice(1);
  const dryRun = hasFlag(rest, "--dry-run");
  const autoYes = hasFlag(rest, "--yes") || hasFlag(rest, "-y");
  const canonicalUserId = resolveCanonicalUserId(
    optionalOption(rest, "--canonical") ?? null
  );
  const dbPath = resolveDbPath(dbOverride);

  // better-sqlite3 is loaded lazily so unrelated commands never pay for it.
  const { default: Database } = await import("better-sqlite3");
  const db = new Database(dbPath);
  try {
    const { ensureSchema } = await import("@memrosetta/core");
    ensureSchema(db, { vectorEnabled: false });
    const report = scanLegacyImpact(db, canonicalUserId);

    // Nothing to move: either the migration already ran or there simply are
    // no legacy partitions. Same outcome, different reason string.
    if (report.legacyRows === 0) {
      const reason = report.alreadyMigrated
        ? `migration ${MIGRATION_NAME} already applied and no legacy rows remain`
        : "no legacy user_id partitions found";
      output({ status: "noop", reason, report }, format);
      return;
    }

    if (dryRun) {
      output(
        {
          status: "dry-run",
          canonicalUserId,
          report,
          wouldClear: { syncOutbox: true, syncInbox: true, cursorState: true },
          nextSteps: [
            "Run without --dry-run to apply the migration.",
            "After migration: `memrosetta sync backfill` then `memrosetta sync now`."
          ]
        },
        format
      );
      return;
    }

    // Destructive path: show the preview and require explicit consent
    // unless the caller passed --yes / -y.
    if (!autoYes) {
      printImpactPreview(report, canonicalUserId);
      const confirmed = await confirmInteractive(
        `Apply migration and move ${report.legacyRows} row(s) onto '${canonicalUserId}'?`
      );
      if (!confirmed) {
        output(
          {
            status: "aborted",
            reason: "user declined or non-interactive session (pass --yes to skip prompt)",
            report
          },
          format
        );
        return;
      }
    }

    const result = runLegacyUserIdMigration(db, canonicalUserId);
    output(
      {
        status: "applied",
        canonicalUserId,
        migration: MIGRATION_NAME,
        result,
        nextSteps: [
          `Run \`memrosetta sync backfill --user ${canonicalUserId}\` to republish memories onto the canonical partition.`,
          "Then `memrosetta sync now` to push them to the hub.",
          "Run `memrosetta duplicates report` to audit cross-partition duplicates before any future dedupe pass."
        ]
      },
      format
    );
  } finally {
    db.close();
  }
}
222
// Human-readable pre-flight summary, written to stderr so stdout stays
// reserved for the command's structured output.
function printImpactPreview(report, canonicalUserId) {
  const lines = [];
  lines.push("");
  lines.push("Migration impact preview");
  lines.push("------------------------");
  lines.push(` canonical user : ${canonicalUserId}`);
  lines.push(` total memories : ${report.totalRows}`);
  lines.push(` legacy rows to move : ${report.legacyRows}`);
  lines.push(` distinct legacy partitions: ${report.distinctLegacyUserIds}`);
  lines.push(` sync_outbox pending : ${report.queuePending}`);
  lines.push(` cross-partition dup groups: ${report.crossPartitionDuplicateGroups}`);
  lines.push("");
  lines.push("Top legacy partitions:");
  // Cap the breakdown at the ten busiest partitions to keep the preview short.
  for (const r of report.breakdown.slice(0, 10)) {
    lines.push(` - ${r.legacyUserId.padEnd(40)} rows=${r.rows} namespaces=${r.distinctNamespaces}`);
  }
  lines.push("");
  lines.push("This will:");
  lines.push(" * copy legacy rows into memory_legacy_scope (non-destructive)");
  lines.push(" * rewrite memories.user_id to the canonical user");
  lines.push(" * leave memories.namespace untouched");
  lines.push(" * clear sync_outbox / sync_inbox / sync cursor state");
  lines.push("");
  lines.push("Back up ~/.memrosetta/memories.db before continuing if you have not already.");
  lines.push("");
  process.stderr.write(lines.join("\n"));
}
251
+ export {
252
+ run,
253
+ runLegacyUserIdMigration,
254
+ scanLegacyImpact
255
+ };
@@ -0,0 +1,57 @@
1
+ import {
2
+ buildRelationCreatedOp,
3
+ openCliSyncContext
4
+ } from "./chunk-GGXC7TAJ.js";
5
+ import {
6
+ optionalOption,
7
+ requireOption
8
+ } from "./chunk-US6CEDMU.js";
9
+ import {
10
+ getEngine,
11
+ resolveDbPath
12
+ } from "./chunk-47SU2YUJ.js";
13
+ import {
14
+ output,
15
+ outputError
16
+ } from "./chunk-ET6TNQOJ.js";
17
+ import "./chunk-WYHEAKPC.js";
18
+
19
+ // src/commands/relate.ts
20
// Relation kinds accepted by `memrosetta relate --type <kind>`.
var VALID_RELATION_TYPES = /* @__PURE__ */ new Set([
  "updates",
  "extends",
  "derives",
  "contradicts",
  "supports"
]);
// CLI entry point: `memrosetta relate --src <id> --dst <id> --type <kind>
// [--reason <text>]`. Creates the relation via the engine and, when sync is
// enabled, enqueues a relation-created op for the hub.
async function run(options) {
  const { args, format, db, noEmbeddings } = options;
  const src = requireOption(args, "--src", "source memory ID");
  const dst = requireOption(args, "--dst", "destination memory ID");
  const relationType = requireOption(args, "--type", "relation type");
  const reason = optionalOption(args, "--reason");
  if (!VALID_RELATION_TYPES.has(relationType)) {
    // Fix: derive the list from the Set so the message can never drift from
    // the actual validation (Set insertion order preserves the old wording).
    outputError(
      `Invalid relation type: ${relationType}. Must be one of: ${[...VALID_RELATION_TYPES].join(", ")}`,
      format
    );
    process.exitCode = 1;
    return;
  }
  const engine = await getEngine({ db, noEmbeddings });
  const relation = await engine.relate(
    src,
    dst,
    relationType,
    reason
  );
  const sync = await openCliSyncContext(resolveDbPath(db));
  if (sync.enabled) {
    sync.enqueue(buildRelationCreatedOp(sync, relation));
    sync.close();
  }
  // NOTE(review): sync is only closed when enabled — confirm
  // openCliSyncContext holds no open resources in the disabled case.
  output(relation, format);
}
55
+ export {
56
+ run
57
+ };
@@ -0,0 +1,57 @@
1
+ import {
2
+ buildRelationCreatedOp,
3
+ openCliSyncContext
4
+ } from "./chunk-GZINXXM4.js";
5
+ import {
6
+ optionalOption,
7
+ requireOption
8
+ } from "./chunk-US6CEDMU.js";
9
+ import {
10
+ getEngine,
11
+ resolveDbPath
12
+ } from "./chunk-VMGX5FCY.js";
13
+ import {
14
+ output,
15
+ outputError
16
+ } from "./chunk-ET6TNQOJ.js";
17
+ import "./chunk-RZFCVYTK.js";
18
+
19
+ // src/commands/relate.ts
20
// Relation kinds accepted by `memrosetta relate --type <kind>`.
var VALID_RELATION_TYPES = /* @__PURE__ */ new Set([
  "updates",
  "extends",
  "derives",
  "contradicts",
  "supports"
]);
// CLI entry point: `memrosetta relate --src <id> --dst <id> --type <kind>
// [--reason <text>]`. Creates the relation via the engine and, when sync is
// enabled, enqueues a relation-created op for the hub.
async function run(options) {
  const { args, format, db, noEmbeddings } = options;
  const src = requireOption(args, "--src", "source memory ID");
  const dst = requireOption(args, "--dst", "destination memory ID");
  const relationType = requireOption(args, "--type", "relation type");
  const reason = optionalOption(args, "--reason");
  if (!VALID_RELATION_TYPES.has(relationType)) {
    // Fix: derive the list from the Set so the message can never drift from
    // the actual validation (Set insertion order preserves the old wording).
    outputError(
      `Invalid relation type: ${relationType}. Must be one of: ${[...VALID_RELATION_TYPES].join(", ")}`,
      format
    );
    process.exitCode = 1;
    return;
  }
  const engine = await getEngine({ db, noEmbeddings });
  const relation = await engine.relate(
    src,
    dst,
    relationType,
    reason
  );
  const sync = await openCliSyncContext(resolveDbPath(db));
  if (sync.enabled) {
    sync.enqueue(buildRelationCreatedOp(sync, relation));
    sync.close();
  }
  // NOTE(review): sync is only closed when enabled — confirm
  // openCliSyncContext holds no open resources in the disabled case.
  output(relation, format);
}
55
+ export {
56
+ run
57
+ };