specmem-hardwicksoftware 3.6.0 → 3.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
|
@@ -52,7 +52,7 @@ CREATE TABLE IF NOT EXISTS team_member_conversations (
|
|
|
52
52
|
);
|
|
53
53
|
|
|
54
54
|
CREATE INDEX IF NOT EXISTS idx_team_member_conversations_memory ON team_member_conversations(memory_id);
|
|
55
|
-
CREATE INDEX IF NOT EXISTS
|
|
55
|
+
CREATE INDEX IF NOT EXISTS idx_team_member_conversations_team_member ON team_member_conversations(team_member_id);
|
|
56
56
|
CREATE INDEX IF NOT EXISTS idx_team_member_conversations_time ON team_member_conversations(timestamp);
|
|
57
57
|
|
|
58
58
|
-- HELPER FUNCTIONS
|
|
@@ -266,6 +266,27 @@ INSERT INTO team_channels (name, channel_type, created_by, project_path)
|
|
|
266
266
|
SELECT 'team-broadcast', 'broadcast', 'system', '/'
|
|
267
267
|
WHERE NOT EXISTS (SELECT 1 FROM team_channels WHERE name = 'team-broadcast' AND channel_type = 'broadcast');
|
|
268
268
|
|
|
269
|
+
-- ============================================================================
|
|
270
|
+
-- TEAM MEMBER CONVERSATIONS TABLE
|
|
271
|
+
-- Stores the conversation context that spawned a memory
|
|
272
|
+
-- Required by MemoryDrilldown.js for getMemoryFull / drill_down
|
|
273
|
+
-- ============================================================================
|
|
274
|
+
CREATE TABLE IF NOT EXISTS team_member_conversations (
|
|
275
|
+
id BIGSERIAL PRIMARY KEY,
|
|
276
|
+
memory_id UUID NOT NULL,
|
|
277
|
+
team_member_id VARCHAR(255) NOT NULL,
|
|
278
|
+
team_member_name VARCHAR(255),
|
|
279
|
+
timestamp TIMESTAMPTZ DEFAULT NOW(),
|
|
280
|
+
summary TEXT,
|
|
281
|
+
full_transcript TEXT,
|
|
282
|
+
message_count INTEGER,
|
|
283
|
+
created_at TIMESTAMPTZ DEFAULT NOW()
|
|
284
|
+
);
|
|
285
|
+
|
|
286
|
+
CREATE INDEX IF NOT EXISTS idx_team_member_conversations_memory ON team_member_conversations(memory_id);
|
|
287
|
+
CREATE INDEX IF NOT EXISTS idx_team_member_conversations_team_member ON team_member_conversations(team_member_id);
|
|
288
|
+
CREATE INDEX IF NOT EXISTS idx_team_member_conversations_time ON team_member_conversations(timestamp);
|
|
289
|
+
|
|
269
290
|
-- ============================================================================
|
|
270
291
|
-- END - Core data uses PUBLIC schema with project_path filtering
|
|
271
292
|
-- ============================================================================
|
package/dist/mcp/toolRegistry.js
CHANGED
|
@@ -54,6 +54,8 @@ import { SmartSearch } from '../tools/goofy/smartSearch.js';
|
|
|
54
54
|
// Import memory drilldown tools - gallery view + full drill-down
|
|
55
55
|
import { FindMemoryGallery } from '../tools/goofy/findMemoryGallery.js';
|
|
56
56
|
import { GetMemoryFull } from '../tools/goofy/getMemoryFull.js';
|
|
57
|
+
// Import project memory import tool - carry context across projects
|
|
58
|
+
import { ImportProjectMemories } from '../tools/goofy/importProjectMemories.js';
|
|
57
59
|
// Import MCP-based team communication tools (NEW - replaces HTTP team member comms)
|
|
58
60
|
import { createTeamCommTools } from './tools/teamComms.js';
|
|
59
61
|
// Import embedding server control tools (Phase 4 - user start/stop/status)
|
|
@@ -500,6 +502,8 @@ export function createToolRegistry(db, embeddingProvider) {
|
|
|
500
502
|
// Camera roll drilldown tools - zoom in/out on memories and code
|
|
501
503
|
registry.register(new DrillDown(db));
|
|
502
504
|
registry.register(new GetMemoryByDrilldownID(db));
|
|
505
|
+
// Project memory import tool - import memories from other projects
|
|
506
|
+
registry.register(new ImportProjectMemories(db, cachingProvider));
|
|
503
507
|
// Team communication tools - multi-team member coordination
|
|
504
508
|
const teamCommTools = createTeamCommTools();
|
|
505
509
|
for (const tool of teamCommTools) {
|
|
[file header missing from extraction — hunks below are from the source file containing `export class MemoryDrilldown`; exact path not shown in this diff]
CHANGED
|
@@ -93,7 +93,7 @@ export class MemoryDrilldown {
|
|
|
93
93
|
m.metadata,
|
|
94
94
|
m.embedding,
|
|
95
95
|
EXISTS(SELECT 1 FROM codebase_pointers WHERE memory_id = m.id) as has_code,
|
|
96
|
-
EXISTS(SELECT 1 FROM team_member_conversations WHERE memory_id = m.id) as has_conversation,
|
|
96
|
+
EXISTS(SELECT 1 FROM team_member_conversations tmc WHERE tmc.memory_id = m.id) as has_conversation,
|
|
97
97
|
1 - (m.embedding <=> $1::vector) as relevance
|
|
98
98
|
FROM memories m
|
|
99
99
|
WHERE m.content ILIKE $2
|
|
@@ -218,7 +218,8 @@ export class MemoryDrilldown {
|
|
|
218
218
|
* Fetch the conversation that spawned this memory
|
|
219
219
|
*/
|
|
220
220
|
async getConversation(memoryId) {
|
|
221
|
-
|
|
221
|
+
try {
|
|
222
|
+
const result = await this.db.query(`
|
|
222
223
|
SELECT
|
|
223
224
|
team_member_id,
|
|
224
225
|
team_member_name,
|
|
@@ -230,17 +231,23 @@ export class MemoryDrilldown {
|
|
|
230
231
|
ORDER BY timestamp DESC
|
|
231
232
|
LIMIT 1
|
|
232
233
|
`, [memoryId]);
|
|
233
|
-
|
|
234
|
+
if (result.rows.length === 0) {
|
|
235
|
+
return null;
|
|
236
|
+
}
|
|
237
|
+
const row = result.rows[0];
|
|
238
|
+
return {
|
|
239
|
+
team_member_id: row.team_member_id,
|
|
240
|
+
team_member_name: row.team_member_name,
|
|
241
|
+
timestamp: row.timestamp,
|
|
242
|
+
summary: row.summary,
|
|
243
|
+
full_transcript: row.full_transcript
|
|
244
|
+
};
|
|
245
|
+
}
|
|
246
|
+
catch (err) {
|
|
247
|
+
// Table may not exist yet - gracefully return null
|
|
248
|
+
logger.warn({ err: err?.message, memoryId }, 'getConversation failed (table may not exist)');
|
|
234
249
|
return null;
|
|
235
250
|
}
|
|
236
|
-
const row = result.rows[0];
|
|
237
|
-
return {
|
|
238
|
-
team_member_id: row.team_member_id,
|
|
239
|
-
team_member_name: row.team_member_name,
|
|
240
|
-
timestamp: row.timestamp,
|
|
241
|
-
summary: row.summary,
|
|
242
|
-
full_transcript: row.full_transcript
|
|
243
|
-
};
|
|
244
251
|
}
|
|
245
252
|
/**
|
|
246
253
|
* GET RELATED MEMORIES
|
|
@@ -256,7 +263,7 @@ export class MemoryDrilldown {
|
|
|
256
263
|
m.tags,
|
|
257
264
|
m.metadata,
|
|
258
265
|
EXISTS(SELECT 1 FROM codebase_pointers WHERE memory_id = m.id) as has_code,
|
|
259
|
-
EXISTS(SELECT 1 FROM team_member_conversations WHERE memory_id = m.id) as has_conversation,
|
|
266
|
+
EXISTS(SELECT 1 FROM team_member_conversations tmc WHERE tmc.memory_id = m.id) as has_conversation,
|
|
260
267
|
1 - (m.embedding <=> $1::vector) as relevance
|
|
261
268
|
FROM memories m
|
|
262
269
|
WHERE m.id != $2
|
|
package/dist/tools/goofy/importProjectMemories.js
ADDED
|
@@ -0,0 +1,337 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* importProjectMemories - import memories from another project schema
|
|
3
|
+
*
|
|
4
|
+
* copies memories from project A into the current project
|
|
5
|
+
* so you can carry context across projects fr fr
|
|
6
|
+
*/
|
|
7
|
+
import { randomUUID } from 'crypto';
|
|
8
|
+
import { logger } from '../../utils/logger.js';
|
|
9
|
+
import { getProjectSchema } from '../../db/projectNamespacing.js';
|
|
10
|
+
import { getProjectPathForInsert } from '../../services/ProjectContext.js';
|
|
11
|
+
import { formatHumanReadable } from '../../utils/humanReadableOutput.js';
|
|
12
|
+
|
|
13
|
+
export class ImportProjectMemories {
|
|
14
|
+
db;
|
|
15
|
+
embeddingProvider;
|
|
16
|
+
name = 'import_project_memories';
|
|
17
|
+
description = 'Import memories from another project into the current project. Use this to carry context across projects - e.g. import /specmem memories into /AEGIS_AI.';
|
|
18
|
+
inputSchema = {
|
|
19
|
+
type: 'object',
|
|
20
|
+
properties: {
|
|
21
|
+
sourceProject: {
|
|
22
|
+
type: 'string',
|
|
23
|
+
description: 'Absolute path of the source project to import from (e.g. "/specmem", "/AEGIS_AI")'
|
|
24
|
+
},
|
|
25
|
+
query: {
|
|
26
|
+
type: 'string',
|
|
27
|
+
description: 'Optional semantic search query to filter which memories to import. If omitted, imports all (up to limit).'
|
|
28
|
+
},
|
|
29
|
+
tags: {
|
|
30
|
+
type: 'array',
|
|
31
|
+
items: { type: 'string' },
|
|
32
|
+
description: 'Optional tag filter - only import memories with these tags'
|
|
33
|
+
},
|
|
34
|
+
memoryTypes: {
|
|
35
|
+
type: 'array',
|
|
36
|
+
items: {
|
|
37
|
+
type: 'string',
|
|
38
|
+
enum: ['episodic', 'semantic', 'procedural', 'working', 'consolidated']
|
|
39
|
+
},
|
|
40
|
+
description: 'Optional memory type filter'
|
|
41
|
+
},
|
|
42
|
+
importance: {
|
|
43
|
+
type: 'array',
|
|
44
|
+
items: {
|
|
45
|
+
type: 'string',
|
|
46
|
+
enum: ['critical', 'high', 'medium', 'low', 'trivial']
|
|
47
|
+
},
|
|
48
|
+
description: 'Optional importance filter'
|
|
49
|
+
},
|
|
50
|
+
limit: {
|
|
51
|
+
type: 'number',
|
|
52
|
+
default: 100,
|
|
53
|
+
minimum: 1,
|
|
54
|
+
maximum: 1000,
|
|
55
|
+
description: 'Max number of memories to import (default: 100)'
|
|
56
|
+
},
|
|
57
|
+
dryRun: {
|
|
58
|
+
type: 'boolean',
|
|
59
|
+
default: false,
|
|
60
|
+
description: 'Preview what would be imported without actually importing'
|
|
61
|
+
}
|
|
62
|
+
},
|
|
63
|
+
required: ['sourceProject']
|
|
64
|
+
};
|
|
65
|
+
|
|
66
|
+
constructor(db, embeddingProvider) {
|
|
67
|
+
this.db = db;
|
|
68
|
+
this.embeddingProvider = embeddingProvider;
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
async execute(params) {
|
|
72
|
+
const { sourceProject, query, tags, memoryTypes, importance, dryRun = false } = params;
|
|
73
|
+
const limit = Math.min(params.limit || 100, 1000);
|
|
74
|
+
const startTime = Date.now();
|
|
75
|
+
|
|
76
|
+
try {
|
|
77
|
+
// Get schema names
|
|
78
|
+
const sourceSchema = getProjectSchema(sourceProject);
|
|
79
|
+
const currentSchema = this.db.getProjectSchemaName();
|
|
80
|
+
const currentProjectPath = getProjectPathForInsert();
|
|
81
|
+
|
|
82
|
+
logger.info({
|
|
83
|
+
sourceProject,
|
|
84
|
+
sourceSchema,
|
|
85
|
+
currentSchema,
|
|
86
|
+
limit,
|
|
87
|
+
dryRun,
|
|
88
|
+
query: query?.slice(0, 50),
|
|
89
|
+
tags
|
|
90
|
+
}, 'Starting memory import');
|
|
91
|
+
|
|
92
|
+
// Verify source schema exists
|
|
93
|
+
const schemaCheck = await this.db.query(
|
|
94
|
+
`SELECT schema_name FROM information_schema.schemata WHERE schema_name = $1`,
|
|
95
|
+
[sourceSchema]
|
|
96
|
+
);
|
|
97
|
+
|
|
98
|
+
if (schemaCheck.rows.length === 0) {
|
|
99
|
+
// List available schemas for helpful error
|
|
100
|
+
const available = await this.db.query(
|
|
101
|
+
`SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE 'specmem_%' ORDER BY schema_name`
|
|
102
|
+
);
|
|
103
|
+
const schemaList = available.rows.map(r => r.schema_name).join(', ');
|
|
104
|
+
return {
|
|
105
|
+
content: [{
|
|
106
|
+
type: 'text',
|
|
107
|
+
text: formatHumanReadable({
|
|
108
|
+
error: `Source schema '${sourceSchema}' not found`,
|
|
109
|
+
sourceProject,
|
|
110
|
+
availableSchemas: schemaList || 'none',
|
|
111
|
+
hint: 'Make sure the source project path is correct and has been used with SpecMem before'
|
|
112
|
+
})
|
|
113
|
+
}]
|
|
114
|
+
};
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
// Build query to fetch memories from source schema
|
|
118
|
+
const conditions = [];
|
|
119
|
+
const queryParams = [];
|
|
120
|
+
let paramIndex = 1;
|
|
121
|
+
|
|
122
|
+
// Filter by tags
|
|
123
|
+
if (tags && tags.length > 0) {
|
|
124
|
+
conditions.push(`tags && $${paramIndex}::text[]`);
|
|
125
|
+
queryParams.push(tags);
|
|
126
|
+
paramIndex++;
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
// Filter by memory types
|
|
130
|
+
if (memoryTypes && memoryTypes.length > 0) {
|
|
131
|
+
conditions.push(`memory_type = ANY($${paramIndex}::text[])`);
|
|
132
|
+
queryParams.push(memoryTypes);
|
|
133
|
+
paramIndex++;
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
// Filter by importance
|
|
137
|
+
if (importance && importance.length > 0) {
|
|
138
|
+
conditions.push(`importance = ANY($${paramIndex}::text[])`);
|
|
139
|
+
queryParams.push(importance);
|
|
140
|
+
paramIndex++;
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
// Build the SELECT query against source schema
|
|
144
|
+
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
|
|
145
|
+
|
|
146
|
+
let selectQuery;
|
|
147
|
+
if (query && this.embeddingProvider) {
|
|
148
|
+
// Semantic search - generate embedding for query, order by similarity
|
|
149
|
+
const embedding = await this.embeddingProvider.generateEmbedding(query);
|
|
150
|
+
const embeddingStr = `[${embedding.join(',')}]`;
|
|
151
|
+
queryParams.push(embeddingStr);
|
|
152
|
+
selectQuery = `
|
|
153
|
+
SELECT id, content, memory_type, importance, tags, metadata,
|
|
154
|
+
embedding, role, created_at, updated_at, expires_at,
|
|
155
|
+
1 - (embedding <=> $${paramIndex}::vector) as similarity
|
|
156
|
+
FROM "${sourceSchema}".memories
|
|
157
|
+
${whereClause}
|
|
158
|
+
ORDER BY embedding <=> $${paramIndex}::vector
|
|
159
|
+
LIMIT ${limit}
|
|
160
|
+
`;
|
|
161
|
+
paramIndex++;
|
|
162
|
+
} else {
|
|
163
|
+
selectQuery = `
|
|
164
|
+
SELECT id, content, memory_type, importance, tags, metadata,
|
|
165
|
+
embedding, role, created_at, updated_at, expires_at
|
|
166
|
+
FROM "${sourceSchema}".memories
|
|
167
|
+
${whereClause}
|
|
168
|
+
ORDER BY created_at DESC
|
|
169
|
+
LIMIT ${limit}
|
|
170
|
+
`;
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
const sourceMemories = await this.db.query(selectQuery, queryParams);
|
|
174
|
+
|
|
175
|
+
if (sourceMemories.rows.length === 0) {
|
|
176
|
+
return {
|
|
177
|
+
content: [{
|
|
178
|
+
type: 'text',
|
|
179
|
+
text: formatHumanReadable({
|
|
180
|
+
result: 'No memories found matching criteria in source project',
|
|
181
|
+
sourceProject,
|
|
182
|
+
sourceSchema,
|
|
183
|
+
filters: { tags, memoryTypes, importance, query: query?.slice(0, 50) }
|
|
184
|
+
})
|
|
185
|
+
}]
|
|
186
|
+
};
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
// Dry run - just show what would be imported
|
|
190
|
+
if (dryRun) {
|
|
191
|
+
const preview = sourceMemories.rows.slice(0, 10).map(m => ({
|
|
192
|
+
content: m.content?.slice(0, 100) + (m.content?.length > 100 ? '...' : ''),
|
|
193
|
+
type: m.memory_type,
|
|
194
|
+
importance: m.importance,
|
|
195
|
+
tags: m.tags,
|
|
196
|
+
similarity: m.similarity ? Math.round(m.similarity * 100) + '%' : undefined,
|
|
197
|
+
created: m.created_at
|
|
198
|
+
}));
|
|
199
|
+
|
|
200
|
+
return {
|
|
201
|
+
content: [{
|
|
202
|
+
type: 'text',
|
|
203
|
+
text: formatHumanReadable({
|
|
204
|
+
dryRun: true,
|
|
205
|
+
wouldImport: sourceMemories.rows.length,
|
|
206
|
+
sourceProject,
|
|
207
|
+
sourceSchema,
|
|
208
|
+
targetSchema: currentSchema,
|
|
209
|
+
preview,
|
|
210
|
+
previewNote: sourceMemories.rows.length > 10
|
|
211
|
+
? `Showing 10 of ${sourceMemories.rows.length} memories`
|
|
212
|
+
: undefined
|
|
213
|
+
})
|
|
214
|
+
}]
|
|
215
|
+
};
|
|
216
|
+
}
|
|
217
|
+
|
|
218
|
+
// Actually import - insert into current schema with new UUIDs
|
|
219
|
+
let imported = 0;
|
|
220
|
+
let skipped = 0;
|
|
221
|
+
const errors = [];
|
|
222
|
+
|
|
223
|
+
for (const memory of sourceMemories.rows) {
|
|
224
|
+
try {
|
|
225
|
+
const newId = randomUUID();
|
|
226
|
+
const importTag = `imported_from:${sourceProject}`;
|
|
227
|
+
const newTags = Array.isArray(memory.tags)
|
|
228
|
+
? [...new Set([...memory.tags, importTag])]
|
|
229
|
+
: [importTag];
|
|
230
|
+
|
|
231
|
+
// Merge metadata with import info
|
|
232
|
+
const newMetadata = {
|
|
233
|
+
...(memory.metadata || {}),
|
|
234
|
+
imported: {
|
|
235
|
+
from: sourceProject,
|
|
236
|
+
originalId: memory.id,
|
|
237
|
+
importedAt: new Date().toISOString()
|
|
238
|
+
}
|
|
239
|
+
};
|
|
240
|
+
|
|
241
|
+
// Check for duplicate content in target schema
|
|
242
|
+
const dupCheck = await this.db.query(
|
|
243
|
+
`SELECT id FROM memories WHERE content = $1 LIMIT 1`,
|
|
244
|
+
[memory.content]
|
|
245
|
+
);
|
|
246
|
+
|
|
247
|
+
if (dupCheck.rows.length > 0) {
|
|
248
|
+
skipped++;
|
|
249
|
+
continue;
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
// Insert into current schema
|
|
253
|
+
const insertQuery = `
|
|
254
|
+
INSERT INTO memories (
|
|
255
|
+
id, content, memory_type, importance, tags, metadata,
|
|
256
|
+
embedding, project_path, role, created_at, updated_at
|
|
257
|
+
) VALUES (
|
|
258
|
+
$1, $2, $3, $4, $5, $6,
|
|
259
|
+
$7, $8, $9, $10, $11
|
|
260
|
+
)
|
|
261
|
+
`;
|
|
262
|
+
|
|
263
|
+
await this.db.query(insertQuery, [
|
|
264
|
+
newId,
|
|
265
|
+
memory.content,
|
|
266
|
+
memory.memory_type || 'semantic',
|
|
267
|
+
memory.importance || 'medium',
|
|
268
|
+
newTags,
|
|
269
|
+
JSON.stringify(newMetadata),
|
|
270
|
+
memory.embedding, // preserve original embedding vector
|
|
271
|
+
currentProjectPath,
|
|
272
|
+
memory.role || 'user',
|
|
273
|
+
memory.created_at || new Date(),
|
|
274
|
+
new Date()
|
|
275
|
+
]);
|
|
276
|
+
|
|
277
|
+
imported++;
|
|
278
|
+
} catch (err) {
|
|
279
|
+
const errMsg = err instanceof Error ? err.message : String(err);
|
|
280
|
+
errors.push({ id: memory.id, error: errMsg });
|
|
281
|
+
logger.warn({ memoryId: memory.id, error: errMsg }, 'Failed to import memory');
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
const duration = Date.now() - startTime;
|
|
286
|
+
|
|
287
|
+
logger.info({
|
|
288
|
+
imported,
|
|
289
|
+
skipped,
|
|
290
|
+
errors: errors.length,
|
|
291
|
+
duration,
|
|
292
|
+
sourceProject,
|
|
293
|
+
sourceSchema,
|
|
294
|
+
currentSchema
|
|
295
|
+
}, 'Memory import completed');
|
|
296
|
+
|
|
297
|
+
const result = {
|
|
298
|
+
imported,
|
|
299
|
+
skipped,
|
|
300
|
+
errors: errors.length,
|
|
301
|
+
total: sourceMemories.rows.length,
|
|
302
|
+
sourceProject,
|
|
303
|
+
sourceSchema,
|
|
304
|
+
targetSchema: currentSchema,
|
|
305
|
+
duration: `${duration}ms`
|
|
306
|
+
};
|
|
307
|
+
|
|
308
|
+
if (errors.length > 0 && errors.length <= 5) {
|
|
309
|
+
result.errorDetails = errors;
|
|
310
|
+
} else if (errors.length > 5) {
|
|
311
|
+
result.errorDetails = errors.slice(0, 5);
|
|
312
|
+
result.moreErrors = errors.length - 5;
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
return {
|
|
316
|
+
content: [{
|
|
317
|
+
type: 'text',
|
|
318
|
+
text: formatHumanReadable(result)
|
|
319
|
+
}]
|
|
320
|
+
};
|
|
321
|
+
|
|
322
|
+
} catch (err) {
|
|
323
|
+
const errMsg = err instanceof Error ? err.message : String(err);
|
|
324
|
+
logger.error({ error: errMsg, sourceProject }, 'Memory import failed');
|
|
325
|
+
return {
|
|
326
|
+
content: [{
|
|
327
|
+
type: 'text',
|
|
328
|
+
text: formatHumanReadable({
|
|
329
|
+
error: 'Memory import failed',
|
|
330
|
+
details: errMsg,
|
|
331
|
+
sourceProject
|
|
332
|
+
})
|
|
333
|
+
}]
|
|
334
|
+
};
|
|
335
|
+
}
|
|
336
|
+
}
|
|
337
|
+
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "specmem-hardwicksoftware",
|
|
3
|
-
"version": "3.
|
|
3
|
+
"version": "3.7.0",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"description": "Persistent memory system for coding sessions - semantic search with pgvector, token compression, team coordination, file watching. Needs root: installs system-wide hooks, manages docker/PostgreSQL, writes global configs, handles screen sessions. justcalljon.pro",
|
|
6
6
|
"main": "dist/index.js",
|