kiro-memory 1.9.0 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/README.md +5 -1
  2. package/package.json +5 -5
  3. package/plugin/dist/cli/contextkit.js +2611 -345
  4. package/plugin/dist/hooks/agentSpawn.js +853 -223
  5. package/plugin/dist/hooks/kiro-hooks.js +841 -211
  6. package/plugin/dist/hooks/postToolUse.js +853 -222
  7. package/plugin/dist/hooks/stop.js +850 -220
  8. package/plugin/dist/hooks/userPromptSubmit.js +848 -216
  9. package/plugin/dist/index.js +843 -340
  10. package/plugin/dist/plugins/github/github-client.js +152 -0
  11. package/plugin/dist/plugins/github/index.js +412 -0
  12. package/plugin/dist/plugins/github/issue-parser.js +54 -0
  13. package/plugin/dist/plugins/slack/formatter.js +90 -0
  14. package/plugin/dist/plugins/slack/index.js +215 -0
  15. package/plugin/dist/sdk/index.js +841 -215
  16. package/plugin/dist/servers/mcp-server.js +4461 -397
  17. package/plugin/dist/services/search/EmbeddingService.js +146 -37
  18. package/plugin/dist/services/search/HybridSearch.js +564 -116
  19. package/plugin/dist/services/search/VectorSearch.js +187 -60
  20. package/plugin/dist/services/search/index.js +565 -254
  21. package/plugin/dist/services/sqlite/Backup.js +416 -0
  22. package/plugin/dist/services/sqlite/Database.js +126 -153
  23. package/plugin/dist/services/sqlite/ImportExport.js +452 -0
  24. package/plugin/dist/services/sqlite/Observations.js +314 -19
  25. package/plugin/dist/services/sqlite/Prompts.js +1 -1
  26. package/plugin/dist/services/sqlite/Search.js +41 -29
  27. package/plugin/dist/services/sqlite/Summaries.js +4 -4
  28. package/plugin/dist/services/sqlite/index.js +1428 -208
  29. package/plugin/dist/viewer.css +1 -0
  30. package/plugin/dist/viewer.html +2 -179
  31. package/plugin/dist/viewer.js +23 -24942
  32. package/plugin/dist/viewer.js.map +7 -0
  33. package/plugin/dist/worker-service.js +427 -5569
  34. package/plugin/dist/worker-service.js.map +7 -0
@@ -220,14 +220,48 @@ ${data.stack}` : ` ${data.message}`;
220
220
  var logger = new Logger();
221
221
 
222
222
  // src/services/search/EmbeddingService.ts
223
// Registry of known embedding models: maps a short alias to its
// Hugging Face model id and the dimensionality of the vectors it emits.
var MODEL_CONFIGS = {
  "all-MiniLM-L6-v2": { modelId: "Xenova/all-MiniLM-L6-v2", dimensions: 384 },
  "jina-code-v2": { modelId: "jinaai/jina-embeddings-v2-base-code", dimensions: 768 },
  "bge-small-en": { modelId: "BAAI/bge-small-en-v1.5", dimensions: 384 }
};
// Aliases the fastembed backend can serve (both are 384-dim models).
var FASTEMBED_COMPATIBLE_MODELS = /* @__PURE__ */ new Set(["all-MiniLM-L6-v2", "bge-small-en"]);
223
238
  var EmbeddingService = class {
224
239
  provider = null;
225
240
  model = null;
226
241
  initialized = false;
227
242
  initializing = null;
243
+ config;
244
+ configName;
245
+ constructor() {
246
+ const envModel = process.env.KIRO_MEMORY_EMBEDDING_MODEL || "all-MiniLM-L6-v2";
247
+ this.configName = envModel;
248
+ if (MODEL_CONFIGS[envModel]) {
249
+ this.config = MODEL_CONFIGS[envModel];
250
+ } else if (envModel.includes("/")) {
251
+ const dimensions = parseInt(process.env.KIRO_MEMORY_EMBEDDING_DIMENSIONS || "384", 10);
252
+ this.config = {
253
+ modelId: envModel,
254
+ dimensions: isNaN(dimensions) ? 384 : dimensions
255
+ };
256
+ } else {
257
+ logger.warn("EMBEDDING", `Unknown model name '${envModel}', falling back to 'all-MiniLM-L6-v2'`);
258
+ this.configName = "all-MiniLM-L6-v2";
259
+ this.config = MODEL_CONFIGS["all-MiniLM-L6-v2"];
260
+ }
261
+ }
228
262
  /**
229
- * Inizializza il servizio di embedding.
230
- * Tenta fastembed, poi @huggingface/transformers, poi fallback a null.
263
+ * Initialize the embedding service.
264
+ * Tries fastembed (when compatible), then @huggingface/transformers, then falls back to null.
231
265
  */
232
266
  async initialize() {
233
267
  if (this.initialized) return this.provider !== null;
@@ -238,45 +272,48 @@ var EmbeddingService = class {
238
272
  return result;
239
273
  }
240
274
  async _doInitialize() {
241
- try {
242
- const fastembed = await import("fastembed");
243
- const EmbeddingModel = fastembed.EmbeddingModel || fastembed.default?.EmbeddingModel;
244
- const FlagEmbedding = fastembed.FlagEmbedding || fastembed.default?.FlagEmbedding;
245
- if (FlagEmbedding && EmbeddingModel) {
246
- this.model = await FlagEmbedding.init({
247
- model: EmbeddingModel.BGESmallENV15
248
- });
249
- this.provider = "fastembed";
250
- this.initialized = true;
251
- logger.info("EMBEDDING", "Inizializzato con fastembed (BGE-small-en-v1.5)");
252
- return true;
275
+ const fastembedCompatible = FASTEMBED_COMPATIBLE_MODELS.has(this.configName);
276
+ if (fastembedCompatible) {
277
+ try {
278
+ const fastembed = await import("fastembed");
279
+ const EmbeddingModel = fastembed.EmbeddingModel || fastembed.default?.EmbeddingModel;
280
+ const FlagEmbedding = fastembed.FlagEmbedding || fastembed.default?.FlagEmbedding;
281
+ if (FlagEmbedding && EmbeddingModel) {
282
+ this.model = await FlagEmbedding.init({
283
+ model: EmbeddingModel.BGESmallENV15
284
+ });
285
+ this.provider = "fastembed";
286
+ this.initialized = true;
287
+ logger.info("EMBEDDING", `Initialized with fastembed (BGE-small-en-v1.5) for model '${this.configName}'`);
288
+ return true;
289
+ }
290
+ } catch (error) {
291
+ logger.debug("EMBEDDING", `fastembed not available: ${error}`);
253
292
  }
254
- } catch (error) {
255
- logger.debug("EMBEDDING", `fastembed non disponibile: ${error}`);
256
293
  }
257
294
  try {
258
295
  const transformers = await import("@huggingface/transformers");
259
296
  const pipeline = transformers.pipeline || transformers.default?.pipeline;
260
297
  if (pipeline) {
261
- this.model = await pipeline("feature-extraction", "Xenova/all-MiniLM-L6-v2", {
298
+ this.model = await pipeline("feature-extraction", this.config.modelId, {
262
299
  quantized: true
263
300
  });
264
301
  this.provider = "transformers";
265
302
  this.initialized = true;
266
- logger.info("EMBEDDING", "Inizializzato con @huggingface/transformers (all-MiniLM-L6-v2)");
303
+ logger.info("EMBEDDING", `Initialized with @huggingface/transformers (${this.config.modelId})`);
267
304
  return true;
268
305
  }
269
306
  } catch (error) {
270
- logger.debug("EMBEDDING", `@huggingface/transformers non disponibile: ${error}`);
307
+ logger.debug("EMBEDDING", `@huggingface/transformers not available: ${error}`);
271
308
  }
272
309
  this.provider = null;
273
310
  this.initialized = true;
274
- logger.warn("EMBEDDING", "Nessun provider embedding disponibile, ricerca semantica disabilitata");
311
+ logger.warn("EMBEDDING", "No embedding provider available, semantic search disabled");
275
312
  return false;
276
313
  }
277
314
  /**
278
- * Genera embedding per un singolo testo.
279
- * Ritorna Float32Array con 384 dimensioni, o null se non disponibile.
315
+ * Generate embedding for a single text.
316
+ * Returns Float32Array with configured dimensions, or null if not available.
280
317
  */
281
318
  async embed(text) {
282
319
  if (!this.initialized) await this.initialize();
@@ -289,46 +326,118 @@ var EmbeddingService = class {
289
326
  return await this._embedTransformers(truncated);
290
327
  }
291
328
  } catch (error) {
292
- logger.error("EMBEDDING", `Errore generazione embedding: ${error}`);
329
+ logger.error("EMBEDDING", `Error generating embedding: ${error}`);
293
330
  }
294
331
  return null;
295
332
  }
296
333
  /**
297
- * Genera embeddings in batch.
334
+ * Generate embeddings in batch.
335
+ * Uses native batch support when available (fastembed, transformers),
336
+ * falls back to serial processing on batch failure.
298
337
  */
299
338
  async embedBatch(texts) {
300
339
  if (!this.initialized) await this.initialize();
301
340
  if (!this.provider || !this.model) return texts.map(() => null);
302
- const results = [];
303
- for (const text of texts) {
304
- try {
305
- const embedding = await this.embed(text);
306
- results.push(embedding);
307
- } catch {
308
- results.push(null);
341
+ if (texts.length === 0) return [];
342
+ const truncated = texts.map((t) => t.substring(0, 2e3));
343
+ try {
344
+ if (this.provider === "fastembed") {
345
+ return await this._embedBatchFastembed(truncated);
346
+ } else if (this.provider === "transformers") {
347
+ return await this._embedBatchTransformers(truncated);
309
348
  }
349
+ } catch (error) {
350
+ logger.warn("EMBEDDING", `Batch embedding failed, falling back to serial: ${error}`);
310
351
  }
311
- return results;
352
+ return this._embedBatchSerial(truncated);
312
353
  }
313
354
  /**
314
- * Verifica se il servizio è disponibile.
355
+ * Check if the service is available.
315
356
  */
316
357
  isAvailable() {
317
358
  return this.initialized && this.provider !== null;
318
359
  }
319
360
  /**
320
- * Nome del provider attivo.
361
+ * Name of the active provider.
321
362
  */
322
363
  getProvider() {
323
364
  return this.provider;
324
365
  }
325
366
  /**
326
- * Dimensioni del vettore embedding.
367
+ * Embedding vector dimensions for the active model configuration.
327
368
  */
328
369
  getDimensions() {
329
- return 384;
370
+ return this.config.dimensions;
371
+ }
372
+ /**
373
+ * Human-readable model name used as identifier in the observation_embeddings table.
374
+ * Returns the short name (e.g., 'all-MiniLM-L6-v2') or the full HF model ID for custom models.
375
+ */
376
+ getModelName() {
377
+ return this.configName;
378
+ }
379
+ // --- Batch implementations ---
380
+ /**
381
+ * Native batch embedding with fastembed.
382
+ * FlagEmbedding.embed() accepts string[] and returns an async iterable of batches.
383
+ */
384
+ async _embedBatchFastembed(texts) {
385
+ const results = [];
386
+ const embeddings = this.model.embed(texts, texts.length);
387
+ for await (const batch of embeddings) {
388
+ if (batch) {
389
+ for (const vec of batch) {
390
+ results.push(vec instanceof Float32Array ? vec : new Float32Array(vec));
391
+ }
392
+ }
393
+ }
394
+ while (results.length < texts.length) {
395
+ results.push(null);
396
+ }
397
+ return results;
398
+ }
399
+ /**
400
+ * Batch embedding with @huggingface/transformers pipeline.
401
+ * The pipeline accepts string[] and returns a Tensor with shape [N, dims].
402
+ */
403
+ async _embedBatchTransformers(texts) {
404
+ const output = await this.model(texts, {
405
+ pooling: "mean",
406
+ normalize: true
407
+ });
408
+ if (!output?.data) {
409
+ return texts.map(() => null);
410
+ }
411
+ const dims = this.getDimensions();
412
+ const data = output.data instanceof Float32Array ? output.data : new Float32Array(output.data);
413
+ const results = [];
414
+ for (let i = 0; i < texts.length; i++) {
415
+ const offset = i * dims;
416
+ if (offset + dims <= data.length) {
417
+ results.push(data.slice(offset, offset + dims));
418
+ } else {
419
+ results.push(null);
420
+ }
421
+ }
422
+ return results;
423
+ }
424
+ /**
425
+ * Serial fallback: embed texts one at a time.
426
+ * Used when native batch fails.
427
+ */
428
+ async _embedBatchSerial(texts) {
429
+ const results = [];
430
+ for (const text of texts) {
431
+ try {
432
+ const embedding = await this.embed(text);
433
+ results.push(embedding);
434
+ } catch {
435
+ results.push(null);
436
+ }
437
+ }
438
+ return results;
330
439
  }
331
- // --- Provider specifici ---
440
+ // --- Single-text provider implementations ---
332
441
  async _embedFastembed(text) {
333
442
  const embeddings = this.model.embed([text], 1);
334
443
  for await (const batch of embeddings) {