@dpesch/mantisbt-mcp-server 1.5.4 → 1.5.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/search/store.js +7 -2
- package/dist/search/sync.js +13 -0
- package/package.json +1 -1
- package/tests/helpers/search-mocks.ts +1 -0
- package/tests/search/store.test.ts +45 -0
- package/tests/search/sync.test.ts +54 -0
package/CHANGELOG.md
CHANGED
|
@@ -7,6 +7,13 @@ This project adheres to [Semantic Versioning](https://semver.org/).
|
|
|
7
7
|
|
|
8
8
|
---
|
|
9
9
|
|
|
10
|
+
## [1.5.5] – 2026-03-18
|
|
11
|
+
|
|
12
|
+
### Fixed
|
|
13
|
+
- Semantic search index sync: eliminated O(n²) disk write amplification during initial index builds. Previously `addBatch()` wrote the entire `index.json` to disk after every batch, causing n/batch_size complete rewrites for a full rebuild. Now `addBatch()` only updates the in-memory map; a new `flush()` method (atomic write via tmp file + rename) persists to disk. `SearchSyncService.sync()` calls `flush()` as a checkpoint every 100 indexed issues (`CHECKPOINT_INTERVAL=100`), limiting data loss on process kill to at most 100 issues, and performs a final flush after the loop for any remaining items.
|
|
14
|
+
|
|
15
|
+
---
|
|
16
|
+
|
|
10
17
|
## [1.5.4] – 2026-03-18
|
|
11
18
|
|
|
12
19
|
### Fixed
|
package/dist/search/store.js
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { readFile, writeFile, mkdir, unlink } from 'node:fs/promises';
|
|
1
|
+
import { readFile, writeFile, rename, mkdir, unlink } from 'node:fs/promises';
|
|
2
2
|
import { join } from 'node:path';
|
|
3
3
|
// ---------------------------------------------------------------------------
|
|
4
4
|
// VectraStore
|
|
@@ -34,8 +34,10 @@ export class VectraStore {
|
|
|
34
34
|
}
|
|
35
35
|
async persist() {
|
|
36
36
|
const indexFile = join(this.vectraDir, 'index.json');
|
|
37
|
+
const tmpFile = indexFile + '.tmp';
|
|
37
38
|
const data = JSON.stringify([...this.items.values()]);
|
|
38
|
-
await writeFile(
|
|
39
|
+
await writeFile(tmpFile, data, 'utf-8');
|
|
40
|
+
await rename(tmpFile, indexFile);
|
|
39
41
|
}
|
|
40
42
|
async add(item) {
|
|
41
43
|
await this.ensureLoaded();
|
|
@@ -47,6 +49,9 @@ export class VectraStore {
|
|
|
47
49
|
for (const item of items) {
|
|
48
50
|
this.items.set(item.id, item);
|
|
49
51
|
}
|
|
52
|
+
// No persist() here — call flush() once after all batches are processed.
|
|
53
|
+
}
|
|
54
|
+
async flush() {
|
|
50
55
|
await this.persist();
|
|
51
56
|
}
|
|
52
57
|
async search(vector, topN) {
|
package/dist/search/sync.js
CHANGED
|
@@ -3,6 +3,7 @@
|
|
|
3
3
|
// ---------------------------------------------------------------------------
|
|
4
4
|
const PAGE_SIZE = 50;
|
|
5
5
|
const EMBED_BATCH_SIZE = 10;
|
|
6
|
+
const CHECKPOINT_INTERVAL = 100; // flush to disk every N indexed issues
|
|
6
7
|
export class SearchSyncService {
|
|
7
8
|
client;
|
|
8
9
|
store;
|
|
@@ -17,6 +18,7 @@ export class SearchSyncService {
|
|
|
17
18
|
const { issues: allIssues, totalFromApi } = await this.fetchAllIssues(lastSyncedAt ?? undefined, projectId);
|
|
18
19
|
let indexed = 0;
|
|
19
20
|
let skipped = 0;
|
|
21
|
+
let indexedSinceCheckpoint = 0;
|
|
20
22
|
// Process in batches of EMBED_BATCH_SIZE
|
|
21
23
|
for (let i = 0; i < allIssues.length; i += EMBED_BATCH_SIZE) {
|
|
22
24
|
const batch = allIssues.slice(i, i + EMBED_BATCH_SIZE);
|
|
@@ -43,6 +45,17 @@ export class SearchSyncService {
|
|
|
43
45
|
}));
|
|
44
46
|
await this.store.addBatch(batchItems);
|
|
45
47
|
indexed += batchItems.length;
|
|
48
|
+
indexedSinceCheckpoint += batchItems.length;
|
|
49
|
+
// Checkpoint flush: persist every CHECKPOINT_INTERVAL issues to limit
|
|
50
|
+
// data loss if the process is killed before the final flush.
|
|
51
|
+
if (indexedSinceCheckpoint >= CHECKPOINT_INTERVAL) {
|
|
52
|
+
await this.store.flush();
|
|
53
|
+
indexedSinceCheckpoint = 0;
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
// Final flush for any remaining items not yet written by a checkpoint.
|
|
57
|
+
if (indexedSinceCheckpoint > 0) {
|
|
58
|
+
await this.store.flush();
|
|
46
59
|
}
|
|
47
60
|
await this.store.setLastSyncedAt(new Date().toISOString());
|
|
48
61
|
// Persist the best known total for get_search_index_status.
|
package/tests/helpers/search-mocks.ts
CHANGED
|
@@ -45,6 +45,7 @@ export function makeMockStore(options?: { lastSyncedAt?: string | null; itemCoun
|
|
|
45
45
|
resetLastSyncedAt: vi.fn(async () => {}),
|
|
46
46
|
getLastKnownTotal: vi.fn(async () => options?.lastKnownTotal ?? null),
|
|
47
47
|
setLastKnownTotal: vi.fn(async () => {}),
|
|
48
|
+
flush: vi.fn(async () => {}),
|
|
48
49
|
};
|
|
49
50
|
}
|
|
50
51
|
|
|
package/tests/search/store.test.ts
CHANGED
|
@@ -146,3 +146,48 @@ describe('VectraStore.resetLastSyncedAt', () => {
|
|
|
146
146
|
expect(await store.getLastSyncedAt()).toBeNull();
|
|
147
147
|
});
|
|
148
148
|
});
|
|
149
|
+
|
|
150
|
+
describe('VectraStore.addBatch', () => {
|
|
151
|
+
it('increases in-memory count without persisting to disk', async () => {
|
|
152
|
+
await store.addBatch([
|
|
153
|
+
{ id: 1, vector: randomVector(), metadata: { summary: 'A' } },
|
|
154
|
+
{ id: 2, vector: randomVector(), metadata: { summary: 'B' } },
|
|
155
|
+
]);
|
|
156
|
+
expect(await store.count()).toBe(2);
|
|
157
|
+
|
|
158
|
+
// A new instance reading the same dir must not see the items yet
|
|
159
|
+
const store2 = new VectraStore(dir);
|
|
160
|
+
expect(await store2.count()).toBe(0);
|
|
161
|
+
});
|
|
162
|
+
|
|
163
|
+
it('persists items after flush()', async () => {
|
|
164
|
+
await store.addBatch([
|
|
165
|
+
{ id: 10, vector: randomVector(), metadata: { summary: 'X' } },
|
|
166
|
+
{ id: 11, vector: randomVector(), metadata: { summary: 'Y' } },
|
|
167
|
+
]);
|
|
168
|
+
await store.flush();
|
|
169
|
+
|
|
170
|
+
const store2 = new VectraStore(dir);
|
|
171
|
+
expect(await store2.count()).toBe(2);
|
|
172
|
+
});
|
|
173
|
+
|
|
174
|
+
it('accumulates items across multiple addBatch calls before flush', async () => {
|
|
175
|
+
await store.addBatch([{ id: 1, vector: randomVector(), metadata: { summary: 'First' } }]);
|
|
176
|
+
await store.addBatch([{ id: 2, vector: randomVector(), metadata: { summary: 'Second' } }]);
|
|
177
|
+
await store.flush();
|
|
178
|
+
|
|
179
|
+
const store2 = new VectraStore(dir);
|
|
180
|
+
expect(await store2.count()).toBe(2);
|
|
181
|
+
});
|
|
182
|
+
});
|
|
183
|
+
|
|
184
|
+
describe('VectraStore.flush (atomic write)', () => {
|
|
185
|
+
it('leaves no .tmp file after a successful flush', async () => {
|
|
186
|
+
const { readdir } = await import('node:fs/promises');
|
|
187
|
+
await store.addBatch([{ id: 1, vector: randomVector(), metadata: { summary: 'Test' } }]);
|
|
188
|
+
await store.flush();
|
|
189
|
+
|
|
190
|
+
const files = await readdir(join(dir, 'vectra'));
|
|
191
|
+
expect(files.some(f => f.endsWith('.tmp'))).toBe(false);
|
|
192
|
+
});
|
|
193
|
+
});
|
|
package/tests/search/sync.test.ts
CHANGED
|
@@ -144,6 +144,60 @@ describe('SearchSyncService.sync – project_id', () => {
|
|
|
144
144
|
});
|
|
145
145
|
});
|
|
146
146
|
|
|
147
|
+
// ---------------------------------------------------------------------------
|
|
148
|
+
// flush / checkpoint behaviour
|
|
149
|
+
// ---------------------------------------------------------------------------
|
|
150
|
+
|
|
151
|
+
describe('SearchSyncService.sync – flush and checkpoint', () => {
|
|
152
|
+
function makeIssues(count: number) {
|
|
153
|
+
return Array.from({ length: count }, (_, i) => ({
|
|
154
|
+
id: i + 1,
|
|
155
|
+
summary: `Issue ${i + 1}`,
|
|
156
|
+
description: `Description ${i + 1}`,
|
|
157
|
+
updated_at: '2024-03-10T08:00:00Z',
|
|
158
|
+
}));
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
it('calls flush exactly once when fewer than 100 issues are indexed', async () => {
|
|
162
|
+
const store = makeMockStore({ lastSyncedAt: null });
|
|
163
|
+
vi.mocked(fetch).mockResolvedValue(
|
|
164
|
+
makeResponse(200, JSON.stringify({ issues: makeIssues(50), total_count: 50 }))
|
|
165
|
+
);
|
|
166
|
+
|
|
167
|
+
const service = new SearchSyncService(client, store, embedder);
|
|
168
|
+
await service.sync();
|
|
169
|
+
|
|
170
|
+
// Only the final flush, no checkpoint
|
|
171
|
+
expect(store.flush).toHaveBeenCalledTimes(1);
|
|
172
|
+
});
|
|
173
|
+
|
|
174
|
+
it('calls flush exactly once when exactly 100 issues are indexed (checkpoint covers all, no redundant final)', async () => {
|
|
175
|
+
const store = makeMockStore({ lastSyncedAt: null });
|
|
176
|
+
vi.mocked(fetch).mockResolvedValue(
|
|
177
|
+
makeResponse(200, JSON.stringify({ issues: makeIssues(100), total_count: 100 }))
|
|
178
|
+
);
|
|
179
|
+
|
|
180
|
+
const service = new SearchSyncService(client, store, embedder);
|
|
181
|
+
await service.sync();
|
|
182
|
+
|
|
183
|
+
// Checkpoint at 100 covers all items; indexedSinceCheckpoint resets to 0 → no redundant final flush
|
|
184
|
+
expect(store.flush).toHaveBeenCalledTimes(1);
|
|
185
|
+
});
|
|
186
|
+
|
|
187
|
+
it('calls flush twice when 110 issues are indexed (checkpoint at 100 + final for remaining 10)', async () => {
|
|
188
|
+
const store = makeMockStore({ lastSyncedAt: null });
|
|
189
|
+
vi.mocked(fetch).mockResolvedValue(
|
|
190
|
+
makeResponse(200, JSON.stringify({ issues: makeIssues(110), total_count: 110 }))
|
|
191
|
+
);
|
|
192
|
+
|
|
193
|
+
const service = new SearchSyncService(client, store, embedder);
|
|
194
|
+
await service.sync();
|
|
195
|
+
|
|
196
|
+
// Checkpoint at 100, then final flush for remaining 10
|
|
197
|
+
expect(store.flush).toHaveBeenCalledTimes(2);
|
|
198
|
+
});
|
|
199
|
+
});
|
|
200
|
+
|
|
147
201
|
// ---------------------------------------------------------------------------
|
|
148
202
|
// total_count persistence (regression: MantisBT installations without total_count)
|
|
149
203
|
// ---------------------------------------------------------------------------
|