@gmickel/gno 0.30.0 → 0.31.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -20,6 +20,7 @@ GNO is a local knowledge engine that turns your documents into a searchable, con
20
20
 
21
21
  - [Quick Start](#quick-start)
22
22
  - [Installation](#installation)
23
+ - [Daemon Mode](#daemon-mode)
23
24
  - [Search Modes](#search-modes)
24
25
  - [Agent Integration](#agent-integration)
25
26
  - [Web UI](#web-ui)
@@ -47,6 +48,13 @@ GNO is a local knowledge engine that turns your documents into a searchable, con
47
48
  - **CLI Concurrency Hardening**: read-only commands no longer trip transient `database is locked` errors when they overlap with `gno update`
48
49
  - **Web/Desktop UI Polish**: sharper workspace styling across dashboard, tabs, search, ask, and footer surfaces
49
50
 
51
+ ## What's New in v0.31
52
+
53
+ - **Windows Desktop Beta Artifact**: release flow now includes a packaged `windows-x64` desktop beta zip, not just source-level support claims
54
+ - **Packaged Runtime Proof**: Windows desktop packaging validates bundled Bun + staged GNO runtime + FTS5 + vendored snowball + `sqlite-vec`
55
+ - **Scoped Index Fix**: `gno index <collection>` now embeds only that collection instead of accidentally burning through unrelated backlog from other collections
56
+ - **CLI Reporting Fix**: long embed runs now report sane durations instead of bogus sub-second summaries
57
+
50
58
  ### v0.24
51
59
 
52
60
  - **Structured Query Documents**: first-class multi-line query syntax using `term:`, `intent:`, and `hyde:`
@@ -182,6 +190,10 @@ Verify everything works:
182
190
  gno doctor
183
191
  ```
184
192
 
193
+ **Windows**: current validated target is `windows-x64`, with a packaged
194
+ desktop beta zip now published on GitHub Releases. See
195
+ [docs/WINDOWS.md](./docs/WINDOWS.md) for support scope and validation notes.
196
+
185
197
  Keep an index fresh continuously without opening the Web UI:
186
198
 
187
199
  ```bash
@@ -191,6 +203,8 @@ gno daemon
191
203
  `gno daemon` runs as a foreground watcher/sync/embed process. Use `nohup`,
192
204
  launchd, or systemd if you want it supervised long-term.
193
205
 
206
+ See also: [docs/DAEMON.md](./docs/DAEMON.md)
207
+
194
208
  ### Connect to AI Agents
195
209
 
196
210
  #### MCP Server (Claude Desktop, Cursor, Zed, etc.)
@@ -228,6 +242,25 @@ gno skill install --target all # All targets
228
242
 
229
243
  ---
230
244
 
245
+ ## Daemon Mode
246
+
247
+ Use `gno daemon` when you want continuous indexing without the browser or
248
+ desktop shell open.
249
+
250
+ ```bash
251
+ gno daemon
252
+ gno daemon --no-sync-on-start
253
+ nohup gno daemon > /tmp/gno-daemon.log 2>&1 &
254
+ ```
255
+
256
+ It reuses the same watch/sync/embed runtime as `gno serve`, but stays
257
+ headless. As of v0.31 it is foreground-only and does not expose built-in
258
+ `start/stop/status` management.
259
+
260
+ [Daemon guide →](https://gno.sh/docs/DAEMON/)
261
+
262
+ ---
263
+
231
264
  ## SDK
232
265
 
233
266
  Embed GNO directly in another Bun or TypeScript app. No CLI subprocesses. No local server required.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@gmickel/gno",
3
- "version": "0.30.0",
3
+ "version": "0.31.1",
4
4
  "description": "Local semantic search for your documents. Index Markdown, PDF, and Office files with hybrid BM25 + vector search.",
5
5
  "keywords": [
6
6
  "embeddings",
@@ -16,6 +16,7 @@ import { getIndexDbPath, getModelsCachePath } from "../../app/constants";
16
16
  import { getConfigPaths, isInitialized, loadConfig } from "../../config";
17
17
  import { ModelCache } from "../../llm/cache";
18
18
  import { getActivePreset } from "../../llm/registry";
19
+ import { loadFts5Snowball } from "../../store/sqlite/fts5-snowball";
19
20
  import {
20
21
  getCustomSqlitePath,
21
22
  getExtensionLoadingMode,
@@ -221,6 +222,17 @@ async function checkSqliteExtensions(): Promise<DoctorCheck[]> {
221
222
  message: jsonAvailable ? "JSON1 available" : "JSON1 not available",
222
223
  });
223
224
 
225
+ // Probe vendored fts5-snowball extension
226
+ const snowballResult = loadFts5Snowball(db);
227
+ checks.push({
228
+ name: "fts5-snowball",
229
+ status: snowballResult.loaded ? "ok" : "error",
230
+ message: snowballResult.loaded
231
+ ? "fts5-snowball loaded"
232
+ : (snowballResult.error ?? "fts5-snowball failed to load"),
233
+ details: snowballResult.path ? [`Path: ${snowballResult.path}`] : undefined,
234
+ });
235
+
224
236
  // Probe sqlite-vec extension
225
237
  let sqliteVecAvailable = false;
226
238
  let sqliteVecVersion = "";
@@ -44,6 +44,8 @@ import {
44
44
  export interface EmbedOptions {
45
45
  /** Override config path */
46
46
  configPath?: string;
47
+ /** Restrict embedding work to a single collection */
48
+ collection?: string;
47
49
  /** Override model URI */
48
50
  model?: string;
49
51
  /** Batch size for embedding */
@@ -102,6 +104,7 @@ interface BatchContext {
102
104
  embedPort: EmbeddingPort;
103
105
  vectorIndex: VectorIndexPort;
104
106
  modelUri: string;
107
+ collection?: string;
105
108
  batchSize: number;
106
109
  force: boolean;
107
110
  showProgress: boolean;
@@ -127,10 +130,11 @@ async function processBatches(ctx: BatchContext): Promise<BatchResult> {
127
130
  while (embedded + errors < ctx.totalToEmbed) {
128
131
  // Get next batch using seek pagination (cursor-based)
129
132
  const batchResult = ctx.force
130
- ? await getActiveChunks(ctx.db, ctx.batchSize, cursor)
133
+ ? await getActiveChunks(ctx.db, ctx.batchSize, cursor, ctx.collection)
131
134
  : await ctx.stats.getBacklog(ctx.modelUri, {
132
135
  limit: ctx.batchSize,
133
136
  after: cursor,
137
+ collection: ctx.collection,
134
138
  });
135
139
 
136
140
  if (!batchResult.ok) {
@@ -247,6 +251,7 @@ interface EmbedContext {
247
251
  */
248
252
  async function initEmbedContext(
249
253
  configPath?: string,
254
+ collection?: string,
250
255
  model?: string
251
256
  ): Promise<({ ok: true } & EmbedContext) | { ok: false; error: string }> {
252
257
  const initialized = await isInitialized(configPath);
@@ -259,6 +264,12 @@ async function initEmbedContext(
259
264
  return { ok: false, error: configResult.error.message };
260
265
  }
261
266
  const config = configResult.value;
267
+ if (
268
+ collection &&
269
+ !config.collections.some((candidate) => candidate.name === collection)
270
+ ) {
271
+ return { ok: false, error: `Collection not found: ${collection}` };
272
+ }
262
273
 
263
274
  const preset = getActivePreset(config);
264
275
  const modelUri = model ?? preset.embed;
@@ -289,7 +300,11 @@ export async function embed(options: EmbedOptions = {}): Promise<EmbedResult> {
289
300
  const dryRun = options.dryRun ?? false;
290
301
 
291
302
  // Initialize config and store
292
- const initResult = await initEmbedContext(options.configPath, options.model);
303
+ const initResult = await initEmbedContext(
304
+ options.configPath,
305
+ options.collection,
306
+ options.model
307
+ );
293
308
  if (!initResult.ok) {
294
309
  return { success: false, error: initResult.error };
295
310
  }
@@ -306,8 +321,8 @@ export async function embed(options: EmbedOptions = {}): Promise<EmbedResult> {
306
321
 
307
322
  // Get backlog count first (before loading model)
308
323
  const backlogResult = force
309
- ? await getActiveChunkCount(db)
310
- : await stats.countBacklog(modelUri);
324
+ ? await getActiveChunkCount(db, options.collection)
325
+ : await stats.countBacklog(modelUri, { collection: options.collection });
311
326
 
312
327
  if (!backlogResult.ok) {
313
328
  return { success: false, error: backlogResult.error.message };
@@ -392,6 +407,7 @@ export async function embed(options: EmbedOptions = {}): Promise<EmbedResult> {
392
407
  embedPort,
393
408
  vectorIndex,
394
409
  modelUri,
410
+ collection: options.collection,
395
411
  batchSize,
396
412
  force,
397
413
  showProgress: !options.json,
@@ -443,19 +459,23 @@ export async function embed(options: EmbedOptions = {}): Promise<EmbedResult> {
443
459
  // Helper: Get all active chunks (for --force mode)
444
460
  // ─────────────────────────────────────────────────────────────────────────────
445
461
 
446
- function getActiveChunkCount(db: Database): Promise<StoreResult<number>> {
462
+ function getActiveChunkCount(
463
+ db: Database,
464
+ collection?: string
465
+ ): Promise<StoreResult<number>> {
447
466
  try {
467
+ const collectionClause = collection ? " AND d.collection = ?" : "";
448
468
  const result = db
449
469
  .prepare(
450
470
  `
451
471
  SELECT COUNT(*) as count FROM content_chunks c
452
472
  WHERE EXISTS (
453
473
  SELECT 1 FROM documents d
454
- WHERE d.mirror_hash = c.mirror_hash AND d.active = 1
474
+ WHERE d.mirror_hash = c.mirror_hash AND d.active = 1${collectionClause}
455
475
  )
456
476
  `
457
477
  )
458
- .get() as { count: number };
478
+ .get(...(collection ? [collection] : [])) as { count: number };
459
479
  return Promise.resolve(ok(result.count));
460
480
  } catch (e) {
461
481
  return Promise.resolve(
@@ -470,9 +490,11 @@ function getActiveChunkCount(db: Database): Promise<StoreResult<number>> {
470
490
  function getActiveChunks(
471
491
  db: Database,
472
492
  limit: number,
473
- after?: { mirrorHash: string; seq: number }
493
+ after?: { mirrorHash: string; seq: number },
494
+ collection?: string
474
495
  ): Promise<StoreResult<BacklogItem[]>> {
475
496
  try {
497
+ const collectionClause = collection ? " AND d.collection = ?" : "";
476
498
  // Include title for contextual embedding
477
499
  const sql = after
478
500
  ? `
@@ -482,7 +504,7 @@ function getActiveChunks(
482
504
  FROM content_chunks c
483
505
  WHERE EXISTS (
484
506
  SELECT 1 FROM documents d
485
- WHERE d.mirror_hash = c.mirror_hash AND d.active = 1
507
+ WHERE d.mirror_hash = c.mirror_hash AND d.active = 1${collectionClause}
486
508
  )
487
509
  AND (c.mirror_hash > ? OR (c.mirror_hash = ? AND c.seq > ?))
488
510
  ORDER BY c.mirror_hash, c.seq
@@ -495,15 +517,21 @@ function getActiveChunks(
495
517
  FROM content_chunks c
496
518
  WHERE EXISTS (
497
519
  SELECT 1 FROM documents d
498
- WHERE d.mirror_hash = c.mirror_hash AND d.active = 1
520
+ WHERE d.mirror_hash = c.mirror_hash AND d.active = 1${collectionClause}
499
521
  )
500
522
  ORDER BY c.mirror_hash, c.seq
501
523
  LIMIT ?
502
524
  `;
503
525
 
504
526
  const params = after
505
- ? [after.mirrorHash, after.mirrorHash, after.seq, limit]
506
- : [limit];
527
+ ? [
528
+ ...(collection ? [collection] : []),
529
+ after.mirrorHash,
530
+ after.mirrorHash,
531
+ after.seq,
532
+ limit,
533
+ ]
534
+ : [...(collection ? [collection] : []), limit];
507
535
 
508
536
  const results = db.prepare(sql).all(...params) as BacklogItem[];
509
537
  return Promise.resolve(ok(results));
@@ -71,6 +71,7 @@ export async function index(options: IndexOptions = {}): Promise<IndexResult> {
71
71
  const { embed } = await import("./embed");
72
72
  const result = await embed({
73
73
  configPath: options.configPath,
74
+ collection: options.collection,
74
75
  verbose: options.verbose,
75
76
  });
76
77
  if (result.success) {
@@ -95,6 +96,15 @@ export function formatIndex(
95
96
  result: IndexResult,
96
97
  options: IndexOptions
97
98
  ): string {
99
+ function formatDuration(seconds: number): string {
100
+ if (seconds < 60) {
101
+ return `${seconds.toFixed(1)}s`;
102
+ }
103
+ const mins = Math.floor(seconds / 60);
104
+ const secs = seconds % 60;
105
+ return `${mins}m ${secs.toFixed(0)}s`;
106
+ }
107
+
98
108
  if (!result.success) {
99
109
  return `Error: ${result.error}`;
100
110
  }
@@ -110,10 +120,12 @@ export function formatIndex(
110
120
  } else if (result.embedResult) {
111
121
  lines.push("");
112
122
  const { embedded, errors, duration } = result.embedResult;
113
- const errPart = errors > 0 ? ` (${errors} errors)` : "";
114
123
  lines.push(
115
- `Embedded ${embedded} chunks in ${(duration / 1000).toFixed(1)}s${errPart}`
124
+ `Embedded ${embedded.toLocaleString()} chunks in ${formatDuration(duration)}`
116
125
  );
126
+ if (errors > 0) {
127
+ lines.push(`${errors.toLocaleString()} chunks failed to embed.`);
128
+ }
117
129
  }
118
130
 
119
131
  return lines.join("\n");
@@ -17,6 +17,33 @@ import { err, ok } from "../types";
17
17
  * Uses EXISTS-based queries to avoid duplicates from multiple docs sharing mirror_hash.
18
18
  */
19
19
  export function createVectorStatsPort(db: Database): VectorStatsPort {
20
+ function buildActiveDocumentExistsClause(collection?: string): {
21
+ sql: string;
22
+ params: string[];
23
+ } {
24
+ if (collection) {
25
+ return {
26
+ sql: `
27
+ EXISTS (
28
+ SELECT 1 FROM documents d
29
+ WHERE d.mirror_hash = c.mirror_hash AND d.active = 1 AND d.collection = ?
30
+ )
31
+ `,
32
+ params: [collection],
33
+ };
34
+ }
35
+
36
+ return {
37
+ sql: `
38
+ EXISTS (
39
+ SELECT 1 FROM documents d
40
+ WHERE d.mirror_hash = c.mirror_hash AND d.active = 1
41
+ )
42
+ `,
43
+ params: [],
44
+ };
45
+ }
46
+
20
47
  return {
21
48
  countVectors(model: string): Promise<StoreResult<number>> {
22
49
  try {
@@ -36,19 +63,18 @@ export function createVectorStatsPort(db: Database): VectorStatsPort {
36
63
  }
37
64
  },
38
65
 
39
- countBacklog(model: string): Promise<StoreResult<number>> {
66
+ countBacklog(
67
+ model: string,
68
+ options?: { collection?: string }
69
+ ): Promise<StoreResult<number>> {
40
70
  try {
71
+ const activeDoc = buildActiveDocumentExistsClause(options?.collection);
41
72
  // Count chunks needing embedding (fast for progress display)
42
73
  // Uses EXISTS to avoid duplicates when multiple docs share mirror_hash
43
- const result = db
44
- .prepare(
45
- `
74
+ const sql = `
46
75
  SELECT COUNT(*) as count
47
76
  FROM content_chunks c
48
- WHERE EXISTS (
49
- SELECT 1 FROM documents d
50
- WHERE d.mirror_hash = c.mirror_hash AND d.active = 1
51
- )
77
+ WHERE ${activeDoc.sql}
52
78
  AND NOT EXISTS (
53
79
  SELECT 1 FROM content_vectors v
54
80
  WHERE v.mirror_hash = c.mirror_hash
@@ -56,9 +82,10 @@ export function createVectorStatsPort(db: Database): VectorStatsPort {
56
82
  AND v.model = ?
57
83
  AND v.embedded_at >= c.created_at
58
84
  )
59
- `
60
- )
61
- .get(model) as { count: number };
85
+ `;
86
+ const result = db.prepare(sql).get(...activeDoc.params, model) as {
87
+ count: number;
88
+ };
62
89
  return Promise.resolve(ok(result.count));
63
90
  } catch (e) {
64
91
  return Promise.resolve(
@@ -72,11 +99,16 @@ export function createVectorStatsPort(db: Database): VectorStatsPort {
72
99
 
73
100
  getBacklog(
74
101
  model: string,
75
- options?: { limit?: number; after?: { mirrorHash: string; seq: number } }
102
+ options?: {
103
+ limit?: number;
104
+ after?: { mirrorHash: string; seq: number };
105
+ collection?: string;
106
+ }
76
107
  ): Promise<StoreResult<BacklogItem[]>> {
77
108
  try {
78
109
  const limit = options?.limit ?? 1000;
79
110
  const after = options?.after;
111
+ const activeDoc = buildActiveDocumentExistsClause(options?.collection);
80
112
 
81
113
  // Seek pagination: use cursor to avoid skipping items as backlog shrinks
82
114
  // Query structure changes based on whether we have a cursor
@@ -95,10 +127,7 @@ export function createVectorStatsPort(db: Database): VectorStatsPort {
95
127
  ELSE 'changed'
96
128
  END as reason
97
129
  FROM content_chunks c
98
- WHERE EXISTS (
99
- SELECT 1 FROM documents d
100
- WHERE d.mirror_hash = c.mirror_hash AND d.active = 1
101
- )
130
+ WHERE ${activeDoc.sql}
102
131
  AND NOT EXISTS (
103
132
  SELECT 1 FROM content_vectors v
104
133
  WHERE v.mirror_hash = c.mirror_hash
@@ -123,10 +152,7 @@ export function createVectorStatsPort(db: Database): VectorStatsPort {
123
152
  ELSE 'changed'
124
153
  END as reason
125
154
  FROM content_chunks c
126
- WHERE EXISTS (
127
- SELECT 1 FROM documents d
128
- WHERE d.mirror_hash = c.mirror_hash AND d.active = 1
129
- )
155
+ WHERE ${activeDoc.sql}
130
156
  AND NOT EXISTS (
131
157
  SELECT 1 FROM content_vectors v
132
158
  WHERE v.mirror_hash = c.mirror_hash
@@ -139,8 +165,16 @@ export function createVectorStatsPort(db: Database): VectorStatsPort {
139
165
  `;
140
166
 
141
167
  const params = after
142
- ? [model, model, after.mirrorHash, after.mirrorHash, after.seq, limit]
143
- : [model, model, limit];
168
+ ? [
169
+ model,
170
+ ...activeDoc.params,
171
+ model,
172
+ after.mirrorHash,
173
+ after.mirrorHash,
174
+ after.seq,
175
+ limit,
176
+ ]
177
+ : [model, ...activeDoc.params, model, limit];
144
178
 
145
179
  const results = db.prepare(sql).all(...params) as BacklogItem[];
146
180
  return Promise.resolve(ok(results));
@@ -108,11 +108,14 @@ export interface VectorStatsPort {
108
108
  countVectors(model: string): Promise<StoreResult<number>>;
109
109
 
110
110
  /** Count chunks needing embedding for a model */
111
- countBacklog(model: string): Promise<StoreResult<number>>;
111
+ countBacklog(
112
+ model: string,
113
+ options?: { collection?: string }
114
+ ): Promise<StoreResult<number>>;
112
115
 
113
116
  /** Get chunks needing embedding for a model (seek pagination) */
114
117
  getBacklog(
115
118
  model: string,
116
- options?: { limit?: number; after?: BacklogCursor }
119
+ options?: { limit?: number; after?: BacklogCursor; collection?: string }
117
120
  ): Promise<StoreResult<BacklogItem[]>>;
118
121
  }