@goondocks/myco 0.6.4 → 0.6.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/.claude-plugin/marketplace.json +1 -1
  2. package/.claude-plugin/plugin.json +1 -1
  3. package/dist/{chunk-YIQLYIHW.js → chunk-4B5RO2YV.js} +4 -4
  4. package/dist/{chunk-7WHF2OIZ.js → chunk-4DYD4HHG.js} +25 -7
  5. package/dist/chunk-4DYD4HHG.js.map +1 -0
  6. package/dist/{chunk-NLUE6CYG.js → chunk-54WVLTKD.js} +3 -3
  7. package/dist/{chunk-NL6WQO56.js → chunk-5LMRZDH3.js} +2 -2
  8. package/dist/{chunk-O6PERU7U.js → chunk-AHZN4Z34.js} +2 -2
  9. package/dist/{chunk-J4D4CROB.js → chunk-DYDBF5W6.js} +5 -1
  10. package/dist/chunk-DYDBF5W6.js.map +1 -0
  11. package/dist/{chunk-Z74SDEKE.js → chunk-F7GAYVWF.js} +2 -2
  12. package/dist/{chunk-H7PRCVGQ.js → chunk-F7PGDD2X.js} +2 -2
  13. package/dist/{chunk-2ZIBCEYO.js → chunk-LEK6DEAE.js} +3 -3
  14. package/dist/{chunk-QLUE3BUL.js → chunk-O6TBHGVO.js} +9 -2
  15. package/dist/chunk-O6TBHGVO.js.map +1 -0
  16. package/dist/{chunk-FPRXMJLT.js → chunk-OEGZ5YTJ.js} +2 -2
  17. package/dist/{chunk-4XVKZ3WA.js → chunk-TK7A4RX7.js} +10 -3
  18. package/dist/chunk-TK7A4RX7.js.map +1 -0
  19. package/dist/{chunk-UP4P4OAA.js → chunk-V6BJVYNH.js} +2 -2
  20. package/dist/{chunk-QN4W3JUA.js → chunk-XH34FX4C.js} +2 -2
  21. package/dist/{chunk-YTFXA4RX.js → chunk-YRIIBPJD.js} +3 -3
  22. package/dist/{cli-IHILSS6N.js → cli-OJYHLO4Y.js} +20 -20
  23. package/dist/{client-AGFNR2S4.js → client-SS3C5MF6.js} +5 -5
  24. package/dist/{curate-3D4GHKJH.js → curate-4CKEMOPV.js} +6 -6
  25. package/dist/{detect-providers-XEP4QA3R.js → detect-providers-LFIVJYQO.js} +3 -3
  26. package/dist/{digest-7HLJXL77.js → digest-ZLARHLLY.js} +8 -8
  27. package/dist/{init-ARQ53JOR.js → init-3LVKVQ4L.js} +5 -5
  28. package/dist/{logs-IENORIYR.js → logs-6CWVP574.js} +3 -3
  29. package/dist/{main-6AGPIMH2.js → main-RB727YRP.js} +149 -28
  30. package/dist/main-RB727YRP.js.map +1 -0
  31. package/dist/{rebuild-Q2ACEB6F.js → rebuild-QWVVCBCZ.js} +6 -6
  32. package/dist/{reprocess-CDEFGQOV.js → reprocess-YG3WLUI2.js} +8 -8
  33. package/dist/{restart-XCMILOL5.js → restart-UIP7US4U.js} +6 -6
  34. package/dist/{search-7W25SKCB.js → search-BQLBW5CS.js} +4 -4
  35. package/dist/{server-6UDN35QN.js → server-43KSJ65Q.js} +77 -29
  36. package/dist/{server-6UDN35QN.js.map → server-43KSJ65Q.js.map} +1 -1
  37. package/dist/{session-start-K6IGAC7H.js → session-start-6SHGT2AW.js} +6 -6
  38. package/dist/{setup-digest-X5PN27F4.js → setup-digest-X735EZSD.js} +5 -5
  39. package/dist/{setup-llm-S5OHQJXK.js → setup-llm-QBSTQO7N.js} +5 -5
  40. package/dist/src/cli.js +4 -4
  41. package/dist/src/daemon/main.js +4 -4
  42. package/dist/src/hooks/post-tool-use.js +5 -5
  43. package/dist/src/hooks/session-end.js +5 -5
  44. package/dist/src/hooks/session-start.js +4 -4
  45. package/dist/src/hooks/stop.js +5 -5
  46. package/dist/src/hooks/user-prompt-submit.js +5 -5
  47. package/dist/src/mcp/server.js +4 -4
  48. package/dist/{stats-TTSDXGJV.js → stats-QBLIEFWL.js} +6 -6
  49. package/dist/ui/assets/index-CjWGVHhF.css +1 -0
  50. package/dist/ui/assets/{index-CMSMi4Jb.js → index-Cq-H7wgE.js} +20 -20
  51. package/dist/ui/index.html +2 -2
  52. package/dist/{verify-TOWQHPBX.js → verify-X272WGBD.js} +4 -4
  53. package/dist/{version-36RVCQA6.js → version-XE4GYTBV.js} +4 -4
  54. package/package.json +1 -1
  55. package/dist/chunk-4XVKZ3WA.js.map +0 -1
  56. package/dist/chunk-7WHF2OIZ.js.map +0 -1
  57. package/dist/chunk-J4D4CROB.js.map +0 -1
  58. package/dist/chunk-QLUE3BUL.js.map +0 -1
  59. package/dist/main-6AGPIMH2.js.map +0 -1
  60. package/dist/ui/assets/index-08wKT7wS.css +0 -1
  61. /package/dist/{chunk-YIQLYIHW.js.map → chunk-4B5RO2YV.js.map} +0 -0
  62. /package/dist/{chunk-NLUE6CYG.js.map → chunk-54WVLTKD.js.map} +0 -0
  63. /package/dist/{chunk-NL6WQO56.js.map → chunk-5LMRZDH3.js.map} +0 -0
  64. /package/dist/{chunk-O6PERU7U.js.map → chunk-AHZN4Z34.js.map} +0 -0
  65. /package/dist/{chunk-Z74SDEKE.js.map → chunk-F7GAYVWF.js.map} +0 -0
  66. /package/dist/{chunk-H7PRCVGQ.js.map → chunk-F7PGDD2X.js.map} +0 -0
  67. /package/dist/{chunk-2ZIBCEYO.js.map → chunk-LEK6DEAE.js.map} +0 -0
  68. /package/dist/{chunk-FPRXMJLT.js.map → chunk-OEGZ5YTJ.js.map} +0 -0
  69. /package/dist/{chunk-UP4P4OAA.js.map → chunk-V6BJVYNH.js.map} +0 -0
  70. /package/dist/{chunk-QN4W3JUA.js.map → chunk-XH34FX4C.js.map} +0 -0
  71. /package/dist/{chunk-YTFXA4RX.js.map → chunk-YRIIBPJD.js.map} +0 -0
  72. /package/dist/{cli-IHILSS6N.js.map → cli-OJYHLO4Y.js.map} +0 -0
  73. /package/dist/{client-AGFNR2S4.js.map → client-SS3C5MF6.js.map} +0 -0
  74. /package/dist/{curate-3D4GHKJH.js.map → curate-4CKEMOPV.js.map} +0 -0
  75. /package/dist/{detect-providers-XEP4QA3R.js.map → detect-providers-LFIVJYQO.js.map} +0 -0
  76. /package/dist/{digest-7HLJXL77.js.map → digest-ZLARHLLY.js.map} +0 -0
  77. /package/dist/{init-ARQ53JOR.js.map → init-3LVKVQ4L.js.map} +0 -0
  78. /package/dist/{logs-IENORIYR.js.map → logs-6CWVP574.js.map} +0 -0
  79. /package/dist/{rebuild-Q2ACEB6F.js.map → rebuild-QWVVCBCZ.js.map} +0 -0
  80. /package/dist/{reprocess-CDEFGQOV.js.map → reprocess-YG3WLUI2.js.map} +0 -0
  81. /package/dist/{restart-XCMILOL5.js.map → restart-UIP7US4U.js.map} +0 -0
  82. /package/dist/{search-7W25SKCB.js.map → search-BQLBW5CS.js.map} +0 -0
  83. /package/dist/{session-start-K6IGAC7H.js.map → session-start-6SHGT2AW.js.map} +0 -0
  84. /package/dist/{setup-digest-X5PN27F4.js.map → setup-digest-X735EZSD.js.map} +0 -0
  85. /package/dist/{setup-llm-S5OHQJXK.js.map → setup-llm-QBSTQO7N.js.map} +0 -0
  86. /package/dist/{stats-TTSDXGJV.js.map → stats-QBLIEFWL.js.map} +0 -0
  87. /package/dist/{verify-TOWQHPBX.js.map → verify-X272WGBD.js.map} +0 -0
  88. /package/dist/{version-36RVCQA6.js.map → version-XE4GYTBV.js.map} +0 -0
package/.claude-plugin/marketplace.json
@@ -12,7 +12,7 @@
  "source": {
  "source": "npm",
  "package": "@goondocks/myco",
- "version": "0.6.3"
+ "version": "0.6.4"
  },
  "description": "Collective agent intelligence — captures session knowledge and serves it back via MCP",
  "license": "MIT",
package/.claude-plugin/plugin.json
@@ -1,6 +1,6 @@
  {
  "name": "myco",
- "version": "0.6.4",
+ "version": "0.6.5",
  "description": "Collective agent intelligence — captures session knowledge and serves it back to your team via MCP",
  "author": {
  "name": "goondocks-co",
package/dist/{chunk-YIQLYIHW.js → chunk-4B5RO2YV.js}
@@ -1,16 +1,16 @@
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
  import {
  getPluginVersion
- } from "./chunk-QN4W3JUA.js";
+ } from "./chunk-XH34FX4C.js";
  import {
  AgentRegistry
- } from "./chunk-Z74SDEKE.js";
+ } from "./chunk-F7GAYVWF.js";
  import {
  DAEMON_CLIENT_TIMEOUT_MS,
  DAEMON_HEALTH_CHECK_TIMEOUT_MS,
  DAEMON_HEALTH_RETRY_DELAYS,
  DAEMON_STALE_GRACE_PERIOD_MS
- } from "./chunk-J4D4CROB.js";
+ } from "./chunk-DYDBF5W6.js";

  // src/hooks/client.ts
  import fs from "fs";
@@ -178,4 +178,4 @@ var DaemonClient = class {
  export {
  DaemonClient
  };
- //# sourceMappingURL=chunk-YIQLYIHW.js.map
+ //# sourceMappingURL=chunk-4B5RO2YV.js.map
package/dist/{chunk-7WHF2OIZ.js → chunk-4DYD4HHG.js}
@@ -3,7 +3,7 @@ import {
  DAEMON_CLIENT_TIMEOUT_MS,
  EMBEDDING_REQUEST_TIMEOUT_MS,
  LLM_REQUEST_TIMEOUT_MS
- } from "./chunk-J4D4CROB.js";
+ } from "./chunk-DYDBF5W6.js";

  // src/intelligence/ollama.ts
  var ENDPOINT_GENERATE = "/api/generate";
@@ -144,6 +144,8 @@ var LmStudioBackend = class _LmStudioBackend {
  instanceId = null;
  contextWindow;
  defaultMaxTokens;
+ /** Set after a model rejects the `reasoning` param — skip it on future calls. */
+ reasoningUnsupported = false;
  constructor(config) {
  this.baseUrl = config?.base_url ?? _LmStudioBackend.DEFAULT_BASE_URL;
  this.model = config?.model ?? config?.summary_model ?? "llama3.2";
@@ -172,10 +174,11 @@ var LmStudioBackend = class _LmStudioBackend {
  if (opts?.systemPrompt) {
  body.system_prompt = opts.systemPrompt;
  }
- if (opts?.reasoning) {
+ const sendReasoning = opts?.reasoning && !this.reasoningUnsupported;
+ if (sendReasoning) {
  body.reasoning = opts.reasoning;
  }
- const response = await fetch(`${this.baseUrl}${ENDPOINT_CHAT}`, {
+ let response = await fetch(`${this.baseUrl}${ENDPOINT_CHAT}`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(body),
@@ -183,10 +186,25 @@ var LmStudioBackend = class _LmStudioBackend {
  });
  if (!response.ok) {
  const errorBody = await response.text().catch(() => "");
- if (response.status === 404 && this.instanceId) {
- this.instanceId = null;
+ if (sendReasoning && /does not support reasoning|"param"\s*:\s*"reasoning"/.test(errorBody)) {
+ this.reasoningUnsupported = true;
+ delete body.reasoning;
+ response = await fetch(`${this.baseUrl}${ENDPOINT_CHAT}`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(body),
+ signal: AbortSignal.timeout(opts?.timeoutMs ?? LLM_REQUEST_TIMEOUT_MS)
+ });
+ if (!response.ok) {
+ const retryError = await response.text().catch(() => "");
+ throw new Error(`LM Studio summarize failed: ${response.status} ${retryError.slice(0, 500)}`);
+ }
+ } else {
+ if (response.status === 404 && this.instanceId) {
+ this.instanceId = null;
+ }
+ throw new Error(`LM Studio summarize failed: ${response.status} ${errorBody.slice(0, 500)}`);
  }
- throw new Error(`LM Studio summarize failed: ${response.status} ${errorBody.slice(0, 500)}`);
  }
  const data = await response.json();
  const messageOutput = data.output.find((o) => o.type === "message");
@@ -307,4 +325,4 @@ export {
  OllamaBackend,
  LmStudioBackend
  };
- //# sourceMappingURL=chunk-7WHF2OIZ.js.map
+ //# sourceMappingURL=chunk-4DYD4HHG.js.map
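
The substantive change in this chunk: LM Studio's native /api/v1/chat rejects the `reasoning` parameter for non-reasoning models, so the backend now detects that specific error, retries once without the parameter, and caches the rejection so later calls skip it. A minimal standalone sketch of the pattern; the helper names and module-level flag are illustrative stand-ins for the LmStudioBackend internals, not package exports:

```ts
// Illustrative sketch of the retry-without-`reasoning` fallback shown above.
type ChatBody = Record<string, unknown>;

let reasoningUnsupported = false; // stands in for the new instance field

async function chatOnce(baseUrl: string, body: ChatBody, timeoutMs: number): Promise<Response> {
  return fetch(`${baseUrl}/api/v1/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
    signal: AbortSignal.timeout(timeoutMs),
  });
}

async function chatWithReasoningFallback(baseUrl: string, body: ChatBody, timeoutMs: number): Promise<Response> {
  if (reasoningUnsupported) delete body.reasoning;
  const sentReasoning = "reasoning" in body;
  let response = await chatOnce(baseUrl, body, timeoutMs);
  if (!response.ok) {
    const errorBody = await response.text().catch(() => "");
    // Same detection as the diff: prose error text, or the structured
    // `"param": "reasoning"` field in the error body.
    if (sentReasoning && /does not support reasoning|"param"\s*:\s*"reasoning"/.test(errorBody)) {
      reasoningUnsupported = true; // all future calls skip the param
      delete body.reasoning;
      response = await chatOnce(baseUrl, body, timeoutMs);
      if (!response.ok) throw new Error(`chat failed after retry: ${response.status}`);
    } else {
      throw new Error(`chat failed: ${response.status} ${errorBody.slice(0, 500)}`);
    }
  }
  return response;
}
```

Caching the rejection matters because these calls run on every digest cycle; without it, each cycle would pay the failed-request round trip again.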
package/dist/chunk-4DYD4HHG.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/intelligence/ollama.ts","../src/intelligence/lm-studio.ts"],"sourcesContent":["import type { LlmProvider, EmbeddingProvider, LlmResponse, EmbeddingResponse, LlmRequestOptions } from './llm.js';\nimport { LLM_REQUEST_TIMEOUT_MS, EMBEDDING_REQUEST_TIMEOUT_MS, DAEMON_CLIENT_TIMEOUT_MS } from '../constants.js';\n\ninterface OllamaConfig {\n model?: string;\n base_url?: string;\n context_window?: number;\n max_tokens?: number;\n // Legacy fields (ignored, kept for backward compat during migration)\n embedding_model?: string;\n summary_model?: string;\n}\n\n// Ollama API endpoints\nconst ENDPOINT_GENERATE = '/api/generate';\nconst ENDPOINT_EMBED = '/api/embed';\nconst ENDPOINT_TAGS = '/api/tags';\n\nexport class OllamaBackend implements LlmProvider, EmbeddingProvider {\n static readonly DEFAULT_BASE_URL = 'http://localhost:11434';\n readonly name = 'ollama';\n private baseUrl: string;\n private model: string;\n private defaultMaxTokens: number;\n private contextWindow: number | undefined;\n\n constructor(config?: OllamaConfig) {\n this.baseUrl = config?.base_url ?? OllamaBackend.DEFAULT_BASE_URL;\n this.model = config?.model ?? config?.summary_model ?? 'llama3.2';\n this.defaultMaxTokens = config?.max_tokens ?? 1024;\n this.contextWindow = config?.context_window;\n }\n\n async summarize(prompt: string, opts?: LlmRequestOptions): Promise<LlmResponse> {\n const maxTokens = opts?.maxTokens ?? this.defaultMaxTokens;\n\n // Send num_ctx from config or per-call override. Ollama reloads the model\n // on num_ctx changes, but consistent values (same num_ctx every call)\n // only cause one reload on first use. Without this, Ollama falls back to\n // its model default (often 2048), ignoring the user's configured context.\n const contextLength = opts?.contextLength ?? this.contextWindow;\n const options: Record<string, unknown> = { num_predict: maxTokens };\n if (contextLength) {\n options.num_ctx = contextLength;\n }\n\n const body: Record<string, unknown> = {\n model: this.model,\n prompt,\n stream: true,\n options,\n };\n\n // System prompt — sent as a separate field instead of concatenated into prompt\n if (opts?.systemPrompt) {\n body.system = opts.systemPrompt;\n }\n\n // Thinking control — false suppresses chain-of-thought for reasoning models\n if (opts?.reasoning) {\n body.think = opts.reasoning === 'off' ? false : opts.reasoning;\n }\n\n // Keep model loaded between requests (useful for digest cycles)\n if (opts?.keepAlive) {\n body.keep_alive = opts.keepAlive;\n }\n\n const response = await fetch(`${this.baseUrl}${ENDPOINT_GENERATE}`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n signal: AbortSignal.timeout(opts?.timeoutMs ?? LLM_REQUEST_TIMEOUT_MS),\n });\n\n if (!response.ok) {\n const errorBody = await response.text().catch(() => '');\n throw new Error(`Ollama summarize failed: ${response.status} ${errorBody.slice(0, 500)}`);\n }\n\n return this.readStream(response);\n }\n\n /** Read an Ollama streaming response (newline-delimited JSON) and accumulate the result. */\n private async readStream(response: Response): Promise<LlmResponse> {\n const reader = response.body!.getReader();\n const decoder = new TextDecoder();\n let text = '';\n let model = this.model;\n let buffer = '';\n\n try {\n for (;;) {\n const { done, value } = await reader.read();\n if (done) break;\n\n buffer += decoder.decode(value, { stream: true });\n const lines = buffer.split('\\n');\n buffer = lines.pop() ?? 
'';\n\n for (const line of lines) {\n if (!line.trim()) continue;\n const chunk = JSON.parse(line) as { response?: string; model?: string; error?: string };\n if (chunk.error) throw new Error(`Ollama stream error: ${chunk.error}`);\n text += chunk.response ?? '';\n if (chunk.model) model = chunk.model;\n }\n }\n\n // Process remaining buffer\n if (buffer.trim()) {\n const chunk = JSON.parse(buffer) as { response?: string; model?: string; error?: string };\n if (chunk.error) throw new Error(`Ollama stream error: ${chunk.error}`);\n text += chunk.response ?? '';\n if (chunk.model) model = chunk.model;\n }\n } finally {\n reader.releaseLock();\n }\n\n return { text, model };\n }\n\n async embed(text: string): Promise<EmbeddingResponse> {\n const response = await fetch(`${this.baseUrl}${ENDPOINT_EMBED}`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify({\n model: this.model,\n input: text,\n }),\n signal: AbortSignal.timeout(EMBEDDING_REQUEST_TIMEOUT_MS),\n });\n\n if (!response.ok) {\n throw new Error(`Ollama embed failed: ${response.status} ${response.statusText}`);\n }\n\n const data = await response.json() as { embeddings: number[][]; model: string };\n const embedding = data.embeddings[0];\n return { embedding, model: data.model, dimensions: embedding.length };\n }\n\n async isAvailable(): Promise<boolean> {\n try {\n const response = await fetch(`${this.baseUrl}${ENDPOINT_TAGS}`, {\n signal: AbortSignal.timeout(DAEMON_CLIENT_TIMEOUT_MS),\n });\n return response.ok;\n } catch {\n return false;\n }\n }\n\n /** List available models on this Ollama instance. */\n async listModels(timeoutMs?: number): Promise<string[]> {\n try {\n const response = await fetch(`${this.baseUrl}${ENDPOINT_TAGS}`, {\n signal: AbortSignal.timeout(timeoutMs ?? DAEMON_CLIENT_TIMEOUT_MS),\n });\n const data = await response.json() as { models: Array<{ name: string }> };\n return data.models.map((m) => m.name);\n } catch {\n return [];\n }\n }\n}\n","import type { LlmProvider, EmbeddingProvider, LlmResponse, EmbeddingResponse, LlmRequestOptions } from './llm.js';\nimport { LLM_REQUEST_TIMEOUT_MS, EMBEDDING_REQUEST_TIMEOUT_MS, DAEMON_CLIENT_TIMEOUT_MS } from '../constants.js';\n\ninterface LmStudioConfig {\n model?: string;\n base_url?: string;\n context_window?: number;\n max_tokens?: number;\n // Legacy fields\n embedding_model?: string;\n summary_model?: string;\n}\n\n// LM Studio API endpoints\nconst ENDPOINT_CHAT = '/api/v1/chat';\nconst ENDPOINT_MODELS_LOAD = '/api/v1/models/load';\nconst ENDPOINT_MODELS_LIST = '/v1/models';\nconst ENDPOINT_MODELS_NATIVE = '/api/v1/models';\nconst ENDPOINT_EMBEDDINGS = '/v1/embeddings';\n\n/** Shape of a loaded instance from the LM Studio native models API.\n * Config fields vary by engine — llama.cpp models include flash_attention\n * and offload_kv_cache_to_gpu, but other engines (MLX, etc.) may omit them. */\ninterface NativeLoadedInstance {\n id: string;\n config: {\n context_length: number;\n flash_attention?: boolean;\n offload_kv_cache_to_gpu?: boolean;\n };\n}\n\n/** Shape of a model entry from the LM Studio native models API. 
*/\ninterface NativeModelEntry {\n type: string;\n key: string;\n loaded_instances: NativeLoadedInstance[];\n}\n\nexport class LmStudioBackend implements LlmProvider, EmbeddingProvider {\n static readonly DEFAULT_BASE_URL = 'http://localhost:1234';\n readonly name = 'lm-studio';\n private baseUrl: string;\n private model: string;\n private instanceId: string | null = null;\n private contextWindow: number | undefined;\n private defaultMaxTokens: number;\n /** Set after a model rejects the `reasoning` param — skip it on future calls. */\n private reasoningUnsupported = false;\n\n constructor(config?: LmStudioConfig) {\n this.baseUrl = config?.base_url ?? LmStudioBackend.DEFAULT_BASE_URL;\n this.model = config?.model ?? config?.summary_model ?? 'llama3.2';\n this.contextWindow = config?.context_window;\n this.defaultMaxTokens = config?.max_tokens ?? 1024;\n }\n\n /**\n * Generate text using LM Studio's native REST API (/api/v1/chat).\n * Routes to our specific instance by ID when available, with model name +\n * context_length as fallback. This ensures correct routing when multiple\n * daemons share the same LM Studio, and graceful degradation when our\n * instance is evicted by idle TTL.\n */\n async summarize(prompt: string, opts?: LlmRequestOptions): Promise<LlmResponse> {\n const maxTokens = opts?.maxTokens ?? this.defaultMaxTokens;\n const contextLength = opts?.contextLength ?? this.contextWindow;\n\n const body: Record<string, unknown> = {\n model: this.instanceId ?? this.model,\n input: prompt,\n max_output_tokens: maxTokens,\n store: false,\n };\n\n // Always send context_length — even when routing by instance ID.\n // If our instance was evicted and LM Studio auto-loads, this ensures\n // the replacement gets the correct context window.\n if (contextLength) {\n body.context_length = contextLength;\n }\n\n // System prompt — sent separately from user content\n if (opts?.systemPrompt) {\n body.system_prompt = opts.systemPrompt;\n }\n\n // Reasoning control — 'off' suppresses chain-of-thought for reasoning models.\n // Non-reasoning models reject this param, so we retry without it on failure.\n const sendReasoning = opts?.reasoning && !this.reasoningUnsupported;\n if (sendReasoning) {\n body.reasoning = opts!.reasoning;\n }\n\n let response = await fetch(`${this.baseUrl}${ENDPOINT_CHAT}`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n signal: AbortSignal.timeout(opts?.timeoutMs ?? LLM_REQUEST_TIMEOUT_MS),\n });\n\n if (!response.ok) {\n const errorBody = await response.text().catch(() => '');\n\n // Model doesn't support reasoning — retry without it and remember\n // so subsequent calls skip the param entirely.\n if (sendReasoning && /does not support reasoning|\"param\"\\s*:\\s*\"reasoning\"/.test(errorBody)) {\n this.reasoningUnsupported = true;\n delete body.reasoning;\n response = await fetch(`${this.baseUrl}${ENDPOINT_CHAT}`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n signal: AbortSignal.timeout(opts?.timeoutMs ?? 
LLM_REQUEST_TIMEOUT_MS),\n });\n if (!response.ok) {\n const retryError = await response.text().catch(() => '');\n throw new Error(`LM Studio summarize failed: ${response.status} ${retryError.slice(0, 500)}`);\n }\n } else {\n // If our instance was evicted, clear the ID so ensureLoaded\n // reloads on the next cycle instead of hitting a stale ID repeatedly\n if (response.status === 404 && this.instanceId) {\n this.instanceId = null;\n }\n throw new Error(`LM Studio summarize failed: ${response.status} ${errorBody.slice(0, 500)}`);\n }\n }\n\n const data = await response.json() as {\n model_instance_id: string;\n output: Array<{ type: string; content: string }>;\n };\n const messageOutput = data.output.find((o) => o.type === 'message');\n const text = messageOutput?.content ?? '';\n return { text, model: data.model_instance_id };\n }\n\n /**\n * Generate embeddings using LM Studio's OpenAI-compatible endpoint.\n * (The native API doesn't have an embedding endpoint — OpenAI-compat is fine here.)\n */\n async embed(text: string): Promise<EmbeddingResponse> {\n const response = await fetch(`${this.baseUrl}${ENDPOINT_EMBEDDINGS}`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify({\n model: this.model,\n input: text,\n }),\n signal: AbortSignal.timeout(EMBEDDING_REQUEST_TIMEOUT_MS),\n });\n\n if (!response.ok) {\n throw new Error(`LM Studio embed failed: ${response.status}`);\n }\n\n const data = await response.json() as {\n data: Array<{ embedding: number[] }>;\n model: string;\n };\n const embedding = data.data[0].embedding;\n return { embedding, model: data.model, dimensions: embedding.length };\n }\n\n /**\n * Ensure a model instance is loaded and capture its ID for routing.\n * Called every digest cycle so it recovers from idle TTL eviction.\n *\n * Strategy: reuse ANY loaded instance of this model. Only load a new one\n * when zero instances exist. This avoids the previous bug where strict\n * config matching (context_length, offload_kv_cache_to_gpu) caused new\n * instances to spawn every cycle — exhausting system resources.\n *\n * context_length is set per-request on /api/v1/chat, so we don't need\n * to match it at load time. Load-time-only params like\n * offload_kv_cache_to_gpu are llama.cpp-specific and may not apply to\n * all models (e.g., glm-4.7-flash has no KV cache setting).\n */\n async ensureLoaded(contextLength?: number, gpuKvCache?: boolean): Promise<void> {\n // Query native API for existing loaded instances of this model\n const instances = await this.getLoadedInstances();\n\n if (instances.length > 0) {\n // Reuse the first available instance — don't reject over config differences.\n // context_length is set per-request; load-time params like kv_cache are\n // model-dependent and may not even appear in the instance config.\n this.instanceId = instances[0].id;\n return;\n }\n\n // No instances loaded — load one with our preferred settings.\n // These are hints; LM Studio silently ignores params that don't apply to the model's engine.\n const ctx = contextLength ?? 
this.contextWindow;\n const body: Record<string, unknown> = {\n model: this.model,\n // llama.cpp-specific — ignored by other engines (MLX, etc.)\n flash_attention: true,\n };\n if (ctx) {\n body.context_length = ctx;\n }\n if (gpuKvCache) {\n body.offload_kv_cache_to_gpu = true;\n }\n\n const response = await fetch(`${this.baseUrl}${ENDPOINT_MODELS_LOAD}`, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify(body),\n signal: AbortSignal.timeout(LLM_REQUEST_TIMEOUT_MS),\n });\n\n if (!response.ok) {\n const errorBody = await response.text().catch(() => '');\n throw new Error(`LM Studio model load failed: ${response.status} ${errorBody.slice(0, 200)}`);\n }\n\n const loadResult = await response.json() as Record<string, unknown>;\n const id = (loadResult.instance_id ?? loadResult.id ?? loadResult.model_instance_id) as string | undefined;\n if (id) {\n this.instanceId = id;\n }\n }\n\n /**\n * Query the LM Studio native API for loaded instances of this model.\n * Returns an empty array if the API is unavailable or the model has no loaded instances.\n */\n private async getLoadedInstances(): Promise<NativeLoadedInstance[]> {\n try {\n const response = await fetch(`${this.baseUrl}${ENDPOINT_MODELS_NATIVE}`, {\n signal: AbortSignal.timeout(DAEMON_CLIENT_TIMEOUT_MS),\n });\n if (!response.ok) return [];\n\n const data = await response.json() as { models: NativeModelEntry[] };\n const entry = data.models.find((m) => m.key === this.model);\n return entry?.loaded_instances ?? [];\n } catch {\n return [];\n }\n }\n\n async isAvailable(): Promise<boolean> {\n try {\n const response = await fetch(`${this.baseUrl}${ENDPOINT_MODELS_LIST}`, {\n signal: AbortSignal.timeout(DAEMON_CLIENT_TIMEOUT_MS),\n });\n return response.ok;\n } catch {\n return false;\n }\n }\n\n /** List available models on this LM Studio instance. */\n async listModels(timeoutMs?: number): Promise<string[]> {\n try {\n const response = await fetch(`${this.baseUrl}${ENDPOINT_MODELS_LIST}`, {\n signal: AbortSignal.timeout(timeoutMs ?? 
DAEMON_CLIENT_TIMEOUT_MS),\n });\n const data = await response.json() as { data: Array<{ id: string }> };\n return data.data.map((m) => m.id);\n } catch {\n return [];\n }\n }\n}\n"],"mappings":";;;;;;;;AAcA,IAAM,oBAAoB;AAC1B,IAAM,iBAAiB;AACvB,IAAM,gBAAgB;AAEf,IAAM,gBAAN,MAAM,eAAwD;AAAA,EACnE,OAAgB,mBAAmB;AAAA,EAC1B,OAAO;AAAA,EACR;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAER,YAAY,QAAuB;AACjC,SAAK,UAAU,QAAQ,YAAY,eAAc;AACjD,SAAK,QAAQ,QAAQ,SAAS,QAAQ,iBAAiB;AACvD,SAAK,mBAAmB,QAAQ,cAAc;AAC9C,SAAK,gBAAgB,QAAQ;AAAA,EAC/B;AAAA,EAEA,MAAM,UAAU,QAAgB,MAAgD;AAC9E,UAAM,YAAY,MAAM,aAAa,KAAK;AAM1C,UAAM,gBAAgB,MAAM,iBAAiB,KAAK;AAClD,UAAM,UAAmC,EAAE,aAAa,UAAU;AAClE,QAAI,eAAe;AACjB,cAAQ,UAAU;AAAA,IACpB;AAEA,UAAM,OAAgC;AAAA,MACpC,OAAO,KAAK;AAAA,MACZ;AAAA,MACA,QAAQ;AAAA,MACR;AAAA,IACF;AAGA,QAAI,MAAM,cAAc;AACtB,WAAK,SAAS,KAAK;AAAA,IACrB;AAGA,QAAI,MAAM,WAAW;AACnB,WAAK,QAAQ,KAAK,cAAc,QAAQ,QAAQ,KAAK;AAAA,IACvD;AAGA,QAAI,MAAM,WAAW;AACnB,WAAK,aAAa,KAAK;AAAA,IACzB;AAEA,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,iBAAiB,IAAI;AAAA,MAClE,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,MACzB,QAAQ,YAAY,QAAQ,MAAM,aAAa,sBAAsB;AAAA,IACvE,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,MAAM,EAAE;AACtD,YAAM,IAAI,MAAM,4BAA4B,SAAS,MAAM,IAAI,UAAU,MAAM,GAAG,GAAG,CAAC,EAAE;AAAA,IAC1F;AAEA,WAAO,KAAK,WAAW,QAAQ;AAAA,EACjC;AAAA;AAAA,EAGA,MAAc,WAAW,UAA0C;AACjE,UAAM,SAAS,SAAS,KAAM,UAAU;AACxC,UAAM,UAAU,IAAI,YAAY;AAChC,QAAI,OAAO;AACX,QAAI,QAAQ,KAAK;AACjB,QAAI,SAAS;AAEb,QAAI;AACF,iBAAS;AACP,cAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAC1C,YAAI,KAAM;AAEV,kBAAU,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AAChD,cAAM,QAAQ,OAAO,MAAM,IAAI;AAC/B,iBAAS,MAAM,IAAI,KAAK;AAExB,mBAAW,QAAQ,OAAO;AACxB,cAAI,CAAC,KAAK,KAAK,EAAG;AAClB,gBAAM,QAAQ,KAAK,MAAM,IAAI;AAC7B,cAAI,MAAM,MAAO,OAAM,IAAI,MAAM,wBAAwB,MAAM,KAAK,EAAE;AACtE,kBAAQ,MAAM,YAAY;AAC1B,cAAI,MAAM,MAAO,SAAQ,MAAM;AAAA,QACjC;AAAA,MACF;AAGA,UAAI,OAAO,KAAK,GAAG;AACjB,cAAM,QAAQ,KAAK,MAAM,MAAM;AAC/B,YAAI,MAAM,MAAO,OAAM,IAAI,MAAM,wBAAwB,MAAM,KAAK,EAAE;AACtE,gBAAQ,MAAM,YAAY;AAC1B,YAAI,MAAM,MAAO,SAAQ,MAAM;AAAA,MACjC;AAAA,IACF,UAAE;AACA,aAAO,YAAY;AAAA,IACrB;AAEA,WAAO,EAAE,MAAM,MAAM;AAAA,EACvB;AAAA,EAEA,MAAM,MAAM,MAA0C;AACpD,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,cAAc,IAAI;AAAA,MAC/D,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9C,MAAM,KAAK,UAAU;AAAA,QACnB,OAAO,KAAK;AAAA,QACZ,OAAO;AAAA,MACT,CAAC;AAAA,MACD,QAAQ,YAAY,QAAQ,4BAA4B;AAAA,IAC1D,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,IAAI,MAAM,wBAAwB,SAAS,MAAM,IAAI,SAAS,UAAU,EAAE;AAAA,IAClF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AACjC,UAAM,YAAY,KAAK,WAAW,CAAC;AACnC,WAAO,EAAE,WAAW,OAAO,KAAK,OAAO,YAAY,UAAU,OAAO;AAAA,EACtE;AAAA,EAEA,MAAM,cAAgC;AACpC,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,aAAa,IAAI;AAAA,QAC9D,QAAQ,YAAY,QAAQ,wBAAwB;AAAA,MACtD,CAAC;AACD,aAAO,SAAS;AAAA,IAClB,QAAQ;AACN,aAAO;AAAA,IACT;AAAA,EACF;AAAA;AAAA,EAGA,MAAM,WAAW,WAAuC;AACtD,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,aAAa,IAAI;AAAA,QAC9D,QAAQ,YAAY,QAAQ,aAAa,wBAAwB;AAAA,MACnE,CAAC;AACD,YAAM,OAAO,MAAM,SAAS,KAAK;AACjC,aAAO,KAAK,OAAO,IAAI,CAAC,MAAM,EAAE,IAAI;AAAA,IACtC,QAAQ;AACN,aAAO,CAAC;AAAA,IACV;AAAA,EACF;AACF;;;ACxJA,IAAM,gBAAgB;AACtB,IAAM,uBAAuB;AAC7B,IAAM,uBAAuB;AAC7B,IAAM,yBAAyB;AAC/B,IAAM,sBAAsB;AAqBrB,IAAM,kBAAN,MAAM,iBAA0D;AAAA,EACrE,OAAgB,mBAAmB;AAAA,EAC1B,OAAO;AAAA,EACR;AAAA,EACA;AAAA,EACA,aAA4B;AAAA,EAC5B;AAAA,EACA;AAAA;AAAA,EAEA,uBAAuB;AAAA,EAE/B,YAAY,QAAyB;AACnC,SAAK,UAAU,QAAQ,YAAY,iBAAgB;AACnD,SAAK,QAAQ,QAAQ,SAAS,QAAQ,iBAAiB;AACvD,SAAK,gBAAgB,QAAQ;AAC7B,SAAK,mBAAmB,QAAQ,cAAc;AAAA,EAChD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,UAAU,QAAgB,MAAgD;A
AC9E,UAAM,YAAY,MAAM,aAAa,KAAK;AAC1C,UAAM,gBAAgB,MAAM,iBAAiB,KAAK;AAElD,UAAM,OAAgC;AAAA,MACpC,OAAO,KAAK,cAAc,KAAK;AAAA,MAC/B,OAAO;AAAA,MACP,mBAAmB;AAAA,MACnB,OAAO;AAAA,IACT;AAKA,QAAI,eAAe;AACjB,WAAK,iBAAiB;AAAA,IACxB;AAGA,QAAI,MAAM,cAAc;AACtB,WAAK,gBAAgB,KAAK;AAAA,IAC5B;AAIA,UAAM,gBAAgB,MAAM,aAAa,CAAC,KAAK;AAC/C,QAAI,eAAe;AACjB,WAAK,YAAY,KAAM;AAAA,IACzB;AAEA,QAAI,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,aAAa,IAAI;AAAA,MAC5D,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,MACzB,QAAQ,YAAY,QAAQ,MAAM,aAAa,sBAAsB;AAAA,IACvE,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,MAAM,EAAE;AAItD,UAAI,iBAAiB,uDAAuD,KAAK,SAAS,GAAG;AAC3F,aAAK,uBAAuB;AAC5B,eAAO,KAAK;AACZ,mBAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,aAAa,IAAI;AAAA,UACxD,QAAQ;AAAA,UACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,UAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,UACzB,QAAQ,YAAY,QAAQ,MAAM,aAAa,sBAAsB;AAAA,QACvE,CAAC;AACD,YAAI,CAAC,SAAS,IAAI;AAChB,gBAAM,aAAa,MAAM,SAAS,KAAK,EAAE,MAAM,MAAM,EAAE;AACvD,gBAAM,IAAI,MAAM,+BAA+B,SAAS,MAAM,IAAI,WAAW,MAAM,GAAG,GAAG,CAAC,EAAE;AAAA,QAC9F;AAAA,MACF,OAAO;AAGL,YAAI,SAAS,WAAW,OAAO,KAAK,YAAY;AAC9C,eAAK,aAAa;AAAA,QACpB;AACA,cAAM,IAAI,MAAM,+BAA+B,SAAS,MAAM,IAAI,UAAU,MAAM,GAAG,GAAG,CAAC,EAAE;AAAA,MAC7F;AAAA,IACF;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAIjC,UAAM,gBAAgB,KAAK,OAAO,KAAK,CAAC,MAAM,EAAE,SAAS,SAAS;AAClE,UAAM,OAAO,eAAe,WAAW;AACvC,WAAO,EAAE,MAAM,OAAO,KAAK,kBAAkB;AAAA,EAC/C;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,MAAM,MAA0C;AACpD,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,mBAAmB,IAAI;AAAA,MACpE,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9C,MAAM,KAAK,UAAU;AAAA,QACnB,OAAO,KAAK;AAAA,QACZ,OAAO;AAAA,MACT,CAAC;AAAA,MACD,QAAQ,YAAY,QAAQ,4BAA4B;AAAA,IAC1D,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,IAAI,MAAM,2BAA2B,SAAS,MAAM,EAAE;AAAA,IAC9D;AAEA,UAAM,OAAO,MAAM,SAAS,KAAK;AAIjC,UAAM,YAAY,KAAK,KAAK,CAAC,EAAE;AAC/B,WAAO,EAAE,WAAW,OAAO,KAAK,OAAO,YAAY,UAAU,OAAO;AAAA,EACtE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAgBA,MAAM,aAAa,eAAwB,YAAqC;AAE9E,UAAM,YAAY,MAAM,KAAK,mBAAmB;AAEhD,QAAI,UAAU,SAAS,GAAG;AAIxB,WAAK,aAAa,UAAU,CAAC,EAAE;AAC/B;AAAA,IACF;AAIA,UAAM,MAAM,iBAAiB,KAAK;AAClC,UAAM,OAAgC;AAAA,MACpC,OAAO,KAAK;AAAA;AAAA,MAEZ,iBAAiB;AAAA,IACnB;AACA,QAAI,KAAK;AACP,WAAK,iBAAiB;AAAA,IACxB;AACA,QAAI,YAAY;AACd,WAAK,0BAA0B;AAAA,IACjC;AAEA,UAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,oBAAoB,IAAI;AAAA,MACrE,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9C,MAAM,KAAK,UAAU,IAAI;AAAA,MACzB,QAAQ,YAAY,QAAQ,sBAAsB;AAAA,IACpD,CAAC;AAED,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,YAAY,MAAM,SAAS,KAAK,EAAE,MAAM,MAAM,EAAE;AACtD,YAAM,IAAI,MAAM,gCAAgC,SAAS,MAAM,IAAI,UAAU,MAAM,GAAG,GAAG,CAAC,EAAE;AAAA,IAC9F;AAEA,UAAM,aAAa,MAAM,SAAS,KAAK;AACvC,UAAM,KAAM,WAAW,eAAe,WAAW,MAAM,WAAW;AAClE,QAAI,IAAI;AACN,WAAK,aAAa;AAAA,IACpB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAc,qBAAsD;AAClE,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,sBAAsB,IAAI;AAAA,QACvE,QAAQ,YAAY,QAAQ,wBAAwB;AAAA,MACtD,CAAC;AACD,UAAI,CAAC,SAAS,GAAI,QAAO,CAAC;AAE1B,YAAM,OAAO,MAAM,SAAS,KAAK;AACjC,YAAM,QAAQ,KAAK,OAAO,KAAK,CAAC,MAAM,EAAE,QAAQ,KAAK,KAAK;AAC1D,aAAO,OAAO,oBAAoB,CAAC;AAAA,IACrC,QAAQ;AACN,aAAO,CAAC;AAAA,IACV;AAAA,EACF;AAAA,EAEA,MAAM,cAAgC;AACpC,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,oBAAoB,IAAI;AAAA,QACrE,QAAQ,YAAY,QAAQ,wBAAwB;AAAA,MACtD,CAAC;AACD,aAAO,SAAS;AAAA,IAClB,QAAQ;AACN,aAAO;AAAA,IACT;AAAA,EACF;AAAA;AAAA,EAGA,MAAM,WAAW,WAAuC;AACtD,QAAI;AACF,YAAM,WAAW,MAAM,MAAM,GAAG,KAAK,OAAO,GAAG,oBAAoB,IAAI;AAAA,QACrE,QAAQ,YAAY,QAAQ,aAAa,wBAAwB;AAAA,MACnE,CAAC;AACD,YAAM,OAAO,MAAM,SAAS,KAAK;AACjC,aAAO,KAAK,KAAK,IAAI,CAAC,MAAM,EAAE,EAAE;AAAA,IAClC,QAAQ;AACN,aAAO,C
AAC;AAAA,IACV;AAAA,EACF;AACF;","names":[]}
package/dist/{chunk-NLUE6CYG.js → chunk-54WVLTKD.js}
@@ -2,10 +2,10 @@ import { createRequire as __cr } from 'node:module'; const require = __cr(import
  import {
  LmStudioBackend,
  OllamaBackend
- } from "./chunk-7WHF2OIZ.js";
+ } from "./chunk-4DYD4HHG.js";
  import {
  LLM_REQUEST_TIMEOUT_MS
- } from "./chunk-J4D4CROB.js";
+ } from "./chunk-DYDBF5W6.js";

  // node_modules/@anthropic-ai/sdk/internal/tslib.mjs
  function __classPrivateFieldSet(receiver, state, value, kind, f) {
@@ -4913,4 +4913,4 @@ export {
  createLlmProvider,
  createEmbeddingProvider
  };
- //# sourceMappingURL=chunk-NLUE6CYG.js.map
+ //# sourceMappingURL=chunk-54WVLTKD.js.map
package/dist/{chunk-NL6WQO56.js → chunk-5LMRZDH3.js}
@@ -4,7 +4,7 @@ import {
  } from "./chunk-4RMSHZE4.js";
  import {
  isProcessAlive
- } from "./chunk-YTFXA4RX.js";
+ } from "./chunk-YRIIBPJD.js";

  // src/services/stats.ts
  import fs from "fs";
@@ -62,4 +62,4 @@ function gatherStats(vaultDir, index, vectorIndex) {
  export {
  gatherStats
  };
- //# sourceMappingURL=chunk-NL6WQO56.js.map
+ //# sourceMappingURL=chunk-5LMRZDH3.js.map
package/dist/{chunk-O6PERU7U.js → chunk-AHZN4Z34.js}
@@ -1,7 +1,7 @@
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
  import {
  STDIN_TIMEOUT_MS
- } from "./chunk-J4D4CROB.js";
+ } from "./chunk-DYDBF5W6.js";

  // src/hooks/read-stdin.ts
  function readStdin() {
@@ -18,4 +18,4 @@ function readStdin() {
  export {
  readStdin
  };
- //# sourceMappingURL=chunk-O6PERU7U.js.map
+ //# sourceMappingURL=chunk-AHZN4Z34.js.map
package/dist/{chunk-J4D4CROB.js → chunk-DYDBF5W6.js}
@@ -11,6 +11,8 @@ var CONTENT_SNIPPET_CHARS = 120;
  var TOOL_OUTPUT_PREVIEW_CHARS = 200;
  var SESSION_SUMMARY_PREVIEW_CHARS = 300;
  var RECALL_SUMMARY_PREVIEW_CHARS = 200;
+ var LOG_PROMPT_PREVIEW_CHARS = 50;
+ var LOG_MESSAGE_PREVIEW_CHARS = 80;
  var CONTEXT_PLAN_PREVIEW_CHARS = 100;
  var CONTEXT_SESSION_PREVIEW_CHARS = 80;
  var CONTEXT_SPORE_PREVIEW_CHARS = 80;
@@ -91,6 +93,8 @@ export {
  TOOL_OUTPUT_PREVIEW_CHARS,
  SESSION_SUMMARY_PREVIEW_CHARS,
  RECALL_SUMMARY_PREVIEW_CHARS,
+ LOG_PROMPT_PREVIEW_CHARS,
+ LOG_MESSAGE_PREVIEW_CHARS,
  CONTEXT_PLAN_PREVIEW_CHARS,
  CONTEXT_SESSION_PREVIEW_CHARS,
  CONTEXT_SPORE_PREVIEW_CHARS,
@@ -140,4 +144,4 @@ export {
  CONSOLIDATION_VECTOR_FETCH_LIMIT,
  CONSOLIDATION_MAX_TOKENS
  };
- //# sourceMappingURL=chunk-J4D4CROB.js.map
+ //# sourceMappingURL=chunk-DYDBF5W6.js.map
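
Per the source comments in the map below ("Max chars for a user prompt preview in log entries"), the two new constants are char budgets for `.slice()`-style previews in structured log fields. A hedged sketch of how a hook might apply them; the constants and the `info(component, message, data)` logger shape come from the package, but this exact call site is an assumption:

```ts
// Assumed call site: the diff adds the constants, the wiring is illustrative.
import { LOG_PROMPT_PREVIEW_CHARS, LOG_MESSAGE_PREVIEW_CHARS } from "./constants.js";

interface StructuredLogger {
  info(component: string, message: string, data?: Record<string, unknown>): void;
}

function logTurn(logger: StructuredLogger, prompt: string, assistantMessage: string): void {
  logger.info("hooks", "turn captured", {
    // Short previews keep structured log entries compact; the full text
    // is persisted elsewhere in the pipeline.
    prompt: prompt.slice(0, LOG_PROMPT_PREVIEW_CHARS),
    message: assistantMessage.slice(0, LOG_MESSAGE_PREVIEW_CHARS),
  });
}
```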
package/dist/chunk-DYDBF5W6.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/constants.ts"],"sourcesContent":["/**\n * Shared constants for the Myco codebase.\n * Per CLAUDE.md: \"No Magic Literals — Numeric and string constants\n * MUST NOT appear inline in logic.\"\n */\n\n// --- Token estimation ---\n/** Approximate characters per token for the chars/4 heuristic. */\nexport const CHARS_PER_TOKEN = 4;\n\n/** Estimate token count from character length using the CHARS_PER_TOKEN heuristic. */\nexport function estimateTokens(text: string): number {\n return Math.ceil(text.length / CHARS_PER_TOKEN);\n}\n\n// --- Embedding ---\n/** Max characters of text sent to the embedding model. */\nexport const EMBEDDING_INPUT_LIMIT = 8000;\n\n// --- Truncation limits (display/preview) ---\n/** Max chars for a user prompt preview in event summaries. */\nexport const PROMPT_PREVIEW_CHARS = 300;\n/** Max chars for an AI response preview in event summaries. */\nexport const AI_RESPONSE_PREVIEW_CHARS = 500;\n/** Max chars for a command string preview. */\nexport const COMMAND_PREVIEW_CHARS = 80;\n/** Max chars for a content snippet in search results. */\nexport const CONTENT_SNIPPET_CHARS = 120;\n/** Max chars for a tool output preview in hooks. */\nexport const TOOL_OUTPUT_PREVIEW_CHARS = 200;\n/** Max chars for a session summary preview in MCP tools. */\nexport const SESSION_SUMMARY_PREVIEW_CHARS = 300;\n/** Max chars for a recall summary preview. */\nexport const RECALL_SUMMARY_PREVIEW_CHARS = 200;\n\n// --- Log preview limits (short previews for structured log fields) ---\n/** Max chars for a user prompt preview in log entries. */\nexport const LOG_PROMPT_PREVIEW_CHARS = 50;\n/** Max chars for an assistant message preview in log entries. */\nexport const LOG_MESSAGE_PREVIEW_CHARS = 80;\n\n// --- Context injection layer budgets (chars, not tokens — used with .slice()) ---\nexport const CONTEXT_PLAN_PREVIEW_CHARS = 100;\nexport const CONTEXT_SESSION_PREVIEW_CHARS = 80;\nexport const CONTEXT_SPORE_PREVIEW_CHARS = 80;\n\n// --- Processor maxTokens budgets ---\n/** Response token budget for observation extraction. */\nexport const EXTRACTION_MAX_TOKENS = 2048;\n/** Response token budget for session summary. */\nexport const SUMMARY_MAX_TOKENS = 512;\n/** Response token budget for session title generation. */\nexport const TITLE_MAX_TOKENS = 32;\n/** Response token budget for artifact classification. */\nexport const CLASSIFICATION_MAX_TOKENS = 1024;\n\n// --- Timeouts ---\n/** Daemon client HTTP request timeout (ms). */\nexport const DAEMON_CLIENT_TIMEOUT_MS = 2000;\n/** Health check timeout (ms) — fail fast if daemon isn't responding. */\nexport const DAEMON_HEALTH_CHECK_TIMEOUT_MS = 500;\n/** LLM request timeout (ms). All LLM calls are background daemon work — no need to be aggressive. */\nexport const LLM_REQUEST_TIMEOUT_MS = 180_000;\n/** Embedding request timeout (ms). Embeddings run in background batch processing — generous timeout. */\nexport const EMBEDDING_REQUEST_TIMEOUT_MS = 60_000;\n/** Digest LLM request timeout (ms). Digest cycles use large context windows and may need model loading time. */\nexport const DIGEST_LLM_REQUEST_TIMEOUT_MS = 600_000;\n/** Stdin read timeout for hooks (ms). */\nexport const STDIN_TIMEOUT_MS = 100;\n/** Chokidar write stability threshold (ms). */\nexport const FILE_WATCH_STABILITY_MS = 1000;\n/** Provider detection timeout for detect-providers CLI command (ms). */\nexport const PROVIDER_DETECT_TIMEOUT_MS = 3000;\n\n// --- Time ---\n/** Milliseconds in one day. 
*/\nexport const MS_PER_DAY = 24 * 60 * 60 * 1000;\n\n// --- Buffer cleanup ---\n/** Max age for stale buffer files before cleanup (ms). */\nexport const STALE_BUFFER_MAX_AGE_MS = 1 * MS_PER_DAY;\n\n// --- Retry backoff ---\n/** Retry delays for daemon health check (ms). */\nexport const DAEMON_HEALTH_RETRY_DELAYS = [100, 200, 400, 800, 1500];\n\n/** Grace period after daemon.json is written before stale checks can trigger a restart (ms).\n * Prevents rapid restart loops from concurrent hooks or session reloads. */\nexport const DAEMON_STALE_GRACE_PERIOD_MS = 60_000;\n\n/** Grace period for SIGTERM before escalating to SIGKILL (ms).\n * Gives the old daemon a chance to shut down cleanly, but force-kills\n * to guarantee the configured port is reclaimed. */\nexport const DAEMON_EVICT_TIMEOUT_MS = 3000;\n/** Poll interval when waiting for an evicted daemon to die (ms). */\nexport const DAEMON_EVICT_POLL_MS = 100;\n\n// --- Slug limits ---\n/** Max length for slugified artifact IDs. */\nexport const MAX_SLUG_LENGTH = 100;\n\n// --- Content preview for classification prompt ---\n/** Max chars of file content per candidate in classification prompt. */\nexport const CANDIDATE_CONTENT_PREVIEW = 2000;\n\n// --- Turn rendering ---\n/** Max file paths displayed per turn in session notes. */\nexport const TURN_MAX_FILES_DISPLAYED = 10;\n\n// --- Transcript mining ---\n/** Minimum content length to consider a transcript entry meaningful. */\nexport const MIN_TRANSCRIPT_CONTENT_LENGTH = 10;\n\n// --- Query limits ---\n/** Max recent sessions to check for lineage heuristics. */\nexport const LINEAGE_RECENT_SESSIONS_LIMIT = 5;\n/** Max related spores to query for session notes. */\nexport const RELATED_SPORES_LIMIT = 50;\n\n// --- Context injection ---\n/** Max active plans to inject at session start. */\nexport const SESSION_CONTEXT_MAX_PLANS = 3;\n/** Max spores to inject per prompt. */\nexport const PROMPT_CONTEXT_MAX_SPORES = 3;\n/** Minimum similarity score for prompt context injection (0-1). */\nexport const PROMPT_CONTEXT_MIN_SIMILARITY = 0.3;\n/** Max token budget for session-start context injection. */\nexport const SESSION_CONTEXT_MAX_TOKENS = 500;\n/** Max token budget for per-prompt context injection. */\nexport const PROMPT_CONTEXT_MAX_TOKENS = 300;\n/** Minimum prompt length to trigger context search. */\nexport const PROMPT_CONTEXT_MIN_LENGTH = 10;\n\n// --- MCP tool defaults ---\n/** Default result limit for myco_search. */\nexport const MCP_SEARCH_DEFAULT_LIMIT = 10;\n/** Default result limit for myco_sessions. */\nexport const MCP_SESSIONS_DEFAULT_LIMIT = 20;\n/** Default result limit for myco_logs. */\nexport const MCP_LOGS_DEFAULT_LIMIT = 50;\n\n// --- Digest — Tiers ---\n/** Available token-budget tiers for digest synthesis. */\nexport const DIGEST_TIERS = [1500, 3000, 5000, 7500, 10000] as const;\nexport type DigestTier = (typeof DIGEST_TIERS)[number];\n\n// --- Digest — Context window minimums per tier ---\n/** Minimum context window (tokens) required to run a digest at a given tier. */\nexport const DIGEST_TIER_MIN_CONTEXT: Record<number, number> = {\n 1500: 6500,\n 3000: 11500,\n 5000: 18500,\n 7500: 24500,\n 10000: 30500,\n};\n\n// --- Digest — Substrate ---\n/** Default minimum substrate notes required before a digest cycle runs. */\nexport const DIGEST_MIN_NOTES_FOR_CYCLE = 10;\n\n/** Scoring weights by note type when selecting substrate for synthesis. 
*/\nexport const DIGEST_SUBSTRATE_TYPE_WEIGHTS: Record<string, number> = {\n session: 3,\n spore: 3,\n plan: 2,\n artifact: 1,\n team: 1,\n};\n\n// --- LLM reasoning control ---\n/** Reasoning mode for all Myco LLM calls. Suppresses chain-of-thought tokens from reasoning models. */\nexport const LLM_REASONING_MODE = 'off' as const;\n\n// --- Digest — System prompt overhead estimate ---\n\n// --- Vault curation ---\n/** Max candidate spores after post-filtering for supersession check. */\nexport const SUPERSESSION_CANDIDATE_LIMIT = 5;\n\n/** Over-fetch from vector index before post-filtering by status/type. */\nexport const SUPERSESSION_VECTOR_FETCH_LIMIT = 20;\n\n/** Max output tokens for supersession LLM evaluation. */\nexport const SUPERSESSION_MAX_TOKENS = 256;\n\n/** Similarity threshold for clustering related spores in batch curation. */\nexport const CURATION_CLUSTER_SIMILARITY = 0.75;\n\n// --- Pipeline processing ---\n/** Default page size for pipeline items API listing. */\nexport const PIPELINE_ITEMS_DEFAULT_LIMIT = 50;\n\n// --- Pipeline retry ---\n/** Max retries for parse (structural) pipeline failures — fail fast. */\nexport const PIPELINE_PARSE_MAX_RETRIES = 1;\n/** Exponential backoff multiplier for successive pipeline retries. */\nexport const PIPELINE_BACKOFF_MULTIPLIER = 4;\n\n// --- Pipeline stages (ordered) ---\nexport const PIPELINE_STAGES = ['capture', 'extraction', 'embedding', 'consolidation', 'digest'] as const;\nexport type PipelineStage = typeof PIPELINE_STAGES[number];\n\n// --- Pipeline statuses ---\nexport const PIPELINE_STATUSES = ['pending', 'processing', 'succeeded', 'failed', 'blocked', 'skipped', 'poisoned'] as const;\nexport type PipelineStatus = typeof PIPELINE_STATUSES[number];\n\n// --- Provider roles for circuit breakers ---\nexport const PIPELINE_PROVIDER_ROLES = ['llm', 'embedding', 'digest-llm'] as const;\nexport type PipelineProviderRole = typeof PIPELINE_PROVIDER_ROLES[number];\n\n// --- Stage to provider role mapping ---\nexport const STAGE_PROVIDER_MAP: Record<PipelineStage, PipelineProviderRole | null> = {\n capture: null,\n extraction: 'llm',\n embedding: 'embedding',\n consolidation: 'digest-llm',\n digest: 'digest-llm',\n};\n\n/**\n * Stages processed by the pipeline tick timer.\n * Capture is handled at registration time, digest is gated by the metabolism timer.\n */\nexport const PIPELINE_TICK_STAGES: PipelineStage[] = ['extraction', 'embedding', 'consolidation'];\n\n// --- Item type to applicable stages ---\n// Sessions skip consolidation — consolidation applies to the spores\n// extracted FROM sessions, not the session work item itself.\n// Lineage detection stays outside the pipeline (fire-and-forget, non-critical).\nexport const ITEM_STAGE_MAP: Record<string, PipelineStage[]> = {\n session: ['capture', 'extraction', 'embedding', 'digest'],\n spore: ['capture', 'embedding', 'consolidation', 'digest'],\n artifact: ['capture', 'embedding', 'digest'],\n};\n\n// --- Automatic consolidation ---\n/** Minimum cluster size required before asking LLM to consolidate. */\nexport const CONSOLIDATION_MIN_CLUSTER_SIZE = 3;\n\n/** Over-fetch from vector index before post-filtering by status/type. */\nexport const CONSOLIDATION_VECTOR_FETCH_LIMIT = 20;\n\n/** Max output tokens for consolidation LLM synthesis.\n * Must be large enough for the full JSON response including content field. 
*/\nexport const CONSOLIDATION_MAX_TOKENS = 2048;\n"],"mappings":";;;AAQO,IAAM,kBAAkB;AAGxB,SAAS,eAAe,MAAsB;AACnD,SAAO,KAAK,KAAK,KAAK,SAAS,eAAe;AAChD;AAIO,IAAM,wBAAwB;AAI9B,IAAM,uBAAuB;AAM7B,IAAM,wBAAwB;AAE9B,IAAM,4BAA4B;AAElC,IAAM,gCAAgC;AAEtC,IAAM,+BAA+B;AAIrC,IAAM,2BAA2B;AAEjC,IAAM,4BAA4B;AAGlC,IAAM,6BAA6B;AACnC,IAAM,gCAAgC;AACtC,IAAM,8BAA8B;AAcpC,IAAM,2BAA2B;AAEjC,IAAM,iCAAiC;AAEvC,IAAM,yBAAyB;AAE/B,IAAM,+BAA+B;AAErC,IAAM,gCAAgC;AAEtC,IAAM,mBAAmB;AAEzB,IAAM,0BAA0B;AAEhC,IAAM,6BAA6B;AAInC,IAAM,aAAa,KAAK,KAAK,KAAK;AAIlC,IAAM,0BAA0B,IAAI;AAIpC,IAAM,6BAA6B,CAAC,KAAK,KAAK,KAAK,KAAK,IAAI;AAI5D,IAAM,+BAA+B;AAKrC,IAAM,0BAA0B;AAEhC,IAAM,uBAAuB;AAI7B,IAAM,kBAAkB;AAIxB,IAAM,4BAA4B;AAIlC,IAAM,2BAA2B;AAQjC,IAAM,gCAAgC;AAEtC,IAAM,uBAAuB;AAI7B,IAAM,4BAA4B;AAElC,IAAM,4BAA4B;AAElC,IAAM,gCAAgC;AAMtC,IAAM,4BAA4B;AAIlC,IAAM,2BAA2B;AAEjC,IAAM,6BAA6B;AAEnC,IAAM,yBAAyB;AAI/B,IAAM,eAAe,CAAC,MAAM,KAAM,KAAM,MAAM,GAAK;AAKnD,IAAM,0BAAkD;AAAA,EAC7D,MAAM;AAAA,EACN,KAAM;AAAA,EACN,KAAM;AAAA,EACN,MAAM;AAAA,EACN,KAAO;AACT;AAOO,IAAM,gCAAwD;AAAA,EACnE,SAAS;AAAA,EACT,OAAO;AAAA,EACP,MAAM;AAAA,EACN,UAAU;AAAA,EACV,MAAM;AACR;AAIO,IAAM,qBAAqB;AAM3B,IAAM,+BAA+B;AAGrC,IAAM,kCAAkC;AAGxC,IAAM,0BAA0B;AAGhC,IAAM,8BAA8B;AAIpC,IAAM,+BAA+B;AAIrC,IAAM,6BAA6B;AAEnC,IAAM,8BAA8B;AAGpC,IAAM,kBAAkB,CAAC,WAAW,cAAc,aAAa,iBAAiB,QAAQ;AAQxF,IAAM,0BAA0B,CAAC,OAAO,aAAa,YAAY;AAIjE,IAAM,qBAAyE;AAAA,EACpF,SAAS;AAAA,EACT,YAAY;AAAA,EACZ,WAAW;AAAA,EACX,eAAe;AAAA,EACf,QAAQ;AACV;AAMO,IAAM,uBAAwC,CAAC,cAAc,aAAa,eAAe;AAMzF,IAAM,iBAAkD;AAAA,EAC7D,SAAS,CAAC,WAAW,cAAc,aAAa,QAAQ;AAAA,EACxD,OAAO,CAAC,WAAW,aAAa,iBAAiB,QAAQ;AAAA,EACzD,UAAU,CAAC,WAAW,aAAa,QAAQ;AAC7C;AAIO,IAAM,iCAAiC;AAGvC,IAAM,mCAAmC;AAIzC,IAAM,2BAA2B;","names":[]}
package/dist/{chunk-Z74SDEKE.js → chunk-F7GAYVWF.js}
@@ -1,7 +1,7 @@
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
  import {
  PROMPT_PREVIEW_CHARS
- } from "./chunk-J4D4CROB.js";
+ } from "./chunk-DYDBF5W6.js";

  // src/agents/adapter.ts
  import fs from "fs";
@@ -351,4 +351,4 @@ export {
  claudeCodeAdapter,
  AgentRegistry
  };
- //# sourceMappingURL=chunk-Z74SDEKE.js.map
+ //# sourceMappingURL=chunk-F7GAYVWF.js.map
package/dist/{chunk-H7PRCVGQ.js → chunk-F7PGDD2X.js}
@@ -1,7 +1,7 @@
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
  import {
  LEVEL_ORDER
- } from "./chunk-QLUE3BUL.js";
+ } from "./chunk-O6TBHGVO.js";

  // src/logs/reader.ts
  import fs from "fs";
@@ -88,4 +88,4 @@ export {
  queryLogs,
  matchesFilter
  };
- //# sourceMappingURL=chunk-H7PRCVGQ.js.map
+ //# sourceMappingURL=chunk-F7PGDD2X.js.map
package/dist/{chunk-2ZIBCEYO.js → chunk-LEK6DEAE.js}
@@ -3,7 +3,7 @@ import {
  VaultWriter,
  indexNote,
  supersedeSpore
- } from "./chunk-UP4P4OAA.js";
+ } from "./chunk-V6BJVYNH.js";
  import {
  generateEmbedding
  } from "./chunk-RGVBGTD6.js";
@@ -13,7 +13,7 @@ import {
  import {
  DIGEST_TIERS,
  EMBEDDING_INPUT_LIMIT
- } from "./chunk-J4D4CROB.js";
+ } from "./chunk-DYDBF5W6.js";

  // src/mcp/tools/context.ts
  import fs from "fs";
@@ -110,4 +110,4 @@ export {
  consolidateSpores,
  handleMycoContext
  };
- //# sourceMappingURL=chunk-2ZIBCEYO.js.map
+ //# sourceMappingURL=chunk-LEK6DEAE.js.map
@@ -5,7 +5,7 @@ import fs from "fs";
5
5
  import path from "path";
6
6
 
7
7
  // src/daemon/log-buffer.ts
8
- var LOG_RING_BUFFER_CAPACITY = 1e3;
8
+ var LOG_RING_BUFFER_CAPACITY = 5e3;
9
9
  var LOG_QUERY_DEFAULT_LIMIT = 100;
10
10
  var LogRingBuffer = class {
11
11
  buffer;
@@ -49,6 +49,7 @@ var LogRingBuffer = class {
49
49
  const bufIdx = (this.head - this.count + i + this.capacity) % this.capacity;
50
50
  const entry = this.buffer[bufIdx];
51
51
  if (entry && LEVEL_ORDER[entry.level] >= minLevel) {
52
+ if (options?.component && entry.component !== options.component) continue;
52
53
  entries.push(entry);
53
54
  }
54
55
  }
@@ -104,6 +105,12 @@ var DaemonLogger = class {
104
105
  error(component, message, data) {
105
106
  this.write("error", component, message, data);
106
107
  }
108
+ /** Dispatch a log entry by dynamic level string. */
109
+ log(level, component, message, data) {
110
+ if (level in LEVEL_ORDER) {
111
+ this.write(level, component, message, data);
112
+ }
113
+ }
107
114
  close() {
108
115
  if (this.fd !== null) {
109
116
  fs.closeSync(this.fd);
@@ -158,4 +165,4 @@ export {
158
165
  LEVEL_ORDER,
159
166
  DaemonLogger
160
167
  };
161
- //# sourceMappingURL=chunk-QLUE3BUL.js.map
168
+ //# sourceMappingURL=chunk-O6TBHGVO.js.map
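
Three logger changes land in this chunk: ring-buffer capacity grows from 1,000 to 5,000 entries, `since()` gains a per-component filter, and `DaemonLogger` gains a dynamic-level `log()` dispatcher. A usage sketch of the cursor-based query loop, following the `since()` signature visible in the source map below; the component name and polling wiring are illustrative:

```ts
import { LogRingBuffer } from "./log-buffer.js";

const buffer = new LogRingBuffer(); // default capacity is now 5000 entries

// First poll: no cursor, so the newest entries are returned, filtered to one
// component at info level and above.
let { entries, cursor } = buffer.since(null, { component: "digest", level: "info", limit: 50 });

// Later polls resume from the returned cursor. `cursor_reset` means the
// buffer wrapped past the cursor, so the returned entries are a fresh
// window rather than a delta since the last poll.
const next = buffer.since(cursor, { component: "digest" });
if (next.cursor_reset) {
  entries = next.entries;
}
```

Note the filter is applied inside the scan loop, so a `limit` counts matching entries, not scanned ones.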
package/dist/chunk-O6TBHGVO.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/daemon/logger.ts","../src/daemon/log-buffer.ts"],"sourcesContent":["import fs from 'node:fs';\nimport path from 'node:path';\nimport { LogRingBuffer } from './log-buffer.js';\n\nexport interface LogEntry {\n timestamp: string;\n level: string;\n component: string;\n message: string;\n [key: string]: unknown;\n}\n\nexport type LogLevel = 'debug' | 'info' | 'warn' | 'error';\n\nexport const LEVEL_ORDER: Record<LogLevel, number> = {\n debug: 0, info: 1, warn: 2, error: 3,\n};\n\ninterface LoggerOptions {\n level?: LogLevel;\n maxSize?: number;\n maxFiles?: number;\n}\n\nexport class DaemonLogger {\n private logPath: string;\n private fd: number | null = null;\n private currentSize = 0;\n private level: LogLevel;\n private maxSize: number;\n private maxFiles: number;\n private logDir: string;\n private ringBuffer: LogRingBuffer;\n\n constructor(logDir: string, options: LoggerOptions = {}) {\n this.logDir = logDir;\n this.logPath = path.join(logDir, 'daemon.log');\n this.level = options.level ?? 'info';\n this.maxSize = options.maxSize ?? 5_242_880;\n this.maxFiles = options.maxFiles ?? 3;\n this.ringBuffer = new LogRingBuffer();\n\n fs.mkdirSync(logDir, { recursive: true });\n this.fd = fs.openSync(this.logPath, 'a');\n try {\n this.currentSize = fs.fstatSync(this.fd).size;\n } catch {\n this.currentSize = 0;\n }\n }\n\n debug(component: string, message: string, data?: Record<string, unknown>): void {\n this.write('debug', component, message, data);\n }\n\n info(component: string, message: string, data?: Record<string, unknown>): void {\n this.write('info', component, message, data);\n }\n\n warn(component: string, message: string, data?: Record<string, unknown>): void {\n this.write('warn', component, message, data);\n }\n\n error(component: string, message: string, data?: Record<string, unknown>): void {\n this.write('error', component, message, data);\n }\n\n /** Dispatch a log entry by dynamic level string. 
*/\n log(level: string, component: string, message: string, data?: Record<string, unknown>): void {\n if (level in LEVEL_ORDER) {\n this.write(level as LogLevel, component, message, data);\n }\n }\n\n close(): void {\n if (this.fd !== null) {\n fs.closeSync(this.fd);\n this.fd = null;\n }\n }\n\n getRingBuffer(): LogRingBuffer {\n return this.ringBuffer;\n }\n\n private write(level: LogLevel, component: string, message: string, data?: Record<string, unknown>): void {\n if (LEVEL_ORDER[level] < LEVEL_ORDER[this.level]) return;\n\n const entry: LogEntry = {\n timestamp: new Date().toISOString(),\n level,\n component,\n message,\n ...data,\n };\n\n this.ringBuffer.push(entry);\n\n const line = JSON.stringify(entry) + '\\n';\n const bytes = Buffer.byteLength(line);\n\n if (this.currentSize + bytes > this.maxSize) {\n this.rotate();\n }\n\n if (this.fd !== null) {\n fs.writeSync(this.fd, line);\n this.currentSize += bytes;\n }\n }\n\n private rotate(): void {\n this.close();\n\n for (let i = this.maxFiles - 1; i >= 1; i--) {\n const from = path.join(this.logDir, `daemon.${i}.log`);\n const to = path.join(this.logDir, `daemon.${i + 1}.log`);\n if (fs.existsSync(from)) {\n if (i + 1 > this.maxFiles) {\n fs.unlinkSync(from);\n } else {\n fs.renameSync(from, to);\n }\n }\n }\n\n if (fs.existsSync(this.logPath)) {\n fs.renameSync(this.logPath, path.join(this.logDir, 'daemon.1.log'));\n }\n\n this.fd = fs.openSync(this.logPath, 'a');\n this.currentSize = 0;\n }\n}\n","import type { LogEntry, LogLevel } from './logger.js';\nimport { LEVEL_ORDER } from './logger.js';\n\nconst LOG_RING_BUFFER_CAPACITY = 5000;\nconst LOG_QUERY_DEFAULT_LIMIT = 100;\n\ninterface LogQueryResult {\n entries: LogEntry[];\n cursor: string;\n cursor_reset?: boolean;\n}\n\ninterface LogQueryOptions {\n level?: LogLevel;\n component?: string;\n limit?: number;\n}\n\nexport class LogRingBuffer {\n private buffer: LogEntry[];\n private head = 0;\n private count = 0;\n private sequence = 0;\n private startSequence = 0;\n private readonly capacity: number;\n\n constructor(capacity = LOG_RING_BUFFER_CAPACITY) {\n this.capacity = capacity;\n this.buffer = new Array(capacity);\n }\n\n push(entry: LogEntry): void {\n this.buffer[this.head] = entry;\n this.head = (this.head + 1) % this.capacity;\n if (this.count < this.capacity) {\n this.count++;\n } else {\n this.startSequence++;\n }\n this.sequence++;\n }\n\n since(cursor: string | null, options?: LogQueryOptions): LogQueryResult {\n const limit = options?.limit ?? LOG_QUERY_DEFAULT_LIMIT;\n const minLevel = options?.level ? 
LEVEL_ORDER[options.level] : 0;\n\n let startIdx = 0;\n let cursorReset = false;\n\n if (cursor !== null) {\n const seq = parseInt(cursor, 10);\n if (isNaN(seq) || seq < this.startSequence) {\n cursorReset = true;\n startIdx = 0;\n } else {\n startIdx = seq - this.startSequence;\n }\n } else {\n // No cursor: return last `limit` entries\n startIdx = Math.max(0, this.count - limit);\n }\n\n const entries: LogEntry[] = [];\n for (let i = startIdx; i < this.count && entries.length < limit; i++) {\n const bufIdx = (this.head - this.count + i + this.capacity) % this.capacity;\n const entry = this.buffer[bufIdx];\n if (entry && LEVEL_ORDER[entry.level as LogLevel] >= minLevel) {\n if (options?.component && entry.component !== options.component) continue;\n entries.push(entry);\n }\n }\n\n const result: LogQueryResult = {\n entries,\n cursor: String(this.sequence),\n };\n if (cursorReset) result.cursor_reset = true;\n return result;\n }\n}\n"],"mappings":";;;AAAA,OAAO,QAAQ;AACf,OAAO,UAAU;;;ACEjB,IAAM,2BAA2B;AACjC,IAAM,0BAA0B;AAczB,IAAM,gBAAN,MAAoB;AAAA,EACjB;AAAA,EACA,OAAO;AAAA,EACP,QAAQ;AAAA,EACR,WAAW;AAAA,EACX,gBAAgB;AAAA,EACP;AAAA,EAEjB,YAAY,WAAW,0BAA0B;AAC/C,SAAK,WAAW;AAChB,SAAK,SAAS,IAAI,MAAM,QAAQ;AAAA,EAClC;AAAA,EAEA,KAAK,OAAuB;AAC1B,SAAK,OAAO,KAAK,IAAI,IAAI;AACzB,SAAK,QAAQ,KAAK,OAAO,KAAK,KAAK;AACnC,QAAI,KAAK,QAAQ,KAAK,UAAU;AAC9B,WAAK;AAAA,IACP,OAAO;AACL,WAAK;AAAA,IACP;AACA,SAAK;AAAA,EACP;AAAA,EAEA,MAAM,QAAuB,SAA2C;AACtE,UAAM,QAAQ,SAAS,SAAS;AAChC,UAAM,WAAW,SAAS,QAAQ,YAAY,QAAQ,KAAK,IAAI;AAE/D,QAAI,WAAW;AACf,QAAI,cAAc;AAElB,QAAI,WAAW,MAAM;AACnB,YAAM,MAAM,SAAS,QAAQ,EAAE;AAC/B,UAAI,MAAM,GAAG,KAAK,MAAM,KAAK,eAAe;AAC1C,sBAAc;AACd,mBAAW;AAAA,MACb,OAAO;AACL,mBAAW,MAAM,KAAK;AAAA,MACxB;AAAA,IACF,OAAO;AAEL,iBAAW,KAAK,IAAI,GAAG,KAAK,QAAQ,KAAK;AAAA,IAC3C;AAEA,UAAM,UAAsB,CAAC;AAC7B,aAAS,IAAI,UAAU,IAAI,KAAK,SAAS,QAAQ,SAAS,OAAO,KAAK;AACpE,YAAM,UAAU,KAAK,OAAO,KAAK,QAAQ,IAAI,KAAK,YAAY,KAAK;AACnE,YAAM,QAAQ,KAAK,OAAO,MAAM;AAChC,UAAI,SAAS,YAAY,MAAM,KAAiB,KAAK,UAAU;AAC7D,YAAI,SAAS,aAAa,MAAM,cAAc,QAAQ,UAAW;AACjE,gBAAQ,KAAK,KAAK;AAAA,MACpB;AAAA,IACF;AAEA,UAAM,SAAyB;AAAA,MAC7B;AAAA,MACA,QAAQ,OAAO,KAAK,QAAQ;AAAA,IAC9B;AACA,QAAI,YAAa,QAAO,eAAe;AACvC,WAAO;AAAA,EACT;AACF;;;ADjEO,IAAM,cAAwC;AAAA,EACnD,OAAO;AAAA,EAAG,MAAM;AAAA,EAAG,MAAM;AAAA,EAAG,OAAO;AACrC;AAQO,IAAM,eAAN,MAAmB;AAAA,EAChB;AAAA,EACA,KAAoB;AAAA,EACpB,cAAc;AAAA,EACd;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAER,YAAY,QAAgB,UAAyB,CAAC,GAAG;AACvD,SAAK,SAAS;AACd,SAAK,UAAU,KAAK,KAAK,QAAQ,YAAY;AAC7C,SAAK,QAAQ,QAAQ,SAAS;AAC9B,SAAK,UAAU,QAAQ,WAAW;AAClC,SAAK,WAAW,QAAQ,YAAY;AACpC,SAAK,aAAa,IAAI,cAAc;AAEpC,OAAG,UAAU,QAAQ,EAAE,WAAW,KAAK,CAAC;AACxC,SAAK,KAAK,GAAG,SAAS,KAAK,SAAS,GAAG;AACvC,QAAI;AACF,WAAK,cAAc,GAAG,UAAU,KAAK,EAAE,EAAE;AAAA,IAC3C,QAAQ;AACN,WAAK,cAAc;AAAA,IACrB;AAAA,EACF;AAAA,EAEA,MAAM,WAAmB,SAAiB,MAAsC;AAC9E,SAAK,MAAM,SAAS,WAAW,SAAS,IAAI;AAAA,EAC9C;AAAA,EAEA,KAAK,WAAmB,SAAiB,MAAsC;AAC7E,SAAK,MAAM,QAAQ,WAAW,SAAS,IAAI;AAAA,EAC7C;AAAA,EAEA,KAAK,WAAmB,SAAiB,MAAsC;AAC7E,SAAK,MAAM,QAAQ,WAAW,SAAS,IAAI;AAAA,EAC7C;AAAA,EAEA,MAAM,WAAmB,SAAiB,MAAsC;AAC9E,SAAK,MAAM,SAAS,WAAW,SAAS,IAAI;AAAA,EAC9C;AAAA;AAAA,EAGA,IAAI,OAAe,WAAmB,SAAiB,MAAsC;AAC3F,QAAI,SAAS,aAAa;AACxB,WAAK,MAAM,OAAmB,WAAW,SAAS,IAAI;AAAA,IACxD;AAAA,EACF;AAAA,EAEA,QAAc;AACZ,QAAI,KAAK,OAAO,MAAM;AACpB,SAAG,UAAU,KAAK,EAAE;AACpB,WAAK,KAAK;AAAA,IACZ;AAAA,EACF;AAAA,EAEA,gBAA+B;AAC7B,WAAO,KAAK;AAAA,EACd;AAAA,EAEQ,MAAM,OAAiB,WAAmB,SAAiB,MAAsC;AACvG,QAAI,YAAY,KAAK,IAAI,YAAY,KAAK,KAAK,EAAG;AAElD,UAAM,QAAkB;AAAA,MACtB,YAAW,oBAAI,KAAK,GAAE,YAAY;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA,GAAG;AAAA,IACL;AAEA,SAAK,WAAW,K
AAK,KAAK;AAE1B,UAAM,OAAO,KAAK,UAAU,KAAK,IAAI;AACrC,UAAM,QAAQ,OAAO,WAAW,IAAI;AAEpC,QAAI,KAAK,cAAc,QAAQ,KAAK,SAAS;AAC3C,WAAK,OAAO;AAAA,IACd;AAEA,QAAI,KAAK,OAAO,MAAM;AACpB,SAAG,UAAU,KAAK,IAAI,IAAI;AAC1B,WAAK,eAAe;AAAA,IACtB;AAAA,EACF;AAAA,EAEQ,SAAe;AACrB,SAAK,MAAM;AAEX,aAAS,IAAI,KAAK,WAAW,GAAG,KAAK,GAAG,KAAK;AAC3C,YAAM,OAAO,KAAK,KAAK,KAAK,QAAQ,UAAU,CAAC,MAAM;AACrD,YAAM,KAAK,KAAK,KAAK,KAAK,QAAQ,UAAU,IAAI,CAAC,MAAM;AACvD,UAAI,GAAG,WAAW,IAAI,GAAG;AACvB,YAAI,IAAI,IAAI,KAAK,UAAU;AACzB,aAAG,WAAW,IAAI;AAAA,QACpB,OAAO;AACL,aAAG,WAAW,MAAM,EAAE;AAAA,QACxB;AAAA,MACF;AAAA,IACF;AAEA,QAAI,GAAG,WAAW,KAAK,OAAO,GAAG;AAC/B,SAAG,WAAW,KAAK,SAAS,KAAK,KAAK,KAAK,QAAQ,cAAc,CAAC;AAAA,IACpE;AAEA,SAAK,KAAK,GAAG,SAAS,KAAK,SAAS,GAAG;AACvC,SAAK,cAAc;AAAA,EACrB;AACF;","names":[]}
package/dist/{chunk-FPRXMJLT.js → chunk-OEGZ5YTJ.js}
@@ -1,7 +1,7 @@
  import { createRequire as __cr } from 'node:module'; const require = __cr(import.meta.url);
  import {
  AgentRegistry
- } from "./chunk-Z74SDEKE.js";
+ } from "./chunk-F7GAYVWF.js";

  // src/native-deps.ts
  import { execFileSync } from "child_process";
@@ -53,4 +53,4 @@ function ensureNativeDeps() {
  export {
  ensureNativeDeps
  };
- //# sourceMappingURL=chunk-FPRXMJLT.js.map
+ //# sourceMappingURL=chunk-OEGZ5YTJ.js.map
package/dist/{chunk-4XVKZ3WA.js → chunk-TK7A4RX7.js}
@@ -24,7 +24,7 @@ import {
  stripReasoningTokens,
  supersedeSpore,
  supersededIdsSchema
- } from "./chunk-UP4P4OAA.js";
+ } from "./chunk-V6BJVYNH.js";
  import {
  generateEmbedding
  } from "./chunk-RGVBGTD6.js";
@@ -49,7 +49,7 @@ import {
  LLM_REASONING_MODE,
  SUPERSESSION_MAX_TOKENS,
  estimateTokens
- } from "./chunk-J4D4CROB.js";
+ } from "./chunk-DYDBF5W6.js";
  import {
  __toESM
  } from "./chunk-PZUWP5VK.js";
@@ -301,6 +301,13 @@ ${body}
  substrateIndex[key].push(note.id);
  }
  }
+ this.log("debug", "Substrate breakdown", {
+ sessions: substrateIndex.sessions.length,
+ spores: substrateIndex.spores.length,
+ plans: substrateIndex.plans.length,
+ artifacts: substrateIndex.artifacts.length,
+ team: substrateIndex.team.length
+ });
  const cycleTimestamp = (/* @__PURE__ */ new Date()).toISOString();
  const systemPrompt = loadPrompt("digest-system");
  const allSubstrateIds = substrate.map((note) => note.id);
@@ -1075,4 +1082,4 @@ export {
  updateTitleAndSummary,
  runReprocess
  };
- //# sourceMappingURL=chunk-4XVKZ3WA.js.map
+ //# sourceMappingURL=chunk-TK7A4RX7.js.map
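
The new debug entry above is emitted via a three-argument `this.log(level, message, data)` call, a different shape from `DaemonLogger`'s four-argument methods, and this diff shows only the call site. One plausible wiring, sketched under the assumption that a digest-side wrapper pins the component and forwards to the `log()` dispatcher added in chunk-O6TBHGVO.js:

```ts
// Hypothetical forwarding wrapper; not shown in this diff.
interface DaemonLoggerLike {
  log(level: string, component: string, message: string, data?: Record<string, unknown>): void;
}

class DigestEngine {
  constructor(private logger: DaemonLoggerLike) {}

  private log(level: string, message: string, data?: Record<string, unknown>): void {
    // DaemonLogger.log() validates the level string against LEVEL_ORDER and
    // silently drops unknown levels, so dynamic strings are safe to pass.
    this.logger.log(level, "digest", message, data);
  }
}
```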