discoclaw 1.1.6 → 1.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example.full CHANGED
@@ -397,6 +397,15 @@ DISCOCLAW_DISCORD_ACTIONS_DEFER=1
 # Comma-separated channel IDs to restrict cold-storage ingestion and retrieval.
 # When set, only messages from these channels are stored/searched. Empty = all channels.
 #COLD_STORAGE_CHANNEL_FILTER=
+# HyDE (Hypothetical Document Embedding) — generate a hypothetical answer before
+# embedding, improving semantic search when queries are short or use different
+# vocabulary than stored content. Only the vector leg is affected; FTS5 keyword
+# search still uses the raw query. Enabled by default; set to 0 to disable.
+#DISCOCLAW_COLD_STORAGE_HYDE_ENABLED=1
+# Model used to generate the hypothetical document. Any chat-capable model string
+# accepted by the AI runtime (e.g. "anthropic/claude-sonnet-4-5"). Falls back to a
+# sensible default when unset.
+#COLD_STORAGE_HYDE_MODEL=
 
 # ----------------------------------------------------------
 # Bot identity
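
Both knobs are opt-out: HyDE runs unless explicitly disabled. A minimal `.env` sketch that keeps HyDE on and pins the generator model (the model string reuses the example from the comment above; it is not a shipped default):

```
# HyDE on (the default), with an explicit model for the hypothetical document.
DISCOCLAW_COLD_STORAGE_HYDE_ENABLED=1
COLD_STORAGE_HYDE_MODEL=anthropic/claude-sonnet-4-5
```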
package/README.md CHANGED
@@ -77,6 +77,14 @@ Voice is **off by default**. Enable with `DISCOCLAW_VOICE_ENABLED=1` plus API ke
 
 Full setup guide: [docs/voice.md](docs/voice.md)
 
+## Self-management — the bot maintains itself
+
+- **Self-update** — `!update` checks for new npm versions; `!update apply` downloads, installs, and restarts without leaving Discord
+- **Health checks** — `!health`, `!doctor`, `!status` for diagnostics
+- **Secret management** — `!secret` manages `.env` entries from DMs
+- **Model switching** — `!models` swaps AI models per role at runtime
+- **Restart** — `!restart` restarts the service on demand
+
 ## How it works
 
 DiscoClaw orchestrates the flow between Discord and AI runtimes (Claude Code by default, with `gemini-api`, OpenAI, Codex, and OpenRouter adapters available via `PRIMARY_RUNTIME`). For 1.0, `Claude CLI` on a source checkout is the explicitly supported default path, and `Codex CLI` on a source checkout is the explicitly supported secondary path. See [docs/audit/provider-auth-1.0-matrix.md](docs/audit/provider-auth-1.0-matrix.md) for the full consolidated matrix. The OpenAI-compatible and OpenRouter adapters can expose optional tool use when `OPENAI_COMPAT_TOOLS_ENABLED=1` is set, but OpenRouter support claims stop at the narrower audited boundary described below. DiscoClaw doesn't contain intelligence itself — it decides *when* to call the AI, *what context* to give it, and *what to do* with the output. When you send a message, the orchestrator:
@@ -406,15 +414,22 @@ Do not treat `codex --version`, `OPENAI_API_KEY` presence, `pnpm preflight*`, or
 
 ## Updating
 
-**Global install:**
+DiscoClaw can check for and apply updates from inside Discord — no SSH or terminal needed.
+
+| Command | Description |
+|---------|-------------|
+| `!update` | Check if a newer version is available on npm |
+| `!update apply` | Download the update, reinstall, and restart the service |
+| `!update audit` | Show npm-managed runtime audit details |
+| `!update help` | Show usage |
 
-If DiscoClaw is running, update from Discord:
+**Global install (from Discord):**
 
 ```
 !update apply
 ```
 
-Or from the command line:
+**Global install (from the command line):**
 
 ```bash
 npm update -g discoclaw
@@ -0,0 +1,50 @@
+const HYDE_SYSTEM_PROMPT = 'You are a retrieval assistant. Given a query, write a short hypothetical passage ' +
+    'that directly answers it as if the passage were already stored in a knowledge base. ' +
+    'Do not include preamble or meta-commentary — output only the passage.';
+const TIMEOUT_MS = 15_000;
+/**
+ * Generate a hypothetical answer to `query` via an OpenAI-compatible chat
+ * completions endpoint. Returns the generated text, or `null` on any failure.
+ */
+export async function generateHypotheticalAnswer(opts) {
+    const { apiKey, baseUrl, model, query, log } = opts;
+    const url = `${baseUrl.replace(/\/+$/, '')}/chat/completions`;
+    try {
+        const controller = new AbortController();
+        const timer = setTimeout(() => controller.abort(), TIMEOUT_MS);
+        const response = await fetch(url, {
+            method: 'POST',
+            headers: {
+                'Authorization': `Bearer ${apiKey}`,
+                'Content-Type': 'application/json',
+            },
+            body: JSON.stringify({
+                model,
+                messages: [
+                    { role: 'system', content: HYDE_SYSTEM_PROMPT },
+                    { role: 'user', content: query },
+                ],
+                max_tokens: 256,
+                temperature: 0.7,
+            }),
+            signal: controller.signal,
+        });
+        clearTimeout(timer);
+        if (!response.ok) {
+            const detail = await response.text().catch(() => '');
+            log?.warn({ status: response.status, detail }, 'hyde: chat completions API error, falling back to raw query');
+            return null;
+        }
+        const json = (await response.json());
+        const text = json.choices?.[0]?.message?.content?.trim() ?? '';
+        if (text.length === 0) {
+            log?.warn({}, 'hyde: empty response from model, falling back to raw query');
+            return null;
+        }
+        return text;
+    }
+    catch (err) {
+        log?.warn({ err }, 'hyde: generation failed, falling back to raw query');
+        return null;
+    }
+}
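
The function never throws: every failure path logs a warning and returns `null`, so callers can fall back to the raw query. A usage sketch, with the base URL and model borrowed from the test defaults below rather than anything the package configures:

```js
// Illustrative call only; the real caller and its config are not shown in this diff.
const hypothetical = await generateHypotheticalAnswer({
    apiKey: process.env.OPENAI_API_KEY ?? '',   // assumed env var for the sketch
    baseUrl: 'https://api.openai.com/v1',
    model: 'gpt-4o-mini',
    query: 'how do I rotate the bot token?',
});
// Embed the hypothetical passage when generation succeeded, else the raw query.
const textToEmbed = hypothetical ?? 'how do I rotate the bot token?';
```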
@@ -0,0 +1,84 @@
+import { afterEach, describe, expect, it, vi } from 'vitest';
+import { generateHypotheticalAnswer } from './hyde.js';
+function makeChatResponse(content) {
+    return new Response(JSON.stringify({
+        choices: [{ message: { content } }],
+    }), { status: 200, statusText: 'OK', headers: { 'Content-Type': 'application/json' } });
+}
+function defaultOpts(overrides = {}) {
+    return {
+        apiKey: 'test-key',
+        baseUrl: 'https://api.openai.com/v1',
+        model: 'gpt-4o-mini',
+        query: 'What is the capital of France?',
+        ...overrides,
+    };
+}
+describe('generateHypotheticalAnswer', () => {
+    const originalFetch = globalThis.fetch;
+    afterEach(() => {
+        globalThis.fetch = originalFetch;
+    });
+    it('returns generated text on success', async () => {
+        globalThis.fetch = vi.fn().mockResolvedValue(makeChatResponse('Paris is the capital of France.'));
+        const result = await generateHypotheticalAnswer(defaultOpts());
+        expect(result).toBe('Paris is the capital of France.');
+        expect(globalThis.fetch).toHaveBeenCalledTimes(1);
+        const [url, init] = globalThis.fetch.mock.calls[0];
+        expect(url).toBe('https://api.openai.com/v1/chat/completions');
+        const body = JSON.parse(init.body);
+        expect(body.model).toBe('gpt-4o-mini');
+        expect(body.messages).toHaveLength(2);
+        expect(body.messages[1].content).toBe('What is the capital of France?');
+    });
+    it('returns null on API error', async () => {
+        globalThis.fetch = vi.fn().mockResolvedValue(new Response('Unauthorized', { status: 401, statusText: 'Unauthorized' }));
+        const log = { info: vi.fn(), warn: vi.fn(), error: vi.fn() };
+        const result = await generateHypotheticalAnswer(defaultOpts({ log }));
+        expect(result).toBeNull();
+        expect(log.warn).toHaveBeenCalledTimes(1);
+    });
+    it('returns null on timeout', async () => {
+        globalThis.fetch = vi.fn().mockImplementation((_url, init) => new Promise((_resolve, reject) => {
+            init.signal?.addEventListener('abort', () => {
+                reject(new DOMException('The operation was aborted.', 'AbortError'));
+            });
+        }));
+        // Speed up the test by mocking timers
+        vi.useFakeTimers();
+        const log = { info: vi.fn(), warn: vi.fn(), error: vi.fn() };
+        const promise = generateHypotheticalAnswer(defaultOpts({ log }));
+        await vi.advanceTimersByTimeAsync(16_000);
+        const result = await promise;
+        expect(result).toBeNull();
+        expect(log.warn).toHaveBeenCalledTimes(1);
+        vi.useRealTimers();
+    });
+    it('returns null on empty response', async () => {
+        globalThis.fetch = vi.fn().mockResolvedValue(makeChatResponse(''));
+        const log = { info: vi.fn(), warn: vi.fn(), error: vi.fn() };
+        const result = await generateHypotheticalAnswer(defaultOpts({ log }));
+        expect(result).toBeNull();
+        expect(log.warn).toHaveBeenCalledTimes(1);
+    });
+    it('returns null on whitespace-only response', async () => {
+        globalThis.fetch = vi.fn().mockResolvedValue(makeChatResponse(' \n\t '));
+        const log = { info: vi.fn(), warn: vi.fn(), error: vi.fn() };
+        const result = await generateHypotheticalAnswer(defaultOpts({ log }));
+        expect(result).toBeNull();
+        expect(log.warn).toHaveBeenCalledTimes(1);
+    });
+    it('strips trailing slashes from baseUrl', async () => {
+        globalThis.fetch = vi.fn().mockResolvedValue(makeChatResponse('Some answer'));
+        await generateHypotheticalAnswer(defaultOpts({ baseUrl: 'https://api.openai.com/v1/' }));
+        const [url] = globalThis.fetch.mock.calls[0];
+        expect(url).toBe('https://api.openai.com/v1/chat/completions');
+    });
+    it('returns null on network error', async () => {
+        globalThis.fetch = vi.fn().mockRejectedValue(new Error('ECONNREFUSED'));
+        const log = { info: vi.fn(), warn: vi.fn(), error: vi.fn() };
+        const result = await generateHypotheticalAnswer(defaultOpts({ log }));
+        expect(result).toBeNull();
+        expect(log.warn).toHaveBeenCalledTimes(1);
+    });
+});
package/dist/config.js CHANGED
@@ -787,6 +787,8 @@ export function parseConfig(env) {
     })(),
     coldStorageInjectMaxChars: parsePositiveInt(env, 'DISCOCLAW_COLD_STORAGE_INJECT_MAX_CHARS', 1500),
     coldStorageSearchLimit: parsePositiveInt(env, 'DISCOCLAW_COLD_STORAGE_SEARCH_LIMIT', 10),
+    coldStorageHydeEnabled: parseBoolean(env, 'DISCOCLAW_COLD_STORAGE_HYDE_ENABLED', true),
+    coldStorageHydeModel: parseTrimmedString(env, 'COLD_STORAGE_HYDE_MODEL'),
     summaryToDurableEnabled: parseBoolean(env, 'DISCOCLAW_SUMMARY_TO_DURABLE_ENABLED', true),
     shortTermMemoryEnabled: parseBoolean(env, 'DISCOCLAW_SHORTTERM_MEMORY_ENABLED', true),
     shortTermMaxEntries: parsePositiveInt(env, 'DISCOCLAW_SHORTTERM_MAX_ENTRIES', 20),
@@ -334,15 +334,6 @@ export async function buildDurableMemorySection(opts) {
         return '';
     }
 }
-// ---------------------------------------------------------------------------
-// Cold-storage prompt section
-// ---------------------------------------------------------------------------
-/**
- * Search cold storage and build a prompt section from the results.
- *
- * Returns an empty string when cold storage is disabled, unavailable,
- * or no results match the query. Never throws.
- */
 export async function buildColdStoragePromptSection(opts) {
     if (!opts.enabled || !opts.subsystem || !opts.query)
         return '';
@@ -352,15 +343,33 @@ export async function buildColdStoragePromptSection(opts) {
         return '';
     }
     try {
-        // Generate embedding for the query (3-second timeout fail open on slow APIs)
+        // HyDE step: generate a hypothetical answer to embed instead of the raw query.
+        // Falls back to raw query if generator is absent, returns null, or times out.
+        let textToEmbed = opts.query;
+        if (opts.hydeGenerator) {
+            const HYDE_TIMEOUT_MS = 3_000;
+            try {
+                const hydeResult = await Promise.race([
+                    opts.hydeGenerator(opts.query),
+                    new Promise((resolve) => setTimeout(() => resolve(null), HYDE_TIMEOUT_MS)),
+                ]);
+                if (hydeResult != null) {
+                    textToEmbed = hydeResult;
+                }
+            }
+            catch (err) {
+                opts.log?.warn({ err }, 'cold-storage HyDE generation failed, falling back to raw query');
+            }
+        }
+        // Generate embedding (3-second timeout — fail open on slow APIs)
         const EMBED_TIMEOUT_MS = 3_000;
         const embeddings = await Promise.race([
-            opts.subsystem.embeddings.embed([opts.query]),
+            opts.subsystem.embeddings.embed([textToEmbed]),
             new Promise((_, reject) => setTimeout(() => reject(new Error('cold-storage embedding timeout')), EMBED_TIMEOUT_MS)),
         ]);
         if (embeddings.length === 0)
             return '';
-        // Search with both vector and FTS
+        // Search with both vector and FTS — raw query drives keyword leg unchanged
         const results = opts.subsystem.store.search({
             embedding: embeddings[0],
             query: opts.query,
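
Note the layering: `buildColdStoragePromptSection` only knows about an injected `opts.hydeGenerator` callback, while `generateHypotheticalAnswer` and the two new config keys are defined elsewhere in the diff. The glue between them is not shown, so the following adapter is a sketch under that assumption; the config field names for the API credentials are invented for illustration:

```js
// Hypothetical wiring sketch: the actual call site does not appear in this diff.
import { generateHypotheticalAnswer } from './hyde.js';

function makeHydeGenerator(config, log) {
    if (!config.coldStorageHydeEnabled)
        return undefined; // absent generator means the raw query gets embedded
    return (query) => generateHypotheticalAnswer({
        apiKey: config.openaiApiKey,   // assumed field name
        baseUrl: config.openaiBaseUrl, // assumed field name
        model: config.coldStorageHydeModel || 'gpt-4o-mini', // assumed fallback
        query,
        log,
    });
}
```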
@@ -72,7 +72,10 @@ export class ToolAwareQueue {
         switch (this.state) {
             case 'idle':
             case 'buffering_text':
-                // Discard buffered narration text.
+                // Flush buffered narration text before switching to tool_active.
+                if (this.buffer) {
+                    this.emit({ type: 'stream_text', text: this.buffer });
+                }
                 this.buffer = '';
                 this.state = 'tool_active';
                 this.emit({ type: 'show_activity', label });
@@ -96,15 +96,16 @@ describe('ToolAwareQueue', () => {
         expect(actions[1]).toEqual({ type: 'stream_text', text: '!' });
         taq.dispose();
     });
-    it('text then tool: narration discarded, activity shown', () => {
+    it('text then tool: narration flushed as stream_text, then activity shown', () => {
         const { actions, emit } = collect();
         const taq = new ToolAwareQueue(emit, { flushDelayMs: 800, postToolDelayMs: 500 });
         taq.handleEvent({ type: 'text_delta', text: 'Let me read the file...' });
         taq.handleEvent({ type: 'tool_start', name: 'Read', input: { file_path: '/tmp/foo.ts' } });
-        // Narration was discarded, only show_activity emitted.
-        expect(actions).toHaveLength(1);
-        expect(actions[0]).toMatchObject({ type: 'show_activity' });
-        expect(actions[0].label).toContain('Reading');
+        // Narration flushed as stream_text, then show_activity emitted.
+        expect(actions).toHaveLength(2);
+        expect(actions[0]).toEqual({ type: 'stream_text', text: 'Let me read the file...' });
+        expect(actions[1]).toMatchObject({ type: 'show_activity' });
+        expect(actions[1].label).toContain('Reading');
         taq.dispose();
     });
     it('tool then text: activity during tool, text streams after', () => {
@@ -176,7 +177,7 @@ describe('ToolAwareQueue', () => {
         vi.advanceTimersByTime(5000);
         expect(actions).toHaveLength(0);
     });
-    it('post-tool delay prevents flashing narration between tools', () => {
+    it('inter-tool narration is flushed before second tool activity', () => {
         const { actions, emit } = collect();
         const taq = new ToolAwareQueue(emit, { flushDelayMs: 800, postToolDelayMs: 500 });
         taq.handleEvent({ type: 'tool_start', name: 'Read' });
@@ -186,8 +187,12 @@ describe('ToolAwareQueue', () => {
         // Before post-tool delay fires, second tool starts.
         vi.advanceTimersByTime(200);
         taq.handleEvent({ type: 'tool_start', name: 'Bash' });
-        // The narration text should not have been streamed.
-        expect(actions.filter((a) => a.type === 'stream_text')).toHaveLength(0);
+        // Inter-tool narration flushed as stream_text before second tool.
+        expect(actions.filter((a) => a.type === 'stream_text')).toHaveLength(1);
+        expect(actions.filter((a) => a.type === 'stream_text')[0]).toEqual({
+            type: 'stream_text',
+            text: 'Now let me run...',
+        });
         // Two show_activity actions.
         expect(actions.filter((a) => a.type === 'show_activity')).toHaveLength(2);
         taq.dispose();
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "discoclaw",
-  "version": "1.1.6",
+  "version": "1.2.1",
   "description": "Personal AI orchestrator that turns Discord into a persistent workspace",
   "license": "MIT",
   "repository": {