shuvmaki 0.4.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. package/bin.js +70 -0
  2. package/dist/ai-tool-to-genai.js +210 -0
  3. package/dist/ai-tool-to-genai.test.js +267 -0
  4. package/dist/channel-management.js +97 -0
  5. package/dist/cli.js +709 -0
  6. package/dist/commands/abort.js +78 -0
  7. package/dist/commands/add-project.js +98 -0
  8. package/dist/commands/agent.js +152 -0
  9. package/dist/commands/ask-question.js +183 -0
  10. package/dist/commands/create-new-project.js +78 -0
  11. package/dist/commands/fork.js +186 -0
  12. package/dist/commands/model.js +313 -0
  13. package/dist/commands/permissions.js +126 -0
  14. package/dist/commands/queue.js +129 -0
  15. package/dist/commands/resume.js +145 -0
  16. package/dist/commands/session.js +142 -0
  17. package/dist/commands/share.js +80 -0
  18. package/dist/commands/types.js +2 -0
  19. package/dist/commands/undo-redo.js +161 -0
  20. package/dist/commands/user-command.js +145 -0
  21. package/dist/database.js +184 -0
  22. package/dist/discord-bot.js +384 -0
  23. package/dist/discord-utils.js +217 -0
  24. package/dist/escape-backticks.test.js +410 -0
  25. package/dist/format-tables.js +96 -0
  26. package/dist/format-tables.test.js +418 -0
  27. package/dist/genai-worker-wrapper.js +109 -0
  28. package/dist/genai-worker.js +297 -0
  29. package/dist/genai.js +232 -0
  30. package/dist/interaction-handler.js +144 -0
  31. package/dist/logger.js +51 -0
  32. package/dist/markdown.js +310 -0
  33. package/dist/markdown.test.js +262 -0
  34. package/dist/message-formatting.js +273 -0
  35. package/dist/message-formatting.test.js +73 -0
  36. package/dist/openai-realtime.js +228 -0
  37. package/dist/opencode.js +216 -0
  38. package/dist/session-handler.js +580 -0
  39. package/dist/system-message.js +61 -0
  40. package/dist/tools.js +356 -0
  41. package/dist/utils.js +85 -0
  42. package/dist/voice-handler.js +541 -0
  43. package/dist/voice.js +314 -0
  44. package/dist/worker-types.js +4 -0
  45. package/dist/xml.js +92 -0
  46. package/dist/xml.test.js +32 -0
  47. package/package.json +60 -0
  48. package/src/__snapshots__/compact-session-context-no-system.md +35 -0
  49. package/src/__snapshots__/compact-session-context.md +47 -0
  50. package/src/ai-tool-to-genai.test.ts +296 -0
  51. package/src/ai-tool-to-genai.ts +255 -0
  52. package/src/channel-management.ts +161 -0
  53. package/src/cli.ts +1010 -0
  54. package/src/commands/abort.ts +94 -0
  55. package/src/commands/add-project.ts +139 -0
  56. package/src/commands/agent.ts +201 -0
  57. package/src/commands/ask-question.ts +276 -0
  58. package/src/commands/create-new-project.ts +111 -0
  59. package/src/commands/fork.ts +257 -0
  60. package/src/commands/model.ts +402 -0
  61. package/src/commands/permissions.ts +146 -0
  62. package/src/commands/queue.ts +181 -0
  63. package/src/commands/resume.ts +230 -0
  64. package/src/commands/session.ts +184 -0
  65. package/src/commands/share.ts +96 -0
  66. package/src/commands/types.ts +25 -0
  67. package/src/commands/undo-redo.ts +213 -0
  68. package/src/commands/user-command.ts +178 -0
  69. package/src/database.ts +220 -0
  70. package/src/discord-bot.ts +513 -0
  71. package/src/discord-utils.ts +282 -0
  72. package/src/escape-backticks.test.ts +447 -0
  73. package/src/format-tables.test.ts +440 -0
  74. package/src/format-tables.ts +110 -0
  75. package/src/genai-worker-wrapper.ts +160 -0
  76. package/src/genai-worker.ts +366 -0
  77. package/src/genai.ts +321 -0
  78. package/src/interaction-handler.ts +187 -0
  79. package/src/logger.ts +57 -0
  80. package/src/markdown.test.ts +358 -0
  81. package/src/markdown.ts +365 -0
  82. package/src/message-formatting.test.ts +81 -0
  83. package/src/message-formatting.ts +340 -0
  84. package/src/openai-realtime.ts +363 -0
  85. package/src/opencode.ts +277 -0
  86. package/src/session-handler.ts +758 -0
  87. package/src/system-message.ts +62 -0
  88. package/src/tools.ts +428 -0
  89. package/src/utils.ts +118 -0
  90. package/src/voice-handler.ts +760 -0
  91. package/src/voice.ts +432 -0
  92. package/src/worker-types.ts +66 -0
  93. package/src/xml.test.ts +37 -0
  94. package/src/xml.ts +121 -0
package/dist/voice.js ADDED
@@ -0,0 +1,314 @@
+ // Audio transcription service using Google Gemini.
+ // Transcribes voice messages with code-aware context, using grep/glob tools
+ // to verify technical terms, filenames, and function names in the codebase.
+ import { GoogleGenAI, Type } from '@google/genai';
+ import { createLogger } from './logger.js';
+ import { glob } from 'glob';
+ import { ripGrep } from 'ripgrep-js';
+ const voiceLogger = createLogger('VOICE');
+ async function runGrep({ pattern, directory }) {
+     try {
+         const results = await ripGrep(directory, {
+             string: pattern,
+             globs: ['!node_modules/**', '!.git/**', '!dist/**', '!build/**'],
+         });
+         if (results.length === 0) {
+             return 'No matches found';
+         }
+         const output = results
+             .slice(0, 10)
+             .map((match) => {
+                 return `${match.path.text}:${match.line_number}: ${match.lines.text.trim()}`;
+             })
+             .join('\n');
+         return output.slice(0, 2000);
+     }
+     catch (e) {
+         voiceLogger.error('grep search failed:', e);
+         return 'grep search failed';
+     }
+ }
+ async function runGlob({ pattern, directory }) {
+     try {
+         const files = await glob(pattern, {
+             cwd: directory,
+             nodir: false,
+             ignore: ['node_modules/**', '.git/**', 'dist/**', 'build/**'],
+             maxDepth: 10,
+         });
+         if (files.length === 0) {
+             return 'No files found';
+         }
+         return files.slice(0, 30).join('\n');
+     }
+     catch (error) {
+         return `Glob search failed: ${error instanceof Error ? error.message : 'Unknown error'}`;
+     }
+ }
+ const grepToolDeclaration = {
+     name: 'grep',
+     description: 'Search for a pattern in file contents to verify if a technical term, function name, or variable exists in the code. Use this to check if transcribed words match actual code.',
+     parameters: {
+         type: Type.OBJECT,
+         properties: {
+             pattern: {
+                 type: Type.STRING,
+                 description: 'The search pattern (case-insensitive). Can be a word, function name, or partial match.',
+             },
+         },
+         required: ['pattern'],
+     },
+ };
+ const globToolDeclaration = {
+     name: 'glob',
+     description: 'Search for files by name pattern. Use this to verify if a filename or directory mentioned in the audio actually exists in the project.',
+     parameters: {
+         type: Type.OBJECT,
+         properties: {
+             pattern: {
+                 type: Type.STRING,
+                 description: 'The glob pattern to match files. Examples: "*.ts", "**/*.json", "**/config*", "src/**/*.tsx"',
+             },
+         },
+         required: ['pattern'],
+     },
+ };
+ const transcriptionResultToolDeclaration = {
+     name: 'transcriptionResult',
+     description: 'MANDATORY: You MUST call this tool to complete the task. This is the ONLY way to return results - text responses are ignored. Call this with your transcription, even if imperfect. An imperfect transcription is better than none.',
+     parameters: {
+         type: Type.OBJECT,
+         properties: {
+             transcription: {
+                 type: Type.STRING,
+                 description: 'The final transcription of the audio. MUST be non-empty. If audio is unclear, transcribe your best interpretation. If silent, use "[inaudible audio]".',
+             },
+         },
+         required: ['transcription'],
+     },
+ };
+ function createToolRunner({ directory }) {
+     const hasDirectory = directory && directory.trim().length > 0;
+     return async ({ name, args }) => {
+         if (name === 'transcriptionResult') {
+             return {
+                 type: 'result',
+                 transcription: args?.transcription || '',
+             };
+         }
+         if (name === 'grep' && hasDirectory) {
+             const pattern = args?.pattern || '';
+             voiceLogger.log(`Grep search: "${pattern}"`);
+             const output = await runGrep({ pattern, directory });
+             voiceLogger.log(`Grep result: ${output.slice(0, 100)}...`);
+             return { type: 'toolResponse', name: 'grep', output };
+         }
+         if (name === 'glob' && hasDirectory) {
+             const pattern = args?.pattern || '';
+             voiceLogger.log(`Glob search: "${pattern}"`);
+             const output = await runGlob({ pattern, directory });
+             voiceLogger.log(`Glob result: ${output.slice(0, 100)}...`);
+             return { type: 'toolResponse', name: 'glob', output };
+         }
+         return { type: 'skip' };
+     };
+ }
+ export async function runTranscriptionLoop({ genAI, model, initialContents, tools, temperature, toolRunner, maxSteps = 10 }) {
+     let response = await genAI.models.generateContent({
+         model,
+         contents: initialContents,
+         config: {
+             temperature,
+             thinkingConfig: {
+                 thinkingBudget: 1024,
+             },
+             tools,
+         },
+     });
+     const conversationHistory = [...initialContents];
+     let stepsRemaining = maxSteps;
+     while (true) {
+         const candidate = response.candidates?.[0];
+         if (!candidate?.content?.parts) {
+             const text = response.text?.trim();
+             if (text) {
+                 voiceLogger.log(`No parts but got text response: "${text.slice(0, 100)}..."`);
+                 return text;
+             }
+             throw new Error('Transcription failed: No response content from model');
+         }
+         const functionCalls = candidate.content.parts.filter((part) => 'functionCall' in part && !!part.functionCall);
+         if (functionCalls.length === 0) {
+             const text = response.text?.trim();
+             if (text) {
+                 voiceLogger.log(`No function calls but got text: "${text.slice(0, 100)}..."`);
+                 return text;
+             }
+             throw new Error('Transcription failed: Model did not produce a transcription');
+         }
+         conversationHistory.push({
+             role: 'model',
+             parts: candidate.content.parts,
+         });
+         const functionResponseParts = [];
+         for (const part of functionCalls) {
+             const call = part.functionCall;
+             const args = call.args;
+             const result = await toolRunner({ name: call.name || '', args });
+             if (result.type === 'result') {
+                 const transcription = result.transcription?.trim() || '';
+                 voiceLogger.log(`Transcription result received: "${transcription.slice(0, 100)}..."`);
+                 if (!transcription) {
+                     throw new Error('Transcription failed: Model returned empty transcription');
+                 }
+                 return transcription;
+             }
+             if (result.type === 'toolResponse') {
+                 stepsRemaining--;
+                 // Append an escalating warning so the model wraps up before the tool budget runs out.
+                 const stepsWarning = (() => {
+                     if (stepsRemaining <= 0) {
+                         return '\n\n[CRITICAL: Tool limit reached. You MUST call transcriptionResult NOW. No more grep/glob allowed. Call transcriptionResult immediately with your best transcription.]';
+                     }
+                     if (stepsRemaining === 1) {
+                         return '\n\n[URGENT: FINAL STEP. You MUST call transcriptionResult NOW. Do NOT call grep or glob. Call transcriptionResult with your transcription immediately.]';
+                     }
+                     if (stepsRemaining <= 3) {
+                         return `\n\n[WARNING: Only ${stepsRemaining} steps remaining. Finish searching soon and call transcriptionResult. Do not wait until the last step.]`;
+                     }
+                     return '';
+                 })();
+                 functionResponseParts.push({
+                     functionResponse: {
+                         name: result.name,
+                         response: { output: result.output + stepsWarning },
+                     },
+                 });
+             }
+         }
+         if (functionResponseParts.length === 0) {
+             throw new Error('Transcription failed: No valid tool responses');
+         }
+         conversationHistory.push({
+             role: 'user',
+             parts: functionResponseParts,
+         });
+         response = await genAI.models.generateContent({
+             model,
+             contents: conversationHistory,
+             config: {
+                 temperature,
+                 thinkingConfig: {
+                     thinkingBudget: 512,
+                 },
+                 tools: stepsRemaining <= 0 ? [{ functionDeclarations: [transcriptionResultToolDeclaration] }] : tools,
+             },
+         });
+     }
+ }
+ export async function transcribeAudio({ audio, prompt, language, temperature, geminiApiKey, directory, currentSessionContext, lastSessionContext }) {
+     try {
+         const apiKey = geminiApiKey || process.env.GEMINI_API_KEY;
+         if (!apiKey) {
+             throw new Error('Gemini API key is required for audio transcription');
+         }
+         const genAI = new GoogleGenAI({ apiKey });
+         let audioBase64;
+         if (typeof audio === 'string') {
+             audioBase64 = audio;
+         }
+         else if (audio instanceof Buffer) {
+             audioBase64 = audio.toString('base64');
+         }
+         else if (audio instanceof Uint8Array) {
+             audioBase64 = Buffer.from(audio).toString('base64');
+         }
+         else if (audio instanceof ArrayBuffer) {
+             audioBase64 = Buffer.from(audio).toString('base64');
+         }
+         else {
+             throw new Error('Invalid audio format');
+         }
+         const languageHint = language ? `The audio is in ${language}.\n\n` : '';
+         // Build the session context section
+         const sessionContextParts = [];
+         if (lastSessionContext) {
+             sessionContextParts.push(`<last_session>
+ ${lastSessionContext}
+ </last_session>`);
+         }
+         if (currentSessionContext) {
+             sessionContextParts.push(`<current_session>
+ ${currentSessionContext}
+ </current_session>`);
+         }
+         const sessionContextSection = sessionContextParts.length > 0
+             ? `\nSession context (use to understand references to files, functions, tools used):\n${sessionContextParts.join('\n\n')}`
+             : '';
+         const transcriptionPrompt = `${languageHint}Transcribe this audio for a coding agent (like Claude Code or OpenCode).
+ 
+ CRITICAL REQUIREMENT: You MUST call the "transcriptionResult" tool to complete this task.
+ - The transcriptionResult tool is the ONLY way to return results
+ - Text responses are completely ignored - only tool calls work
+ - You MUST call transcriptionResult even if you run out of tool calls
+ - An imperfect transcription is better than no transcription
+ - DO NOT end without calling transcriptionResult
+ 
+ This is a software development environment. The speaker is giving instructions to an AI coding assistant. Expect:
+ - File paths, function names, CLI commands, package names, API endpoints
+ 
+ RULES:
+ 1. If audio is unclear, transcribe your best interpretation, even when a strong accent is present; identify the accent first so you can infer what the words mean
+ 2. If audio seems silent/empty, call transcriptionResult with "[inaudible audio]"
+ 3. Use the session context below to understand technical terms, file names, and function names mentioned
+ 
+ Common corrections (apply without tool calls):
+ - "reacked" → "React", "jason" → "JSON", "get hub" → "GitHub", "no JS" → "Node.js", "dacker" → "Docker"
+ 
+ Project file structure:
+ <file_tree>
+ ${prompt}
+ </file_tree>
+ ${sessionContextSection}
+ 
+ REMEMBER: Call "transcriptionResult" tool with your transcription. This is mandatory.
+ 
+ Note: "critique" is a CLI tool for showing diffs in the browser.`;
+         // const hasDirectory = directory && directory.trim().length > 0
+         const tools = [
+             {
+                 functionDeclarations: [
+                     transcriptionResultToolDeclaration,
+                     // grep/glob disabled - was causing transcription to hang
+                     // ...(hasDirectory ? [grepToolDeclaration, globToolDeclaration] : []),
+                 ],
+             },
+         ];
+         const initialContents = [
+             {
+                 role: 'user',
+                 parts: [
+                     { text: transcriptionPrompt },
+                     {
+                         inlineData: {
+                             data: audioBase64,
+                             mimeType: 'audio/mpeg',
+                         },
+                     },
+                 ],
+             },
+         ];
+         const toolRunner = createToolRunner({ directory });
+         return await runTranscriptionLoop({
+             genAI,
+             model: 'gemini-2.5-flash',
+             initialContents,
+             tools,
+             temperature: temperature ?? 0.3,
+             toolRunner,
+         });
+     }
+     catch (error) {
+         voiceLogger.error('Failed to transcribe audio:', error);
+         throw new Error(`Audio transcription failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
+     }
+ }
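For context, a minimal sketch of how the exported transcribeAudio function above might be invoked; only its option names and return value come from voice.js, while the file name, file-tree string, and language value are hypothetical.

// Hypothetical caller (not part of the package): transcribe a saved voice note.
import { readFile } from 'node:fs/promises';
import { transcribeAudio } from './voice.js';

const audio = await readFile('./voice-note.mp3'); // Buffer input is accepted; sent as audio/mpeg
const text = await transcribeAudio({
    audio,
    prompt: 'src/\n  cli.ts\n  voice.ts', // injected into the <file_tree> section of the prompt
    language: 'English',
    directory: process.cwd(), // would enable grep/glob verification (currently disabled above)
    geminiApiKey: process.env.GEMINI_API_KEY,
});
console.log(text); // the transcription the model returned via the transcriptionResult tool call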
package/dist/worker-types.js ADDED
@@ -0,0 +1,4 @@
+ // Type definitions for worker thread message passing.
+ // Defines the protocol between main thread and GenAI worker for
+ // audio streaming, tool calls, and session lifecycle management.
+ export {};
package/dist/xml.js ADDED
@@ -0,0 +1,92 @@
+ // XML/HTML tag content extractor.
+ // Parses XML-like tags from strings (e.g., channel topics) to extract
+ // Kimaki configuration like directory paths and app IDs.
+ import { DomHandler, Parser, ElementType } from 'htmlparser2';
+ import { createLogger } from './logger.js';
+ const xmlLogger = createLogger('XML');
+ export function extractTagsArrays({ xml, tags }) {
+     const result = {
+         others: [],
+     };
+     // Initialize arrays for each tag
+     tags.forEach((tag) => {
+         result[tag] = [];
+     });
+     try {
+         const handler = new DomHandler((error, dom) => {
+             if (error) {
+                 xmlLogger.error('Error parsing XML:', error);
+             }
+             else {
+                 const findTags = (nodes, path = []) => {
+                     nodes.forEach((node) => {
+                         if (node.type === ElementType.Tag) {
+                             const element = node;
+                             const currentPath = [...path, element.name];
+                             const pathString = currentPath.join('.');
+                             // Extract content using original string positions
+                             const extractContent = () => {
+                                 // Use the element's own indices but exclude the tags
+                                 if (element.startIndex !== null &&
+                                     element.endIndex !== null) {
+                                     // Extract the full element including tags
+                                     const fullElement = xml.substring(element.startIndex, element.endIndex + 1);
+                                     // Find where content starts (after opening tag)
+                                     const contentStart = fullElement.indexOf('>') + 1;
+                                     // Find where content ends (before this element's closing tag)
+                                     const closingTag = `</${element.name}>`;
+                                     const contentEnd = fullElement.lastIndexOf(closingTag);
+                                     if (contentStart > 0 && contentEnd > contentStart) {
+                                         return fullElement.substring(contentStart, contentEnd);
+                                     }
+                                     return '';
+                                 }
+                                 return '';
+                             };
+                             // Check both single tag names and nested paths
+                             if (tags.includes(element.name)) {
+                                 const content = extractContent();
+                                 result[element.name]?.push(content);
+                             }
+                             // Check for nested path matches
+                             if (tags.includes(pathString)) {
+                                 const content = extractContent();
+                                 result[pathString]?.push(content);
+                             }
+                             if (element.children) {
+                                 findTags(element.children, currentPath);
+                             }
+                         }
+                         else if (node.type === ElementType.Text &&
+                             node.parent?.type === ElementType.Root) {
+                             const textNode = node;
+                             if (textNode.data.trim()) {
+                                 result.others?.push(textNode.data.trim());
+                             }
+                         }
+                     });
+                 };
+                 findTags(dom);
+             }
+         }, {
+             withStartIndices: true,
+             withEndIndices: true,
+             xmlMode: true,
+         });
+         const parser = new Parser(handler, {
+             xmlMode: true,
+             decodeEntities: false,
+         });
+         parser.write(xml);
+         parser.end();
+     }
+     catch (error) {
+         xmlLogger.error('Unexpected error in extractTags:', error);
+     }
+     return result;
+ }
+ export function extractNonXmlContent(xml) {
+     const result = extractTagsArrays({ xml, tags: [] });
+     return result.others.join('\n');
+ }
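A minimal sketch of the tag extraction this module performs; extractTagsArrays and its { xml, tags } argument are from xml.js above, while the topic string and tag names are hypothetical.

import { extractTagsArrays } from './xml.js';

// Hypothetical channel topic embedding configuration in XML-like tags.
const topic = 'Bot for my app <directory>/home/user/app</directory> <appId>1234567890</appId>';
const parsed = extractTagsArrays({ xml: topic, tags: ['directory', 'appId'] });
// parsed.directory -> ['/home/user/app']
// parsed.appId     -> ['1234567890']
// parsed.others    -> ['Bot for my app'] (root-level text outside any tag)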
package/dist/xml.test.js ADDED
@@ -0,0 +1,32 @@
+ import { describe, test, expect } from 'vitest';
+ import { extractNonXmlContent } from './xml.js';
+ describe('extractNonXmlContent', () => {
+     test('removes xml tags and returns only text content', () => {
+         const xml = 'Hello <tag>content</tag> world <nested><inner>deep</inner></nested> end';
+         expect(extractNonXmlContent(xml)).toMatchInlineSnapshot(`
+           "Hello
+           world
+           end"
+         `);
+     });
+     test('handles multiple text segments', () => {
+         const xml = 'Start <a>tag1</a> middle <b>tag2</b> finish';
+         expect(extractNonXmlContent(xml)).toMatchInlineSnapshot(`
+           "Start
+           middle
+           finish"
+         `);
+     });
+     test('handles only xml without text', () => {
+         const xml = '<root><child>content</child></root>';
+         expect(extractNonXmlContent(xml)).toMatchInlineSnapshot(`""`);
+     });
+     test('handles only text without xml', () => {
+         const xml = 'Just plain text';
+         expect(extractNonXmlContent(xml)).toMatchInlineSnapshot(`"Just plain text"`);
+     });
+     test('handles empty string', () => {
+         const xml = '';
+         expect(extractNonXmlContent(xml)).toMatchInlineSnapshot(`""`);
+     });
+ });
package/package.json ADDED
@@ -0,0 +1,60 @@
+ {
+     "name": "shuvmaki",
+     "module": "index.ts",
+     "type": "module",
+     "version": "0.4.26",
+     "scripts": {
+         "dev": "tsx --env-file .env src/cli.ts",
+         "prepublishOnly": "pnpm tsc",
+         "dev:bun": "DEBUG=1 bun --env-file .env src/cli.ts",
+         "watch": "tsx scripts/watch-session.ts",
+         "test:events": "tsx test-events.ts",
+         "pcm-to-mp3": "bun scripts/pcm-to-mp3",
+         "test:send": "tsx send-test-message.ts",
+         "register-commands": "tsx scripts/register-commands.ts"
+     },
+     "repository": "https://github.com/remorses/kimaki",
+     "bin": "bin.js",
+     "files": [
+         "dist",
+         "src",
+         "bin.js"
+     ],
+     "devDependencies": {
+         "@opencode-ai/plugin": "^1.1.12",
+         "@types/better-sqlite3": "^7.6.13",
+         "@types/bun": "latest",
+         "@types/js-yaml": "^4.0.9",
+         "@types/ms": "^2.1.0",
+         "@types/node": "^24.3.0",
+         "tsx": "^4.20.5",
+         "typescript": "^5.9.3"
+     },
+     "dependencies": {
+         "@ai-sdk/google": "^2.0.47",
+         "@clack/prompts": "^0.11.0",
+         "@discordjs/opus": "^0.10.0",
+         "@discordjs/voice": "^0.19.0",
+         "@google/genai": "^1.34.0",
+         "@opencode-ai/sdk": "^1.1.12",
+         "@purinton/resampler": "^1.0.4",
+         "@snazzah/davey": "^0.1.6",
+         "ai": "^5.0.114",
+         "better-sqlite3": "^12.3.0",
+         "cac": "^6.7.14",
+         "discord.js": "^14.16.3",
+         "domhandler": "^5.0.3",
+         "glob": "^13.0.0",
+         "go-try": "^3.0.2",
+         "htmlparser2": "^10.0.0",
+         "js-yaml": "^4.1.0",
+         "marked": "^16.3.0",
+         "picocolors": "^1.1.1",
+         "pretty-ms": "^9.3.0",
+         "prism-media": "^1.3.5",
+         "ripgrep-js": "^3.0.0",
+         "string-dedent": "^3.0.2",
+         "undici": "^7.16.0",
+         "zod": "^4.2.1"
+     }
+ }
package/src/__snapshots__/compact-session-context-no-system.md ADDED
@@ -0,0 +1,35 @@
+ [Assistant]: Still not showing. The session might not have a `system` field and the first assistant message might not have `synthetic: true`. Let me add debug logging to see what's available:
+ 
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= // Get system prompt if requested
+ if (includeSystemPrompt) {
+ // try session.system fie, newString= // Get system prompt if requested
+ if (includeSystemPrompt) {
+ // try session.system fie
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= } else if (messages.length > 0) {
+ // fallback: look for synthetic text part in first a, newString= } else if (messages.length > 0) {
+ // fallback: look for synthetic text part in first a
+ [Tool bash]: command=pnpm vitest run --testNamePattern "getCompactSessionContext generates" 2>&1 | tail -30, description=run test and check output, workdir=/Users/morse/Documents/GitHub/kimakivoice/discord, timeout=120000
+ [Tool bash]: command=grep -i "session keys\|text parts" tmp/kimaki.log | tail -10, description=check debug output, workdir=/Users/morse/Documents/GitHub/kimakivoice/discord
+ [Assistant]: Session doesn't have a `system` field. Let me fix the debug logging:
+ 
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= const textParts = (firstAssistant.parts || []).filter((p) => p.type === 'text')
+ , newString= const textParts = (firstAssistant.parts || []).filter((p) => p.type === 'text')
+ 
+ [Tool bash]: command=pnpm vitest run --testNamePattern "getCompactSessionContext generates" 2>&1 && grep -i "text parts" , description=run test and check debug, workdir=/Users/morse/Documents/GitHub/kimakivoice/discord, timeout=120000
+ [Assistant]: I see - the first assistant text part has **no `synthetic` field** (it's `undefined`), and it's only 44 chars. The session doesn't expose the system prompt.
+ 
+ Let me remove debug logging and fix the test:
+ 
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= // Get system prompt if requested
+ if (includeSystemPrompt) {
+ // try session.system fie, newString= // Get system prompt if requested
+ // Note: OpenCode SDK doesn't expose system prompt directl
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= try {
+ // fetch session info to get system prompt
+ const sessionResponse = await client.sess, newString= try {
+ const messagesResponse = await client.session.messages({
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.test.ts, oldString= expect(context).toBeTruthy()
+ expect(context.length).toBeGreaterThan(0)
+ // should have tool cal, newString= expect(context).toBeTruthy()
+ expect(context.length).toBeGreaterThan(0)
+ // should have tool cal
package/src/__snapshots__/compact-session-context.md ADDED
@@ -0,0 +1,47 @@
+ [Tool bash]: command=grep -A5 -B5 "Session" node_modules/@opencode-ai/.ignored_sdk/dist/gen/core/types.gen.d.ts 2>/dev/nu, description=check session types, workdir=/Users/morse/Documents/GitHub/kimakivoice/discord
+ [Tool read]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, offset=248, limit=35
+ [Assistant]: Let me update the code to also look for system role messages and fetch session info:
+ 
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= try {
+ const messagesResponse = await client.session.messages({
+ path: { id: sessionId },
+ , newString= try {
+ // fetch session info to get system prompt
+ const sessionResponse = await client.sess
+ [Tool bash]: command=pnpm vitest run -u --testNamePattern "getCompactSessionContext", description=run tests to check system prompt, workdir=/Users/morse/Documents/GitHub/kimakivoice/discord, timeout=120000
+ [Tool bash]: command=head -30 discord/src/__snapshots__/compact-session-context.md, description=check if system prompt is in snapshot
+ [Assistant]: Still not showing. The session might not have a `system` field and the first assistant message might not have `synthetic: true`. Let me add debug logging to see what's available:
+ 
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= // Get system prompt if requested
+ if (includeSystemPrompt) {
+ // try session.system fie, newString= // Get system prompt if requested
+ if (includeSystemPrompt) {
+ // try session.system fie
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= } else if (messages.length > 0) {
+ // fallback: look for synthetic text part in first a, newString= } else if (messages.length > 0) {
+ // fallback: look for synthetic text part in first a
+ [Tool bash]: command=pnpm vitest run --testNamePattern "getCompactSessionContext generates" 2>&1 | tail -30, description=run test and check output, workdir=/Users/morse/Documents/GitHub/kimakivoice/discord, timeout=120000
+ [Tool bash]: command=grep -i "session keys\|text parts" tmp/kimaki.log | tail -10, description=check debug output, workdir=/Users/morse/Documents/GitHub/kimakivoice/discord
+ [Assistant]: Session doesn't have a `system` field. Let me fix the debug logging:
+ 
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= const textParts = (firstAssistant.parts || []).filter((p) => p.type === 'text')
+ , newString= const textParts = (firstAssistant.parts || []).filter((p) => p.type === 'text')
+ 
+ [Tool bash]: command=pnpm vitest run --testNamePattern "getCompactSessionContext generates" 2>&1 && grep -i "text parts" , description=run test and check debug, workdir=/Users/morse/Documents/GitHub/kimakivoice/discord, timeout=120000
+ [Assistant]: I see - the first assistant text part has **no `synthetic` field** (it's `undefined`), and it's only 44 chars. The session doesn't expose the system prompt.
+ 
+ Let me remove debug logging and fix the test:
+ 
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= // Get system prompt if requested
+ if (includeSystemPrompt) {
+ // try session.system fie, newString= // Get system prompt if requested
+ // Note: OpenCode SDK doesn't expose system prompt directl
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.ts, oldString= try {
+ // fetch session info to get system prompt
+ const sessionResponse = await client.sess, newString= try {
+ const messagesResponse = await client.session.messages({
+ [Tool edit]: filePath=/Users/morse/Documents/GitHub/kimakivoice/discord/src/markdown.test.ts, oldString= expect(context).toBeTruthy()
+ expect(context.length).toBeGreaterThan(0)
+ // should have tool cal, newString= expect(context).toBeTruthy()
+ expect(context.length).toBeGreaterThan(0)
+ // should have tool cal