kimaki 0.0.3 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,104 @@
1
+ import { Worker } from 'node:worker_threads';
2
+ import { createLogger } from './logger.js';
3
+ const genaiWorkerLogger = createLogger('GENAI WORKER');
4
+ const genaiWrapperLogger = createLogger('GENAI WORKER WRAPPER');
5
/**
 * Spawn the GenAI worker thread and wire its messages to the caller's
 * callbacks. Resolves with a control interface once the worker posts
 * 'ready'; rejects if the worker errors before that.
 *
 * @param {object} options - Callbacks (onAssistantOpusPacket required,
 *   others optional) plus init data (directory, systemMessage, guildId,
 *   channelId) forwarded to the worker.
 * @returns {Promise<{sendRealtimeInput: Function, sendTextInput: Function,
 *   interrupt: Function, stop: Function}>}
 */
export function createGenAIWorker(options) {
    return new Promise((resolve, reject) => {
        const worker = new Worker(new URL('../dist/genai-worker.js', import.meta.url));
        // Route worker messages to the caller's callbacks.
        worker.on('message', (message) => {
            switch (message.type) {
                case 'assistantOpusPacket':
                    options.onAssistantOpusPacket(message.packet);
                    break;
                case 'assistantStartSpeaking':
                    options.onAssistantStartSpeaking?.();
                    break;
                case 'assistantStopSpeaking':
                    options.onAssistantStopSpeaking?.();
                    break;
                case 'assistantInterruptSpeaking':
                    options.onAssistantInterruptSpeaking?.();
                    break;
                case 'toolCallCompleted':
                    options.onToolCallCompleted?.(message);
                    break;
                case 'error':
                    genaiWorkerLogger.error('Error:', message.error);
                    options.onError?.(message.error);
                    break;
                case 'ready':
                    genaiWorkerLogger.log('Ready');
                    // Worker is initialized; hand the caller its control surface.
                    resolve({
                        sendRealtimeInput({ audio, audioStreamEnd }) {
                            worker.postMessage({
                                type: 'sendRealtimeInput',
                                audio,
                                audioStreamEnd,
                            });
                        },
                        sendTextInput(text) {
                            worker.postMessage({
                                type: 'sendTextInput',
                                text,
                            });
                        },
                        interrupt() {
                            worker.postMessage({ type: 'interrupt' });
                        },
                        async stop() {
                            genaiWrapperLogger.log('Stopping worker...');
                            // Send stop message to trigger graceful shutdown.
                            worker.postMessage({ type: 'stop' });
                            // Wait for the worker to exit, force-terminating after 5s.
                            await new Promise((resolveStop) => {
                                let settled = false;
                                // Timer is cleared on graceful exit so a stale 5s
                                // timeout cannot keep the event loop alive.
                                const timer = setTimeout(() => {
                                    if (settled)
                                        return;
                                    settled = true;
                                    genaiWrapperLogger.log('Worker did not exit gracefully, terminating...');
                                    worker.terminate().then(() => {
                                        genaiWrapperLogger.log('Worker terminated');
                                        resolveStop();
                                    });
                                }, 5000);
                                worker.once('exit', (code) => {
                                    if (settled)
                                        return;
                                    settled = true;
                                    clearTimeout(timer);
                                    // The logger already prepends "[GENAI WORKER WRAPPER]";
                                    // no inline prefix needed (old code double-prefixed).
                                    genaiWrapperLogger.log(`Worker exited with code ${code}`);
                                    resolveStop();
                                });
                            });
                        },
                    });
                    break;
            }
        });
        // Surface worker-level failures; rejects the pending ready-promise.
        worker.on('error', (error) => {
            genaiWorkerLogger.error('Worker error:', error);
            reject(error);
        });
        worker.on('exit', (code) => {
            if (code !== 0) {
                genaiWorkerLogger.error(`Worker stopped with exit code ${code}`);
            }
        });
        // Kick off initialization inside the worker.
        const initMessage = {
            type: 'init',
            directory: options.directory,
            systemMessage: options.systemMessage,
            guildId: options.guildId,
            channelId: options.channelId,
        };
        worker.postMessage(initMessage);
    });
}
@@ -0,0 +1,293 @@
1
+ import { parentPort, threadId } from 'node:worker_threads';
2
+ import { createWriteStream } from 'node:fs';
3
+ import { mkdir } from 'node:fs/promises';
4
+ import path from 'node:path';
5
+ import { Resampler } from '@purinton/resampler';
6
+ import * as prism from 'prism-media';
7
+ import { startGenAiSession } from './genai.js';
8
+ import { getTools } from './tools.js';
9
+ import { createLogger } from './logger.js';
10
// Refuse to run outside a worker thread: everything below assumes
// parentPort is available for messaging the main thread.
if (!parentPort) {
    throw new Error('This module must be run as a worker thread');
}
const workerLogger = createLogger(`WORKER ${threadId}`);
workerLogger.log('GenAI worker started');
// Define sendError early so it can be used by global handlers.
// Posts an error string (not an Error object — structured clone safety)
// to the main thread; silently no-ops if parentPort is unavailable.
function sendError(error) {
    if (parentPort) {
        parentPort.postMessage({
            type: 'error',
            error,
        });
    }
}
// Add global error handlers for the worker thread
process.on('uncaughtException', (error) => {
    workerLogger.error('Uncaught exception in worker:', error);
    sendError(`Worker crashed: ${error.message}`);
    // Exit immediately on uncaught exception
    process.exit(1);
});
process.on('unhandledRejection', (reason, promise) => {
    workerLogger.error('Unhandled rejection in worker:', reason, 'at promise:', promise);
    sendError(`Worker unhandled rejection: ${reason}`);
    // NOTE(review): unlike uncaughtException, the worker keeps running here —
    // presumably intentional since a rejection may be recoverable; confirm.
});
// Audio configuration: the pipeline converts GenAI's 24kHz mono PCM into
// the 48kHz stereo Opus frames Discord expects.
const AUDIO_CONFIG = {
    inputSampleRate: 24000, // GenAI output
    inputChannels: 1,
    outputSampleRate: 48000, // Discord expects
    outputChannels: 2,
    opusFrameSize: 960, // 20ms at 48kHz
};
// Initialize audio processing components
const resampler = new Resampler({
    inRate: AUDIO_CONFIG.inputSampleRate,
    outRate: AUDIO_CONFIG.outputSampleRate,
    inChannels: AUDIO_CONFIG.inputChannels,
    outChannels: AUDIO_CONFIG.outputChannels,
    volume: 1,
    filterWindow: 8,
});
const opusEncoder = new prism.opus.Encoder({
    rate: AUDIO_CONFIG.outputSampleRate,
    channels: AUDIO_CONFIG.outputChannels,
    frameSize: AUDIO_CONFIG.opusFrameSize,
});
// Pipe resampler to encoder with error handling: PCM in one end,
// Opus packets out the encoder's 'data' events (handled below).
resampler.pipe(opusEncoder).on('error', (error) => {
    workerLogger.error('Pipe error between resampler and encoder:', error);
    sendError(`Audio pipeline error: ${error.message}`);
});
62
// Outbound Opus packet queue, drained on a fixed 20ms cadence so packets
// reach the main thread at Discord's expected frame pacing.
const opusPacketQueue = [];
let packetInterval = null;
/**
 * Begin draining the packet queue: every 20ms, pop one Opus packet and
 * post it to the main thread as a transferable ArrayBuffer. Idempotent —
 * calling while already running does nothing.
 */
function startPacketSending() {
    if (packetInterval !== null) {
        return;
    }
    packetInterval = setInterval(() => {
        const nextPacket = opusPacketQueue.shift();
        if (nextPacket === undefined) {
            return;
        }
        // Copy out exactly this packet's bytes so the resulting ArrayBuffer
        // can be handed over via the transfer list (zero-copy postMessage).
        const start = nextPacket.byteOffset;
        const transferable = nextPacket.buffer.slice(start, start + nextPacket.byteLength);
        parentPort.postMessage({
            type: 'assistantOpusPacket',
            packet: transferable,
        }, [transferable]);
    }, 20);
}
81
/**
 * Stop the 20ms drain timer (if running) and discard any packets still
 * waiting in the queue.
 */
function stopPacketSending() {
    if (packetInterval !== null) {
        clearInterval(packetInterval);
        packetInterval = null;
    }
    // Drop anything not yet sent; playback is over.
    opusPacketQueue.splice(0, opusPacketQueue.length);
}
88
// Session state
let session = null;
// Audio log stream for assistant audio
let audioLogStream = null;
/**
 * When DEBUG is set, open a write stream for logging the assistant's raw
 * 24kHz mono s16le PCM under discord-audio-logs/<guildId>/<channelId>.
 *
 * @param {string} guildId - Discord guild id, used as a directory name.
 * @param {string} channelId - Discord channel id, used as a directory name.
 * @returns {Promise<import('node:fs').WriteStream|null>} The stream, or
 *   null when debugging is off or the directory cannot be created.
 */
async function createAssistantAudioLogStream(guildId, channelId) {
    if (!process.env.DEBUG) {
        return null;
    }
    // ISO timestamp with ':' and '.' replaced so it is filename-safe.
    const stamp = new Date().toISOString().replace(/[:.]/g, '-');
    const targetDir = path.join(process.cwd(), 'discord-audio-logs', guildId, channelId);
    try {
        await mkdir(targetDir, { recursive: true });
        // Create stream for assistant audio (24kHz mono s16le PCM)
        const logFilePath = path.join(targetDir, `assistant_${stamp}.24.pcm`);
        const logStream = createWriteStream(logFilePath);
        // Swallow write errors so a bad disk never crashes the worker.
        logStream.on('error', (error) => {
            workerLogger.error(`Assistant audio log stream error:`, error);
        });
        workerLogger.log(`Created assistant audio log: ${logFilePath}`);
        return logStream;
    }
    catch (error) {
        workerLogger.error(`Failed to create audio log directory:`, error);
        return null;
    }
}
116
// Handle encoded Opus packets: queue them for paced delivery by the
// 20ms interval in startPacketSending rather than posting immediately.
opusEncoder.on('data', (packet) => {
    opusPacketQueue.push(packet);
});
// Handle stream end events
opusEncoder.on('end', () => {
    workerLogger.log('Opus encoder stream ended');
});
resampler.on('end', () => {
    workerLogger.log('Resampler stream ended');
});
// Handle errors
resampler.on('error', (error) => {
    workerLogger.error(`Resampler error:`, error);
    sendError(`Resampler error: ${error.message}`);
});
opusEncoder.on('error', (error) => {
    workerLogger.error(`Encoder error:`, error);
    // Check for specific corrupted data errors
    if (error.message?.includes('The compressed data passed is corrupted')) {
        // Corrupted input is logged as a warning only — not forwarded to the
        // main thread as a fatal error, so playback can continue.
        workerLogger.warn('Received corrupted audio data in opus encoder');
    }
    else {
        sendError(`Encoder error: ${error.message}`);
    }
});
142
// End a stream and wait for its end-callback, giving up after timeoutMs.
// The timer is cleared on completion so a stale timeout cannot keep the
// worker's event loop alive after a prompt close.
function endStreamWithTimeout(stream, doneMessage, timeoutMs) {
    return new Promise((resolve) => {
        let settled = false;
        const timer = setTimeout(() => {
            if (!settled) {
                settled = true;
                resolve();
            }
        }, timeoutMs);
        stream.end(() => {
            if (!settled) {
                settled = true;
                clearTimeout(timer);
                workerLogger.log(doneMessage);
                resolve();
            }
        });
    });
}
/**
 * Gracefully release all worker resources: stop the packet timer, close
 * the GenAI session, flush and close the audio log, then end the encoder
 * and resampler streams. Every wait is bounded by a timeout so shutdown
 * can never hang, and stream errors are logged rather than allowed to
 * abort the remaining teardown.
 */
async function cleanupAsync() {
    workerLogger.log(`Starting async cleanup`);
    stopPacketSending();
    if (session) {
        workerLogger.log(`Stopping GenAI session`);
        session.stop();
        session = null;
    }
    // Wait for audio log stream to finish writing
    if (audioLogStream) {
        workerLogger.log(`Closing assistant audio log stream`);
        await new Promise((resolve) => {
            let settled = false;
            const timer = setTimeout(() => {
                if (!settled) {
                    settled = true;
                    workerLogger.log(`Audio stream close timeout, continuing`);
                    resolve();
                }
            }, 3000);
            const finish = () => {
                if (!settled) {
                    settled = true;
                    clearTimeout(timer);
                    resolve();
                }
            };
            // A stream error must not reject (the old code did, aborting the
            // encoder/resampler teardown below and risking an unhandled
            // rejection after the timeout had already resolved).
            audioLogStream.on('error', (error) => {
                workerLogger.error(`Audio log stream error during close:`, error);
                finish();
            });
            audioLogStream.end(() => {
                workerLogger.log(`Assistant audio log stream closed`);
                finish();
            });
        });
        audioLogStream = null;
    }
    // Unpipe and end the encoder first, then the resampler feeding it.
    resampler.unpipe(opusEncoder);
    await endStreamWithTimeout(opusEncoder, `Opus encoder ended`, 1000);
    await endStreamWithTimeout(resampler, `Resampler ended`, 1000);
    workerLogger.log(`Async cleanup complete`);
}
189
// Handle messages from main thread. Supported message types:
//   init              - build audio log + tools + GenAI session, reply 'ready'
//   sendRealtimeInput - forward user audio to the live session
//   sendTextInput     - forward text to the live session
//   interrupt         - drop any queued assistant audio immediately
//   stop              - graceful teardown via cleanupAsync
parentPort.on('message', async (message) => {
    try {
        switch (message.type) {
            case 'init': {
                workerLogger.log(`Initializing with directory:`, message.directory);
                // Create audio log stream for assistant audio (DEBUG only)
                audioLogStream = await createAssistantAudioLogStream(message.guildId, message.channelId);
                // Start packet sending interval
                startPacketSending();
                // Get tools for the directory
                const { tools } = await getTools({
                    directory: message.directory,
                    onMessageCompleted: (params) => {
                        // Relay tool completion events to the main thread.
                        parentPort.postMessage({
                            type: 'toolCallCompleted',
                            ...params,
                        });
                    },
                });
                // Start GenAI session
                session = await startGenAiSession({
                    tools,
                    systemMessage: message.systemMessage,
                    onAssistantAudioChunk({ data }) {
                        // Write to audio log if enabled (guard against a stream
                        // destroyed mid-shutdown).
                        if (audioLogStream && !audioLogStream.destroyed) {
                            audioLogStream.write(data, (err) => {
                                if (err) {
                                    workerLogger.error('Error writing to audio log:', err);
                                }
                            });
                        }
                        // Write PCM data to resampler which will output Opus packets
                        if (!resampler.destroyed) {
                            resampler.write(data, (err) => {
                                if (err) {
                                    workerLogger.error('Error writing to resampler:', err);
                                    sendError(`Failed to process audio: ${err.message}`);
                                }
                            });
                        }
                    },
                    // Speaking-state transitions are relayed verbatim to the main thread.
                    onAssistantStartSpeaking() {
                        parentPort.postMessage({
                            type: 'assistantStartSpeaking',
                        });
                    },
                    onAssistantStopSpeaking() {
                        parentPort.postMessage({
                            type: 'assistantStopSpeaking',
                        });
                    },
                    onAssistantInterruptSpeaking() {
                        parentPort.postMessage({
                            type: 'assistantInterruptSpeaking',
                        });
                    },
                });
                // Notify main thread we're ready (resolves createGenAIWorker's promise)
                parentPort.postMessage({
                    type: 'ready',
                });
                break;
            }
            case 'sendRealtimeInput': {
                if (!session) {
                    sendError('Session not initialized');
                    return;
                }
                session.session.sendRealtimeInput({
                    audio: message.audio,
                    audioStreamEnd: message.audioStreamEnd,
                });
                break;
            }
            case 'sendTextInput': {
                if (!session) {
                    sendError('Session not initialized');
                    return;
                }
                // NOTE(review): text goes through sendRealtimeInput({ text }) rather
                // than a dedicated text API — presumably supported by the live
                // session; confirm against @google/genai docs.
                session.session.sendRealtimeInput({
                    text: message.text,
                });
                break;
            }
            case 'interrupt': {
                workerLogger.log(`Interrupting playback`);
                // Clear the opus packet queue so buffered audio stops immediately.
                opusPacketQueue.length = 0;
                break;
            }
            case 'stop': {
                workerLogger.log(`Stopping worker`);
                await cleanupAsync();
                // process.exit(0)
                break;
            }
        }
    }
    catch (error) {
        // Report the failure but keep the worker alive for further messages.
        workerLogger.error(`Error handling message:`, error);
        sendError(error instanceof Error ? error.message : 'Unknown error in worker');
    }
});
package/dist/genai.js ADDED
@@ -0,0 +1,224 @@
1
+ import { GoogleGenAI, LiveServerMessage, MediaResolution, Modality, Session, } from '@google/genai';
2
+ import { writeFile } from 'fs';
3
+ import { createLogger } from './logger.js';
4
+ import { aiToolToCallableTool } from './ai-tool-to-genai.js';
5
+ const genaiLogger = createLogger('GENAI');
6
+ const audioParts = [];
7
/**
 * Write binary audio content to fileName (replacing any existing file),
 * logging success or failure. Fire-and-forget: errors are logged, not thrown.
 *
 * @param {string} fileName - Destination path.
 * @param {Buffer} content - Raw bytes to write. No encoding argument is
 *   passed: writeFile ignores encodings for Buffer data, and the old
 *   'utf8' argument was misleading for binary content.
 */
function saveBinaryFile(fileName, content) {
    writeFile(fileName, content, (err) => {
        if (err) {
            genaiLogger.error(`Error writing file ${fileName}:`, err);
            return;
        }
        // writeFile replaces the file wholesale — the previous "Appending"
        // message was inaccurate.
        genaiLogger.log(`Wrote stream content to file ${fileName}.`);
    });
}
16
/**
 * Prepend a 44-byte WAV header (derived from mimeType) to the
 * concatenated raw PCM chunks and return the complete WAV file buffer.
 *
 * @param {Buffer[]} rawData - PCM chunks in playback order.
 * @param {string} mimeType - e.g. "audio/L16;rate=24000".
 * @returns {Buffer} Header + PCM payload.
 */
function convertToWav(rawData, mimeType) {
    let totalBytes = 0;
    for (const chunk of rawData) {
        totalBytes += chunk.length;
    }
    const header = createWavHeader(totalBytes, parseMimeType(mimeType));
    return Buffer.concat([header, Buffer.concat(rawData)]);
}
23
/**
 * Parse an audio MIME type like "audio/L16;rate=24000" into WAV options.
 * Defaults to mono 16-bit; an "L<bits>" subtype overrides bitsPerSample
 * and a "rate" parameter sets sampleRate.
 *
 * @param {string} mimeType - MIME type string, possibly with parameters.
 * @returns {{numChannels: number, bitsPerSample: number, sampleRate?: number}}
 */
function parseMimeType(mimeType) {
    const segments = mimeType.split(';').map((segment) => segment.trim());
    const [fileType, ...params] = segments;
    const subtype = fileType?.split('/')[1];
    const options = {
        numChannels: 1,
        bitsPerSample: 16,
    };
    // Linear-PCM subtypes encode the sample width, e.g. "L16" -> 16 bits.
    if (subtype && subtype.startsWith('L')) {
        const parsedBits = Number.parseInt(subtype.slice(1), 10);
        if (!Number.isNaN(parsedBits)) {
            options.bitsPerSample = parsedBits;
        }
    }
    for (const param of params) {
        const [key, value] = param.split('=').map((part) => part.trim());
        if (key === 'rate') {
            options.sampleRate = Number.parseInt(value || '', 10);
        }
    }
    return options;
}
44
/**
 * Build the canonical 44-byte PCM WAV header for dataLength bytes of audio.
 * Layout follows http://soundfile.sapp.org/doc/WaveFormat.
 *
 * @param {number} dataLength - Size of the PCM payload in bytes.
 * @param {{numChannels: number, sampleRate: number, bitsPerSample: number}} options
 * @returns {Buffer} A 44-byte header buffer.
 */
function createWavHeader(dataLength, options) {
    const { numChannels, sampleRate, bitsPerSample } = options;
    const byteRate = (sampleRate * numChannels * bitsPerSample) / 8;
    const blockAlign = (numChannels * bitsPerSample) / 8;
    const header = Buffer.alloc(44);
    header.write('RIFF', 0);                     // ChunkID
    header.writeUInt32LE(36 + dataLength, 4);    // ChunkSize
    header.write('WAVE', 8);                     // Format
    header.write('fmt ', 12);                    // Subchunk1ID
    header.writeUInt32LE(16, 16);                // Subchunk1Size (PCM)
    header.writeUInt16LE(1, 20);                 // AudioFormat (1 = PCM)
    header.writeUInt16LE(numChannels, 22);       // NumChannels
    header.writeUInt32LE(sampleRate, 24);        // SampleRate
    header.writeUInt32LE(byteRate, 28);          // ByteRate
    header.writeUInt16LE(blockAlign, 32);        // BlockAlign
    header.writeUInt16LE(bitsPerSample, 34);     // BitsPerSample
    header.write('data', 36);                    // Subchunk2ID
    header.writeUInt32LE(dataLength, 40);        // Subchunk2Size
    return header;
}
65
/**
 * Fallback audio sink used when no onAssistantAudioChunk callback is
 * supplied: accumulate every chunk in module-level audioParts and rewrite
 * 'audio.wav' with all audio received so far.
 *
 * @param {{data: Buffer, mimeType: string}} chunk - Decoded audio chunk.
 */
function defaultAudioChunkHandler({ data, mimeType, }) {
    audioParts.push(data);
    saveBinaryFile('audio.wav', convertToWav(audioParts, mimeType));
}
71
/**
 * Open a streaming session against the Gemini live audio model.
 *
 * @param {object} [options]
 * @param {function} [options.onAssistantAudioChunk] - Called with
 *   { data: Buffer, mimeType } for each assistant audio chunk; defaults to
 *   defaultAudioChunkHandler (rewrites an accumulating 'audio.wav').
 * @param {function} [options.onAssistantStartSpeaking] - Fired on the first
 *   audio chunk of an assistant turn.
 * @param {function} [options.onAssistantStopSpeaking] - Fired on turnComplete
 *   while the assistant was speaking.
 * @param {function} [options.onAssistantInterruptSpeaking] - Fired when the
 *   server reports the assistant was interrupted mid-speech.
 * @param {string} [options.systemMessage] - System instruction text.
 * @param {object} [options.tools] - AI SDK tools, converted to GenAI
 *   CallableTools and exposed to the model.
 * @returns {Promise<{session: object, stop: () => void}>} The live session
 *   plus a stop() that closes it at most once.
 */
export async function startGenAiSession({ onAssistantAudioChunk, onAssistantStartSpeaking, onAssistantStopSpeaking, onAssistantInterruptSpeaking, systemMessage, tools, } = {}) {
    let session = undefined;
    const callableTools = [];
    // Tracks whether we are inside a spoken assistant turn so that the
    // start/stop/interrupt callbacks fire exactly once per transition.
    let isAssistantSpeaking = false;
    const audioChunkHandler = onAssistantAudioChunk || defaultAudioChunkHandler;
    // Convert AI SDK tools to GenAI CallableTools
    if (tools) {
        for (const [name, tool] of Object.entries(tools)) {
            callableTools.push(aiToolToCallableTool(tool, name));
        }
    }
    // Dispatch one server message: tool calls, audio/text parts,
    // transcriptions, and turn-state transitions.
    function handleModelTurn(message) {
        if (message.toolCall) {
            genaiLogger.log('Tool call:', message.toolCall);
            // Handle tool calls
            if (message.toolCall.functionCalls && callableTools.length > 0) {
                for (const tool of callableTools) {
                    // Only invoke tools that this batch of function calls names.
                    if (!message.toolCall.functionCalls.some((x) => x.name === tool.name)) {
                        continue;
                    }
                    // Fire-and-forget: responses are sent back asynchronously and
                    // failures are logged, never rethrown into the socket callback.
                    tool
                        .callTool(message.toolCall.functionCalls)
                        .then((parts) => {
                        const functionResponses = parts
                            .filter((part) => part.functionResponse)
                            .map((part) => ({
                            response: part.functionResponse.response,
                            id: part.functionResponse.id,
                            name: part.functionResponse.name,
                        }));
                        // Only respond if the session is still open.
                        if (functionResponses.length > 0 && session) {
                            session.sendToolResponse({ functionResponses });
                            genaiLogger.log('client-toolResponse: ' +
                                JSON.stringify({ functionResponses }));
                        }
                    })
                        .catch((error) => {
                        genaiLogger.error('Error handling tool calls:', error);
                    });
                }
            }
        }
        if (message.serverContent?.modelTurn?.parts) {
            for (const part of message.serverContent.modelTurn.parts) {
                if (part?.fileData) {
                    genaiLogger.log(`File: ${part?.fileData.fileUri}`);
                }
                if (part?.inlineData) {
                    const inlineData = part.inlineData;
                    // Non-audio payloads (e.g. images) are skipped entirely.
                    if (!inlineData.mimeType ||
                        !inlineData.mimeType.startsWith('audio/')) {
                        genaiLogger.log('Skipping non-audio inlineData:', inlineData.mimeType);
                        continue;
                    }
                    // Trigger start speaking callback the first time audio is received
                    if (!isAssistantSpeaking && onAssistantStartSpeaking) {
                        isAssistantSpeaking = true;
                        onAssistantStartSpeaking();
                    }
                    // Inline audio arrives base64-encoded; decode before handing off.
                    const buffer = Buffer.from(inlineData?.data ?? '', 'base64');
                    audioChunkHandler({
                        data: buffer,
                        mimeType: inlineData.mimeType ?? '',
                    });
                }
                if (part?.text) {
                    genaiLogger.log('Text:', part.text);
                }
            }
        }
        // Handle input transcription (user's audio transcription)
        if (message.serverContent?.inputTranscription?.text) {
            genaiLogger.log('[user transcription]', message.serverContent.inputTranscription.text);
        }
        // Handle output transcription (model's audio transcription)
        if (message.serverContent?.outputTranscription?.text) {
            genaiLogger.log('[assistant transcription]', message.serverContent.outputTranscription.text);
        }
        if (message.serverContent?.interrupted) {
            genaiLogger.log('Assistant was interrupted');
            if (isAssistantSpeaking && onAssistantInterruptSpeaking) {
                isAssistantSpeaking = false;
                onAssistantInterruptSpeaking();
            }
        }
        if (message.serverContent?.turnComplete) {
            genaiLogger.log('Assistant turn complete');
            if (isAssistantSpeaking && onAssistantStopSpeaking) {
                isAssistantSpeaking = false;
                onAssistantStopSpeaking();
            }
        }
    }
    const ai = new GoogleGenAI({
        apiKey: process.env.GEMINI_API_KEY,
    });
    const model = 'models/gemini-2.5-flash-live-preview';
    session = await ai.live.connect({
        model,
        callbacks: {
            onopen: function () {
                genaiLogger.debug('Opened');
            },
            onmessage: function (message) {
                // genaiLogger.log(message)
                try {
                    handleModelTurn(message);
                }
                catch (error) {
                    // Never let a handler error tear down the socket callbacks.
                    genaiLogger.error('Error handling turn:', error);
                }
            },
            onerror: function (e) {
                genaiLogger.debug('Error:', e.message);
            },
            onclose: function (e) {
                genaiLogger.debug('Close:', e.reason);
            },
        },
        config: {
            tools: callableTools,
            responseModalities: [Modality.AUDIO],
            mediaResolution: MediaResolution.MEDIA_RESOLUTION_MEDIUM,
            inputAudioTranscription: {}, // transcribes your input speech
            outputAudioTranscription: {}, // transcribes the model's spoken audio
            systemInstruction: {
                parts: [
                    {
                        text: systemMessage || '',
                    },
                ],
            },
            speechConfig: {
                voiceConfig: {
                    prebuiltVoiceConfig: {
                        voiceName: 'Charon', // Orus also not bad
                    },
                },
            },
            // NOTE(review): token counts are passed as strings — presumably the
            // API accepts string-encoded integers; confirm against @google/genai.
            contextWindowCompression: {
                triggerTokens: '25600',
                slidingWindow: { targetTokens: '12800' },
            },
        },
    });
    return {
        session,
        stop: () => {
            // Clear the closure-visible reference first so concurrent callers
            // (e.g. pending tool responses) observe the session as gone.
            const currentSession = session;
            session = undefined;
            currentSession?.close();
        },
    };
}
package/dist/logger.js ADDED
@@ -0,0 +1,10 @@
1
+ import { log } from '@clack/prompts';
2
/**
 * Create a console-style logger whose output is routed through
 * @clack/prompts, with every line prefixed by "[<prefix>]".
 * `log` and `debug` are aliases for info-level output.
 *
 * @param {string} prefix - Tag prepended to every message.
 * @returns {{log: Function, error: Function, warn: Function, info: Function, debug: Function}}
 */
export function createLogger(prefix) {
    const format = (args) => [`[${prefix}]`, ...args.map((arg) => String(arg))].join(' ');
    const info = (...args) => log.info(format(args));
    return {
        log: info,
        error: (...args) => log.error(format(args)),
        warn: (...args) => log.warn(format(args)),
        info,
        debug: info,
    };
}