@eldrforge/ai-service 0.1.1

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (53)
  1. package/.github/dependabot.yml +12 -0
  2. package/.github/workflows/npm-publish.yml +48 -0
  3. package/.github/workflows/test.yml +33 -0
  4. package/LICENSE +190 -0
  5. package/README.md +48 -0
  6. package/dist/index.js +816 -0
  7. package/dist/instructions/commit.md +133 -0
  8. package/dist/instructions/release.md +188 -0
  9. package/dist/instructions/review.md +169 -0
  10. package/dist/personas/releaser.md +24 -0
  11. package/dist/personas/you.md +55 -0
  12. package/dist/src/ai.d.ts.map +1 -0
  13. package/dist/src/index.d.ts.map +1 -0
  14. package/dist/src/interactive.d.ts.map +1 -0
  15. package/dist/src/logger.d.ts.map +1 -0
  16. package/dist/src/prompts/commit.d.ts.map +1 -0
  17. package/dist/src/prompts/index.d.ts.map +1 -0
  18. package/dist/src/prompts/release.d.ts.map +1 -0
  19. package/dist/src/prompts/review.d.ts.map +1 -0
  20. package/dist/src/types.d.ts.map +1 -0
  21. package/eslint.config.mjs +84 -0
  22. package/package.json +75 -0
  23. package/src/ai.ts +421 -0
  24. package/src/index.ts +14 -0
  25. package/src/interactive.ts +562 -0
  26. package/src/logger.ts +69 -0
  27. package/src/prompts/commit.ts +85 -0
  28. package/src/prompts/index.ts +28 -0
  29. package/src/prompts/instructions/commit.md +133 -0
  30. package/src/prompts/instructions/release.md +188 -0
  31. package/src/prompts/instructions/review.md +169 -0
  32. package/src/prompts/personas/releaser.md +24 -0
  33. package/src/prompts/personas/you.md +55 -0
  34. package/src/prompts/release.ts +118 -0
  35. package/src/prompts/review.ts +72 -0
  36. package/src/types.ts +112 -0
  37. package/tests/ai-complete-coverage.test.ts +241 -0
  38. package/tests/ai-create-completion.test.ts +288 -0
  39. package/tests/ai-edge-cases.test.ts +221 -0
  40. package/tests/ai-openai-error.test.ts +35 -0
  41. package/tests/ai-transcribe.test.ts +169 -0
  42. package/tests/ai.test.ts +139 -0
  43. package/tests/interactive-editor.test.ts +253 -0
  44. package/tests/interactive-secure-temp.test.ts +264 -0
  45. package/tests/interactive-user-choice.test.ts +173 -0
  46. package/tests/interactive-user-text.test.ts +174 -0
  47. package/tests/interactive.test.ts +94 -0
  48. package/tests/logger-noop.test.ts +40 -0
  49. package/tests/logger.test.ts +122 -0
  50. package/tests/prompts.test.ts +179 -0
  51. package/tsconfig.json +35 -0
  52. package/vite.config.ts +69 -0
  53. package/vitest.config.ts +25 -0
package/eslint.config.mjs ADDED
@@ -0,0 +1,84 @@
+ import { defineConfig, globalIgnores } from "eslint/config";
+ import typescriptEslint from "@typescript-eslint/eslint-plugin";
+ import importPlugin from "eslint-plugin-import";
+ import globals from "globals";
+ import tsParser from "@typescript-eslint/parser";
+ import path from "node:path";
+ import { fileURLToPath } from "node:url";
+ import js from "@eslint/js";
+ import { FlatCompat } from "@eslint/eslintrc";
+
+ const __filename = fileURLToPath(import.meta.url);
+ const __dirname = path.dirname(__filename);
+ const compat = new FlatCompat({
+     baseDirectory: __dirname,
+     recommendedConfig: js.configs.recommended,
+     allConfig: js.configs.all
+ });
+
+ export default defineConfig([
+     globalIgnores([
+         "dist/**",
+         "node_modules/**",
+         "**/*.test.ts",
+         "temp-dist/**",
+     ]),
+     {
+         extends: compat.extends("eslint:recommended", "plugin:@typescript-eslint/recommended"),
+
+         plugins: {
+             "@typescript-eslint": typescriptEslint,
+             "import": importPlugin,
+         },
+
+         languageOptions: {
+             globals: {
+                 ...globals.node,
+             },
+
+             parser: tsParser,
+             ecmaVersion: "latest",
+             sourceType: "module",
+         },
+
+         rules: {
+             "@typescript-eslint/no-explicit-any": "off",
+             "@typescript-eslint/explicit-function-return-type": "off",
+
+             "@typescript-eslint/no-unused-vars": ["warn", {
+                 argsIgnorePattern: "^_",
+             }],
+
+             indent: ["warn", 4, {
+                 SwitchCase: 1,
+             }],
+
+             "import/extensions": ["error", "never", {
+                 ignorePackages: true,
+                 pattern: {
+                     "js": "never",
+                     "ts": "never",
+                     "d": "always"
+                 }
+             }],
+
+             "import/no-extraneous-dependencies": ["error", {
+                 devDependencies: true,
+                 optionalDependencies: false,
+                 peerDependencies: false,
+             }],
+
+             "no-console": ["error"],
+
+             "no-restricted-imports": ["error", {
+                 paths: [],
+                 patterns: [
+                     {
+                         group: ["src/**"],
+                         message: "Use absolute imports instead of relative imports"
+                     }
+                 ]
+             }]
+         },
+     }]);
+
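For orientation, here is a minimal sketch of what the import rules above accept and reject. The module paths are hypothetical and exist only to illustrate the configured behavior:

```ts
// Hypothetical imports, shown only to illustrate the rules configured above.
import { getLogger } from './logger';      // OK: "import/extensions" wants no .ts/.js suffix
import type { AIConfig } from './types';   // OK: same rule applies to type-only imports
// import { x } from './logger.ts';        // error: "import/extensions" ("ts": "never")
// import { y } from 'src/ai';             // error: "no-restricted-imports" (group "src/**")
// console.log('debug');                   // error: "no-console"
```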
package/package.json ADDED
@@ -0,0 +1,75 @@
+ {
+     "name": "@eldrforge/ai-service",
+     "version": "0.1.1",
+     "description": "AI-powered content generation for automation - OpenAI integration with structured prompts",
+     "main": "dist/index.js",
+     "type": "module",
+     "exports": {
+         ".": {
+             "import": "./dist/index.js",
+             "types": "./dist/index.d.ts"
+         }
+     },
+     "repository": {
+         "type": "git",
+         "url": "git+https://github.com/calenvarek/ai-service.git"
+     },
+     "scripts": {
+         "build": "npm run lint && tsc --noEmit && vite build",
+         "dev": "vite",
+         "watch": "vite build --watch",
+         "test": "vitest run --coverage",
+         "lint": "eslint . --ext .ts",
+         "lint:fix": "eslint . --ext .ts --fix",
+         "clean": "rm -rf dist",
+         "precommit": "npm run clean && npm run build && npm run lint && npm run test",
+         "prepublishOnly": "npm run clean && npm run lint && npm run build && npm run test"
+     },
+     "keywords": [
+         "ai",
+         "openai",
+         "llm",
+         "prompt-engineering",
+         "content-generation",
+         "automation",
+         "gpt",
+         "commit-messages",
+         "release-notes"
+     ],
+     "author": "Calen Varek <calenvarek@gmail.com>",
+     "license": "Apache-2.0",
+     "dependencies": {
+         "@eldrforge/git-tools": "^0.1.3",
+         "@riotprompt/riotprompt": "^0.0.8",
+         "openai": "^6.3.0"
+     },
+     "peerDependencies": {
+         "winston": "^3.17.0"
+     },
+     "peerDependenciesMeta": {
+         "winston": {
+             "optional": true
+         }
+     },
+     "devDependencies": {
+         "@eslint/eslintrc": "^3.3.1",
+         "@eslint/js": "^9.33.0",
+         "@swc/core": "^1.13.3",
+         "@types/node": "^24.2.1",
+         "@types/winston": "^2.4.4",
+         "@typescript-eslint/eslint-plugin": "^8.39.1",
+         "@typescript-eslint/parser": "^8.39.1",
+         "@vitest/coverage-v8": "^3.2.4",
+         "esbuild": "0.25.10",
+         "eslint": "^9.33.0",
+         "eslint-plugin-import": "^2.32.0",
+         "globals": "^16.3.0",
+         "mockdate": "^3.0.5",
+         "typescript": "^5.9.2",
+         "vite": "^7.1.2",
+         "vite-plugin-dts": "^4.3.0",
+         "vite-plugin-node": "^7.0.0",
+         "vitest": "^3.2.4",
+         "winston": "^3.17.0"
+     }
+ }
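With `"type": "module"` and the `exports` map above, consumers load the package as ESM; winston is an optional peer, only needed if the consumer wires in a winston-backed logger. A minimal consumer sketch, assuming the package's `Logger` type is winston-compatible (the optional peer suggests this, but types.ts is not shown in full here):

```ts
// ESM consumer: the "exports" map resolves '@eldrforge/ai-service'
// to dist/index.js, with declarations served from dist/index.d.ts.
import { createCompletion } from '@eldrforge/ai-service';
import winston from 'winston'; // optional peer: install only if you supply a winston logger

const logger = winston.createLogger({
    transports: [new winston.transports.Console()],
});

// OpenAIOptions accepts a logger directly; OPENAI_API_KEY must be set in the environment.
// Assumes winston's Logger satisfies the package's Logger type.
const text = await createCompletion(
    [{ role: 'user', content: 'Draft a one-line commit message for a README typo fix.' }],
    { model: 'gpt-4o-mini', logger },
);
logger.info(text);
```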
package/src/ai.ts ADDED
@@ -0,0 +1,421 @@
+ import { OpenAI } from 'openai';
+ import type { ChatCompletionMessageParam } from 'openai/resources';
+ import { safeJsonParse } from '@eldrforge/git-tools';
+ import fs from 'fs';
+ import { getLogger } from './logger';
+ import type { AIConfig, Transcription, StorageAdapter, Logger } from './types';
+
+ export interface OpenAIOptions {
+     responseFormat?: any;
+     model?: string;
+     debug?: boolean;
+     debugFile?: string;
+     debugRequestFile?: string;
+     debugResponseFile?: string;
+     maxTokens?: number;
+     openaiReasoning?: 'low' | 'medium' | 'high';
+     openaiMaxOutputTokens?: number;
+     storage?: StorageAdapter;
+     logger?: Logger;
+ }
+
+ export interface TranscriptionOptions {
+     model?: string;
+     debug?: boolean;
+     debugFile?: string;
+     debugRequestFile?: string;
+     debugResponseFile?: string;
+     outputDirectory?: string;
+     storage?: StorageAdapter;
+     logger?: Logger;
+     onArchive?: (audioPath: string, transcriptionText: string) => Promise<void>;
+ }
+
+ /**
+  * Get the appropriate model to use based on command-specific configuration
+  * Command-specific model overrides the global model setting
+  */
+ export function getModelForCommand(config: AIConfig, commandName: string): string {
+     let commandModel: string | undefined;
+
+     switch (commandName) {
+         case 'commit':
+         case 'audio-commit':
+             commandModel = config.commands?.commit?.model;
+             break;
+         case 'release':
+             commandModel = config.commands?.release?.model;
+             break;
+         case 'review':
+         case 'audio-review':
+             commandModel = config.commands?.review?.model;
+             break;
+         default:
+             // For other commands, just use global model
+             break;
+     }
+
+     // Return command-specific model if available, otherwise global model
+     return commandModel || config.model || 'gpt-4o-mini';
+ }
+
+ /**
+  * Get the appropriate OpenAI reasoning level based on command-specific configuration
+  * Command-specific reasoning overrides the global reasoning setting
+  */
+ export function getOpenAIReasoningForCommand(config: AIConfig, commandName: string): 'low' | 'medium' | 'high' {
+     let commandReasoning: 'low' | 'medium' | 'high' | undefined;
+
+     switch (commandName) {
+         case 'commit':
+         case 'audio-commit':
+             commandReasoning = config.commands?.commit?.reasoning;
+             break;
+         case 'release':
+             commandReasoning = config.commands?.release?.reasoning;
+             break;
+         case 'review':
+         case 'audio-review':
+             commandReasoning = config.commands?.review?.reasoning;
+             break;
+         default:
+             // For other commands, just use global reasoning
+             break;
+     }
+
+     // Return command-specific reasoning if available, otherwise global reasoning
+     return commandReasoning || config.reasoning || 'low';
+ }
+
+ export class OpenAIError extends Error {
+     constructor(message: string, public readonly isTokenLimitError: boolean = false) {
+         super(message);
+         this.name = 'OpenAIError';
+     }
+ }
+
+ // Check if an error is a token limit exceeded error
+ export function isTokenLimitError(error: any): boolean {
+     if (!error?.message) return false;
+
+     const message = error.message.toLowerCase();
+     return message.includes('maximum context length') ||
+         message.includes('context_length_exceeded') ||
+         message.includes('token limit') ||
+         message.includes('too many tokens') ||
+         message.includes('reduce the length');
+ }
+
+ // Check if an error is a rate limit error
+ export function isRateLimitError(error: any): boolean {
+     if (!error?.message && !error?.code && !error?.status) return false;
+
+     // Check for OpenAI specific rate limit indicators
+     if (error.status === 429 || error.code === 'rate_limit_exceeded') {
+         return true;
+     }
+
+     // Only check message if it exists
+     if (error.message) {
+         const message = error.message.toLowerCase();
+         return message.includes('rate limit exceeded') ||
+             message.includes('too many requests') ||
+             message.includes('quota exceeded') ||
+             (message.includes('rate') && message.includes('limit'));
+     }
+
+     return false;
+ }
+
+ /**
+  * Create OpenAI completion with optional debug and retry support
+  */
+ export async function createCompletion(
+     messages: ChatCompletionMessageParam[],
+     options: OpenAIOptions = { model: "gpt-4o-mini" }
+ ): Promise<string | any> {
+     const logger = options.logger || getLogger();
+     let openai: OpenAI | null = null;
+
+     try {
+         const apiKey = process.env.OPENAI_API_KEY;
+         if (!apiKey) {
+             throw new OpenAIError('OPENAI_API_KEY environment variable is not set');
+         }
+
+         // Create the client; the SDK handles its own cleanup, so nothing to close in finally.
+         const timeoutMs = parseInt(process.env.OPENAI_TIMEOUT_MS || '300000'); // Default to 5 minutes
+         openai = new OpenAI({
+             apiKey: apiKey,
+             timeout: timeoutMs,
+         });
+
+         const modelToUse = options.model || "gpt-4o-mini";
+
+         // Calculate request size
+         const requestSize = JSON.stringify(messages).length;
+         const requestSizeKB = (requestSize / 1024).toFixed(2);
+
+         // Log model, reasoning level, and request size
+         const reasoningInfo = options.openaiReasoning ? ` | Reasoning: ${options.openaiReasoning}` : '';
+         logger.info('🤖 Making request to OpenAI');
+         logger.info(' Model: %s%s', modelToUse, reasoningInfo);
+         logger.info(' Request size: %s KB (%s bytes)', requestSizeKB, requestSize.toLocaleString());
+
+         logger.debug('Sending prompt to OpenAI: %j', messages);
+
+         // Use openaiMaxOutputTokens if specified (highest priority), otherwise fall back to maxTokens, or default to 10000
+         const maxCompletionTokens = options.openaiMaxOutputTokens ?? options.maxTokens ?? 10000;
+
+         // Save request debug file if enabled
+         if (options.debug && (options.debugRequestFile || options.debugFile) && options.storage) {
+             const requestData = {
+                 model: modelToUse,
+                 messages,
+                 max_completion_tokens: maxCompletionTokens,
+                 response_format: options.responseFormat,
+                 reasoning_effort: options.openaiReasoning,
+             };
+             const debugFile = options.debugRequestFile || options.debugFile;
+             await options.storage.writeTemp(debugFile!, JSON.stringify(requestData, null, 2));
+             logger.debug('Wrote request debug file to %s', debugFile);
+         }
+
+         // Prepare the API call options
+         const apiOptions: any = {
+             model: modelToUse,
+             messages,
+             max_completion_tokens: maxCompletionTokens,
+             response_format: options.responseFormat,
+         };
+
+         // Add reasoning parameter if specified and model supports it
+         if (options.openaiReasoning && (modelToUse.includes('gpt-5') || modelToUse.includes('o3'))) {
+             apiOptions.reasoning_effort = options.openaiReasoning;
+         }
+
+         // Add timeout wrapper to the OpenAI API call
+         const startTime = Date.now();
+         const completionPromise = openai.chat.completions.create(apiOptions);
+
+         // Create timeout promise with proper cleanup to prevent memory leaks
+         let timeoutId: NodeJS.Timeout | null = null;
+         const timeoutPromise = new Promise<never>((_, reject) => {
+             const timeoutMs = parseInt(process.env.OPENAI_TIMEOUT_MS || '300000'); // Default to 5 minutes
+             timeoutId = setTimeout(() => reject(new OpenAIError(`OpenAI API call timed out after ${timeoutMs/1000} seconds`)), timeoutMs);
+         });
+
+         let completion;
+         try {
+             completion = await Promise.race([completionPromise, timeoutPromise]);
+         } finally {
+             // Clear the timeout to prevent memory leaks
+             if (timeoutId !== null) {
+                 clearTimeout(timeoutId);
+             }
+         }
+
+         const elapsedTime = Date.now() - startTime;
+
+         // Save response debug file if enabled
+         if (options.debug && (options.debugResponseFile || options.debugFile) && options.storage) {
+             const debugFile = options.debugResponseFile || options.debugFile;
+             await options.storage.writeTemp(debugFile!, JSON.stringify(completion, null, 2));
+             logger.debug('Wrote response debug file to %s', debugFile);
+         }
+
+         const response = completion.choices[0]?.message?.content?.trim();
+         if (!response) {
+             throw new OpenAIError('No response received from OpenAI');
+         }
+
+         // Calculate and log response size
+         const responseSize = response.length;
+         const responseSizeKB = (responseSize / 1024).toFixed(2);
+         logger.info(' Response size: %s KB (%s bytes)', responseSizeKB, responseSize.toLocaleString());
+
+         // Log elapsed time
+         const elapsedTimeFormatted = elapsedTime >= 1000
+             ? `${(elapsedTime / 1000).toFixed(1)}s`
+             : `${elapsedTime}ms`;
+         logger.info(' Time: %s', elapsedTimeFormatted);
+
+         // Log token usage if available
+         if (completion.usage) {
+             logger.info(' Token usage: %s prompt + %s completion = %s total',
+                 completion.usage.prompt_tokens?.toLocaleString() || '?',
+                 completion.usage.completion_tokens?.toLocaleString() || '?',
+                 completion.usage.total_tokens?.toLocaleString() || '?'
+             );
+         }
+
+         logger.debug('Received response from OpenAI: %s...', response.substring(0, 30));
+         if (options.responseFormat) {
+             return safeJsonParse(response, 'OpenAI API response');
+         } else {
+             return response;
+         }
+
+     } catch (error: any) {
+         logger.error('Error calling OpenAI API: %s %s', error.message, error.stack);
+         const isTokenError = isTokenLimitError(error);
+         throw new OpenAIError(`Failed to create completion: ${error.message}`, isTokenError);
+     } finally {
+         // OpenAI client cleanup is handled automatically by the library
+         // No manual cleanup needed for newer versions
+     }
+ }
+
+ /**
+  * Create completion with automatic retry on token limit errors
+  */
+ export async function createCompletionWithRetry(
+     messages: ChatCompletionMessageParam[],
+     options: OpenAIOptions = { model: "gpt-4o-mini" },
+     retryCallback?: (attempt: number) => Promise<ChatCompletionMessageParam[]>
+ ): Promise<string | any> {
+     const logger = options.logger || getLogger();
+     const maxRetries = 3;
+
+     for (let attempt = 1; attempt <= maxRetries; attempt++) {
+         try {
+             const messagesToSend = attempt === 1 ? messages : (retryCallback ? await retryCallback(attempt) : messages);
+             return await createCompletion(messagesToSend, options);
+         } catch (error: any) {
+             if (error instanceof OpenAIError && error.isTokenLimitError && attempt < maxRetries && retryCallback) {
+                 logger.warn('Token limit exceeded on attempt %d/%d, retrying with reduced content...', attempt, maxRetries);
+                 // Add exponential backoff for token limit errors
+                 const backoffMs = Math.min(1000 * Math.pow(2, attempt - 1), 10000);
+                 await new Promise(resolve => setTimeout(resolve, backoffMs));
+                 continue;
+             } else if (isRateLimitError(error) && attempt < maxRetries) {
+                 // Handle rate limiting with exponential backoff
+                 const backoffMs = Math.min(2000 * Math.pow(2, attempt - 1), 15000); // Exponential backoff: 2s, then 4s, capped at 15s
+                 logger.warn(`Rate limit hit on attempt ${attempt}/${maxRetries}, waiting ${backoffMs}ms before retry...`);
+                 await new Promise(resolve => setTimeout(resolve, backoffMs));
+                 continue;
+             }
+             throw error;
+         }
+     }
+
+     // This should never be reached, but TypeScript requires it
+     throw new OpenAIError('Max retries exceeded');
+ }
+
+ /**
+  * Transcribe audio file using OpenAI Whisper API
+  */
+ export async function transcribeAudio(
+     filePath: string,
+     options: TranscriptionOptions = { model: "whisper-1" }
+ ): Promise<Transcription> {
+     const logger = options.logger || getLogger();
+     let openai: OpenAI | null = null;
+     let audioStream: fs.ReadStream | null = null;
+     let streamClosed = false;
+
+     // Helper function to safely close the stream
+     const closeAudioStream = () => {
+         if (audioStream && !streamClosed) {
+             try {
+                 // Only call destroy if it exists and the stream isn't already destroyed
+                 if (typeof audioStream.destroy === 'function' && !audioStream.destroyed) {
+                     audioStream.destroy();
+                 }
+                 streamClosed = true;
+                 logger.debug('Audio stream closed successfully');
+             } catch (streamErr) {
+                 logger.debug('Failed to destroy audio read stream: %s', (streamErr as Error).message);
+                 streamClosed = true; // Mark as closed even if destroy failed
+             }
+         }
+     };
+
+     try {
+         const apiKey = process.env.OPENAI_API_KEY;
+         if (!apiKey) {
+             throw new OpenAIError('OPENAI_API_KEY environment variable is not set');
+         }
+
+         openai = new OpenAI({
+             apiKey: apiKey,
+         });
+
+         logger.debug('Transcribing audio file: %s', filePath);
+
+         // Save request debug file if enabled
+         if (options.debug && (options.debugRequestFile || options.debugFile) && options.storage) {
+             const requestData = {
+                 model: options.model || "whisper-1",
+                 file: filePath, // Can't serialize the stream, so just save the file path
+                 response_format: "json",
+             };
+             const debugFile = options.debugRequestFile || options.debugFile;
+             await options.storage.writeTemp(debugFile!, JSON.stringify(requestData, null, 2));
+             logger.debug('Wrote request debug file to %s', debugFile);
+         }
+
+         audioStream = fs.createReadStream(filePath);
+
+         // Set up error handler for the stream to ensure cleanup on stream errors
+         // Only add handler if the stream has the 'on' method (real streams)
+         if (audioStream && typeof audioStream.on === 'function') {
+             audioStream.on('error', (streamError) => {
+                 logger.error('Audio stream error: %s', streamError.message);
+                 closeAudioStream();
+             });
+         }
+
+         let transcription;
+         try {
+             transcription = await openai.audio.transcriptions.create({
+                 model: options.model || "whisper-1",
+                 file: audioStream,
+                 response_format: "json",
+             });
+             // Close the stream immediately after successful API call to prevent race conditions
+             closeAudioStream();
+         } catch (apiError) {
+             // Close the stream immediately if the API call fails
+             closeAudioStream();
+             throw apiError;
+         }
+
+         // Save response debug file if enabled
+         if (options.debug && (options.debugResponseFile || options.debugFile) && options.storage) {
+             const debugFile = options.debugResponseFile || options.debugFile;
+             await options.storage.writeTemp(debugFile!, JSON.stringify(transcription, null, 2));
+             logger.debug('Wrote response debug file to %s', debugFile);
+         }
+
+         const response = transcription;
+         if (!response) {
+             throw new OpenAIError('No transcription received from OpenAI');
+         }
+
+         logger.debug('Received transcription from OpenAI: %j', response);
+
+         // Archive the audio file and transcription if callback provided
+         if (options.onArchive) {
+             try {
+                 await options.onArchive(filePath, response.text);
+             } catch (archiveError: any) {
+                 // Don't fail the transcription if archiving fails, just log the error
+                 logger.warn('Failed to archive audio file: %s', archiveError.message);
+             }
+         }
+
+         return response;
+
+     } catch (error: any) {
+         logger.error('Error transcribing audio file: %s %s', error.message, error.stack);
+         throw new OpenAIError(`Failed to transcribe audio: ${error.message}`);
+     } finally {
+         // Ensure the audio stream is properly closed to release file handles
+         closeAudioStream();
+         // OpenAI client cleanup is handled automatically by the library
+         // No manual cleanup needed for newer versions
+     }
+ }
+
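`createCompletionWithRetry` only consults `retryCallback` after a token-limit failure (rate limits retry with the same messages), so the callback's job is to return a smaller prompt. A minimal sketch, assuming a large diff string; the halving strategy is an arbitrary illustration, not part of the package:

```ts
import type { ChatCompletionMessageParam } from 'openai/resources';
import { createCompletionWithRetry } from '@eldrforge/ai-service';

const hugeDiff = '...a very large git diff, assumed for illustration...';
const messages: ChatCompletionMessageParam[] = [
    { role: 'system', content: 'Write a conventional commit message for this diff.' },
    { role: 'user', content: hugeDiff },
];

const commitMessage = await createCompletionWithRetry(messages, { model: 'gpt-4o-mini' }, async (attempt) => {
    // Called with attempt = 2 or 3; keep half, then a quarter, of the diff.
    const keep = Math.floor(hugeDiff.length / Math.pow(2, attempt - 1));
    return [messages[0], { role: 'user', content: hugeDiff.slice(0, keep) }];
});
```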
package/src/index.ts ADDED
@@ -0,0 +1,14 @@
+ /**
+  * @eldrforge/ai-service
+  *
+  * AI-powered content generation for automation tools.
+  * Provides OpenAI integration with structured prompts for
+  * generating commit messages, release notes, and code reviews.
+  */
+
+ // Core functionality
+ export * from './types';
+ export * from './logger';
+ export * from './ai';
+ export * from './interactive';
+ export * from './prompts';
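Taken together, the exports support an end-to-end flow like this sketch: transcribe an audio note with Whisper and archive the source file through the `onArchive` hook. The paths are hypothetical, and `OPENAI_API_KEY` must be set:

```ts
import fs from 'fs';
import { transcribeAudio } from '@eldrforge/ai-service';

const transcription = await transcribeAudio('./notes/standup.m4a', {
    model: 'whisper-1',
    // Runs after a successful transcription; if it throws, transcribeAudio
    // logs a warning and still returns the transcription.
    onArchive: async (audioPath, text) => {
        const stamp = Date.now();
        await fs.promises.rename(audioPath, `./archive/${stamp}.m4a`);
        await fs.promises.writeFile(`./archive/${stamp}.txt`, text, 'utf8');
    },
});

console.log(transcription.text);
```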