@eldrforge/ai-service 0.1.1 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +2 -0
- package/dist/index.js.map +1 -0
- package/dist/src/ai.d.ts +55 -0
- package/{src/index.ts → dist/src/index.d.ts} +1 -2
- package/dist/src/interactive.d.ts +122 -0
- package/dist/src/logger.d.ts +19 -0
- package/dist/src/prompts/commit.d.ts +29 -0
- package/dist/src/prompts/index.d.ts +10 -0
- package/dist/src/prompts/release.d.ts +25 -0
- package/dist/src/prompts/review.d.ts +21 -0
- package/dist/src/types.d.ts +99 -0
- package/package.json +11 -8
- package/.github/dependabot.yml +0 -12
- package/.github/workflows/npm-publish.yml +0 -48
- package/.github/workflows/test.yml +0 -33
- package/eslint.config.mjs +0 -84
- package/src/ai.ts +0 -421
- package/src/interactive.ts +0 -562
- package/src/logger.ts +0 -69
- package/src/prompts/commit.ts +0 -85
- package/src/prompts/index.ts +0 -28
- package/src/prompts/instructions/commit.md +0 -133
- package/src/prompts/instructions/release.md +0 -188
- package/src/prompts/instructions/review.md +0 -169
- package/src/prompts/personas/releaser.md +0 -24
- package/src/prompts/personas/you.md +0 -55
- package/src/prompts/release.ts +0 -118
- package/src/prompts/review.ts +0 -72
- package/src/types.ts +0 -112
- package/tests/ai-complete-coverage.test.ts +0 -241
- package/tests/ai-create-completion.test.ts +0 -288
- package/tests/ai-edge-cases.test.ts +0 -221
- package/tests/ai-openai-error.test.ts +0 -35
- package/tests/ai-transcribe.test.ts +0 -169
- package/tests/ai.test.ts +0 -139
- package/tests/interactive-editor.test.ts +0 -253
- package/tests/interactive-secure-temp.test.ts +0 -264
- package/tests/interactive-user-choice.test.ts +0 -173
- package/tests/interactive-user-text.test.ts +0 -174
- package/tests/interactive.test.ts +0 -94
- package/tests/logger-noop.test.ts +0 -40
- package/tests/logger.test.ts +0 -122
- package/tests/prompts.test.ts +0 -179
- package/tsconfig.json +0 -35
- package/vite.config.ts +0 -69
- package/vitest.config.ts +0 -25
package/tests/ai-create-completion.test.ts
@@ -1,288 +0,0 @@
-import { describe, it, expect, beforeEach, vi } from 'vitest';
-import { createCompletion, createCompletionWithRetry, OpenAIError } from '../src/ai';
-import type { AIConfig, Logger } from '../src/types';
-
-// Create mock functions
-const mockChatCreate = vi.fn();
-
-// Mock OpenAI
-vi.mock('openai', () => ({
-  OpenAI: vi.fn().mockImplementation(() => ({
-    chat: {
-      completions: {
-        create: mockChatCreate,
-      },
-    },
-  })),
-}));
-
-// Mock logger
-const mockLoggerInstance = {
-  info: vi.fn(),
-  error: vi.fn(),
-  warn: vi.fn(),
-  debug: vi.fn(),
-};
-
-vi.mock('../src/logger', () => ({
-  getLogger: vi.fn(() => mockLoggerInstance),
-}));
-
-// Mock safeJsonParse
-vi.mock('@eldrforge/git-tools', () => ({
-  safeJsonParse: vi.fn((json: string) => JSON.parse(json)),
-}));
-
-describe('createCompletion', () => {
-  beforeEach(() => {
-    vi.clearAllMocks();
-    process.env.OPENAI_API_KEY = 'test-key';
-  });
-
-  it('should create completion successfully', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: { content: 'Test response' } }],
-      usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
-    });
-
-    const result = await createCompletion(
-      [{ role: 'user', content: 'test' }],
-      { model: 'gpt-4o-mini' }
-    );
-
-    expect(result).toBe('Test response');
-    expect(mockChatCreate).toHaveBeenCalled();
-  });
-
-  it('should throw error if API key not set', async () => {
-    delete process.env.OPENAI_API_KEY;
-
-    await expect(
-      createCompletion([{ role: 'user', content: 'test' }])
-    ).rejects.toThrow('OPENAI_API_KEY environment variable is not set');
-  });
-
-  it('should use specified model', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: { content: 'Response' } }],
-      usage: {},
-    });
-
-    await createCompletion(
-      [{ role: 'user', content: 'test' }],
-      { model: 'gpt-4o' }
-    );
-
-    expect(mockChatCreate).toHaveBeenCalledWith(
-      expect.objectContaining({ model: 'gpt-4o' })
-    );
-  });
-
-  it('should handle empty response', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: { content: '' } }],
-      usage: {},
-    });
-
-    await expect(
-      createCompletion([{ role: 'user', content: 'test' }])
-    ).rejects.toThrow('No response received from OpenAI');
-  });
-
-  it('should handle missing content in response', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: {} }],
-      usage: {},
-    });
-
-    await expect(
-      createCompletion([{ role: 'user', content: 'test' }])
-    ).rejects.toThrow('No response received from OpenAI');
-  });
-
-  it('should log request and response sizes', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: { content: 'Test response' } }],
-      usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
-    });
-
-    await createCompletion([{ role: 'user', content: 'test' }]);
-
-    expect(mockLoggerInstance.info).toHaveBeenCalledWith(
-      expect.stringContaining('Request size'),
-      expect.any(String),
-      expect.any(String)
-    );
-    expect(mockLoggerInstance.info).toHaveBeenCalledWith(
-      expect.stringContaining('Response size'),
-      expect.any(String),
-      expect.any(String)
-    );
-  });
-
-  it('should log token usage when available', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: { content: 'Response' } }],
-      usage: { prompt_tokens: 100, completion_tokens: 50, total_tokens: 150 },
-    });
-
-    await createCompletion([{ role: 'user', content: 'test' }]);
-
-    expect(mockLoggerInstance.info).toHaveBeenCalledWith(
-      expect.stringContaining('Token usage'),
-      expect.any(String),
-      expect.any(String),
-      expect.any(String)
-    );
-  });
-
-  it('should handle API errors with stack traces', async () => {
-    const error = new Error('API failed');
-    mockChatCreate.mockRejectedValue(error);
-
-    await expect(
-      createCompletion([{ role: 'user', content: 'test' }])
-    ).rejects.toThrow(OpenAIError);
-
-    expect(mockLoggerInstance.error).toHaveBeenCalledWith(
-      expect.stringContaining('Error calling OpenAI API'),
-      expect.any(String),
-      expect.anything()
-    );
-  });
-
-  it('should set maxCompletionTokens', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: { content: 'Response' } }],
-      usage: {},
-    });
-
-    await createCompletion(
-      [{ role: 'user', content: 'test' }],
-      { maxTokens: 5000 }
-    );
-
-    expect(mockChatCreate).toHaveBeenCalledWith(
-      expect.objectContaining({ max_completion_tokens: 5000 })
-    );
-  });
-
-  it('should use openaiMaxOutputTokens over maxTokens', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: { content: 'Response' } }],
-      usage: {},
-    });
-
-    await createCompletion(
-      [{ role: 'user', content: 'test' }],
-      { maxTokens: 5000, openaiMaxOutputTokens: 8000 }
-    );
-
-    expect(mockChatCreate).toHaveBeenCalledWith(
-      expect.objectContaining({ max_completion_tokens: 8000 })
-    );
-  });
-
-  it('should add reasoning_effort for supported models', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: { content: 'Response' } }],
-      usage: {},
-    });
-
-    await createCompletion(
-      [{ role: 'user', content: 'test' }],
-      { model: 'gpt-5-turbo', openaiReasoning: 'high' }
-    );
-
-    expect(mockChatCreate).toHaveBeenCalledWith(
-      expect.objectContaining({ reasoning_effort: 'high' })
-    );
-  });
-});
-
-describe('createCompletionWithRetry', () => {
-  beforeEach(() => {
-    vi.clearAllMocks();
-    process.env.OPENAI_API_KEY = 'test-key';
-  });
-
-  it('should succeed on first try', async () => {
-    mockChatCreate.mockResolvedValue({
-      choices: [{ message: { content: 'Response' } }],
-      usage: {},
-    });
-
-    const result = await createCompletionWithRetry(
-      [{ role: 'user', content: 'test' }]
-    );
-
-    expect(result).toBe('Response');
-    expect(mockChatCreate).toHaveBeenCalledTimes(1);
-  });
-
-  it('should retry on rate limit error', async () => {
-    const rateLimitError: any = new Error('Rate limit exceeded');
-    rateLimitError.status = 429;
-
-    mockChatCreate
-      .mockRejectedValueOnce(rateLimitError)
-      .mockResolvedValue({
-        choices: [{ message: { content: 'Response after retry' } }],
-        usage: {},
-      });
-
-    const result = await createCompletionWithRetry(
-      [{ role: 'user', content: 'test' }]
-    );
-
-    expect(result).toBe('Response after retry');
-    expect(mockChatCreate).toHaveBeenCalledTimes(2);
-  });
-
-  it('should retry with callback on token limit error', async () => {
-    const tokenLimitError = new OpenAIError('maximum context length exceeded', true);
-
-    mockChatCreate
-      .mockRejectedValueOnce(tokenLimitError)
-      .mockResolvedValue({
-        choices: [{ message: { content: 'Response with less content' } }],
-        usage: {},
-      });
-
-    const retryCallback = vi.fn().mockResolvedValue([
-      { role: 'user', content: 'reduced' }
-    ]);
-
-    const result = await createCompletionWithRetry(
-      [{ role: 'user', content: 'long content' }],
-      {},
-      retryCallback
-    );
-
-    expect(result).toBe('Response with less content');
-    expect(retryCallback).toHaveBeenCalledWith(2);
-  });
-
-  it('should throw after max retries on persistent rate limit', async () => {
-    const rateLimitError: any = new Error('Rate limit');
-    rateLimitError.status = 429;
-
-    mockChatCreate.mockRejectedValue(rateLimitError);
-
-    await expect(
-      createCompletionWithRetry([{ role: 'user', content: 'test' }])
-    ).rejects.toThrow();
-
-    expect(mockChatCreate).toHaveBeenCalledTimes(3);
-  }, 30000); // Increase timeout to account for backoff delays
-
-  it('should throw immediately on non-retryable errors', async () => {
-    mockChatCreate.mockRejectedValue(new Error('Invalid API key'));
-
-    await expect(
-      createCompletionWithRetry([{ role: 'user', content: 'test' }])
-    ).rejects.toThrow();
-
-    expect(mockChatCreate).toHaveBeenCalledTimes(1);
-  });
-});
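
The deleted tests above pin down the behavior of the removed src/ai.ts fairly precisely: a default model, maxTokens mapped to max_completion_tokens with openaiMaxOutputTokens taking precedence, reasoning_effort passed through, an empty-choices error, and a three-attempt retry loop whose token-limit callback receives the next attempt number. The following is a minimal sketch of what those assertions imply, not the package's actual source; the Message and CompletionOptions shapes, the gpt-4o-mini default, and the simplified error handling (the real code wraps failures in OpenAIError and sleeps with exponential backoff between retries) are assumptions.

import { OpenAI } from 'openai';

type Message = { role: 'system' | 'user' | 'assistant'; content: string };

// Hypothetical options bag; the real AIConfig in src/types.ts may differ.
interface CompletionOptions {
  model?: string;
  maxTokens?: number;
  openaiMaxOutputTokens?: number; // wins over maxTokens when both are set
  openaiReasoning?: 'low' | 'medium' | 'high';
}

export async function createCompletion(
  messages: Message[],
  options: CompletionOptions = {}
): Promise<string> {
  if (!process.env.OPENAI_API_KEY) {
    throw new Error('OPENAI_API_KEY environment variable is not set');
  }
  const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

  const request: Record<string, unknown> = {
    model: options.model ?? 'gpt-4o-mini', // default model is an assumption
    messages,
  };
  // openaiMaxOutputTokens takes precedence over maxTokens, per the tests.
  const maxOut = options.openaiMaxOutputTokens ?? options.maxTokens;
  if (maxOut !== undefined) request.max_completion_tokens = maxOut;
  // reasoning_effort is only attached for models that support it.
  if (options.openaiReasoning) request.reasoning_effort = options.openaiReasoning;

  const response: any = await client.chat.completions.create(request as any);
  const content: string | undefined = response?.choices?.[0]?.message?.content;
  if (!content) throw new Error('No response received from OpenAI');
  return content;
}

// Retry wrapper: up to three attempts on HTTP 429; on a token-limit error it
// asks the callback to shrink the messages, passing the next attempt number
// (the tests expect retryCallback to receive 2 after the first failure).
export async function createCompletionWithRetry(
  messages: Message[],
  options: CompletionOptions = {},
  onTokenLimit?: (attempt: number) => Promise<Message[]>
): Promise<string> {
  const maxAttempts = 3; // matches toHaveBeenCalledTimes(3) in the tests
  let current = messages;
  for (let attempt = 1; ; attempt++) {
    try {
      return await createCompletion(current, options);
    } catch (err) {
      const e = err as { status?: number; isTokenLimitError?: boolean };
      const tokenLimited = e.isTokenLimitError === true && onTokenLimit;
      const retryable = e.status === 429 || tokenLimited;
      // Non-retryable errors are rethrown immediately, per the tests.
      if (!retryable || attempt >= maxAttempts) throw err;
      if (tokenLimited) current = await tokenLimited(attempt + 1);
      // The real implementation also sleeps with exponential backoff here.
    }
  }
}
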
package/tests/ai-edge-cases.test.ts
@@ -1,221 +0,0 @@
-import { describe, it, expect, beforeEach, vi } from 'vitest';
-import { createCompletion, transcribeAudio } from '../src/ai';
-import type { StorageAdapter } from '../src/types';
-
-// Mock OpenAI
-const mockChatCreate = vi.fn();
-const mockTranscriptionsCreate = vi.fn();
-
-vi.mock('openai', () => ({
-  OpenAI: vi.fn().mockImplementation(() => ({
-    chat: {
-      completions: {
-        create: mockChatCreate,
-      },
-    },
-    audio: {
-      transcriptions: {
-        create: mockTranscriptionsCreate,
-      },
-    },
-  })),
-}));
-
-// Mock logger
-vi.mock('../src/logger', () => ({
-  getLogger: vi.fn(() => ({
-    info: vi.fn(),
-    error: vi.fn(),
-    warn: vi.fn(),
-    debug: vi.fn(),
-  })),
-}));
-
-// Mock fs
-const mockReadStreamDestroy = vi.fn();
-const mockReadStreamOn = vi.fn();
-vi.mock('fs', () => ({
-  default: {
-    createReadStream: vi.fn(() => ({
-      destroy: mockReadStreamDestroy,
-      destroyed: false,
-      on: mockReadStreamOn,
-    })),
-  },
-  createReadStream: vi.fn(() => ({
-    destroy: mockReadStreamDestroy,
-    destroyed: false,
-    on: mockReadStreamOn,
-  })),
-}));
-
-// Mock safeJsonParse
-vi.mock('@eldrforge/git-tools', () => ({
-  safeJsonParse: vi.fn((json: string) => JSON.parse(json)),
-}));
-
-describe('AI Edge Cases', () => {
-  beforeEach(() => {
-    vi.clearAllMocks();
-    process.env.OPENAI_API_KEY = 'test-key';
-    mockReadStreamDestroy.mockClear();
-    mockReadStreamOn.mockClear();
-  });
-
-  describe('createCompletion - debug and storage', () => {
-    it('should write debug request file when debug enabled', async () => {
-      const mockStorage: StorageAdapter = {
-        writeOutput: vi.fn(),
-        readTemp: vi.fn(),
-        writeTemp: vi.fn().mockResolvedValue(undefined),
-      };
-
-      mockChatCreate.mockResolvedValue({
-        choices: [{ message: { content: 'Response' } }],
-        usage: {},
-      });
-
-      await createCompletion(
-        [{ role: 'user', content: 'test' }],
-        {
-          debug: true,
-          debugRequestFile: 'request.json',
-          storage: mockStorage,
-        }
-      );
-
-      expect(mockStorage.writeTemp).toHaveBeenCalledWith(
-        'request.json',
-        expect.stringContaining('"model"')
-      );
-    });
-
-    it('should write debug response file when debug enabled', async () => {
-      const mockStorage: StorageAdapter = {
-        writeOutput: vi.fn(),
-        readTemp: vi.fn(),
-        writeTemp: vi.fn().mockResolvedValue(undefined),
-      };
-
-      mockChatCreate.mockResolvedValue({
-        choices: [{ message: { content: 'Response' } }],
-        usage: {},
-      });
-
-      await createCompletion(
-        [{ role: 'user', content: 'test' }],
-        {
-          debug: true,
-          debugResponseFile: 'response.json',
-          storage: mockStorage,
-        }
-      );
-
-      expect(mockStorage.writeTemp).toHaveBeenCalledWith(
-        'response.json',
-        expect.stringContaining('"choices"')
-      );
-    });
-
-    it('should use debugFile for both request and response if specific files not provided', async () => {
-      const mockStorage: StorageAdapter = {
-        writeOutput: vi.fn(),
-        readTemp: vi.fn(),
-        writeTemp: vi.fn().mockResolvedValue(undefined),
-      };
-
-      mockChatCreate.mockResolvedValue({
-        choices: [{ message: { content: 'Response' } }],
-        usage: {},
-      });
-
-      await createCompletion(
-        [{ role: 'user', content: 'test' }],
-        {
-          debug: true,
-          debugFile: 'debug.json',
-          storage: mockStorage,
-        }
-      );
-
-      expect(mockStorage.writeTemp).toHaveBeenCalledTimes(2); // Both request and response
-    });
-  });
-
-  describe('transcribeAudio - edge cases', () => {
-    it('should handle stream without on method', async () => {
-      const streamWithoutOn = {
-        destroy: mockReadStreamDestroy,
-        destroyed: false,
-        // No 'on' method
-      };
-
-      const fs = await import('fs');
-      vi.mocked(fs.createReadStream).mockReturnValue(streamWithoutOn as any);
-
-      mockTranscriptionsCreate.mockResolvedValue({
-        text: 'Transcribed',
-      });
-
-      const result = await transcribeAudio('/path/audio.mp3');
-
-      expect(result.text).toBe('Transcribed');
-    });
-
-    it('should check if stream is destroyed before calling destroy', async () => {
-      const destroyedStream = {
-        destroy: mockReadStreamDestroy,
-        destroyed: true,
-        on: mockReadStreamOn,
-      };
-
-      const fs = await import('fs');
-      vi.mocked(fs.createReadStream).mockReturnValue(destroyedStream as any);
-
-      mockTranscriptionsCreate.mockResolvedValue({
-        text: 'Transcribed',
-      });
-
-      const result = await transcribeAudio('/path/audio.mp3');
-
-      expect(result.text).toBe('Transcribed');
-      // The code checks destroyed flag before calling destroy
-    });
-
-    it('should handle stream destroy errors', async () => {
-      mockReadStreamDestroy.mockImplementation(() => {
-        throw new Error('Destroy failed');
-      });
-
-      mockTranscriptionsCreate.mockResolvedValue({
-        text: 'Transcribed',
-      });
-
-      // Should not throw despite stream error
-      const result = await transcribeAudio('/path/audio.mp3');
-
-      expect(result.text).toBe('Transcribed');
-    });
-
-    it('should write debug files when enabled', async () => {
-      const mockStorage: StorageAdapter = {
-        writeOutput: vi.fn(),
-        readTemp: vi.fn(),
-        writeTemp: vi.fn().mockResolvedValue(undefined),
-      };
-
-      mockTranscriptionsCreate.mockResolvedValue({
-        text: 'Transcribed text',
-      });
-
-      await transcribeAudio('/path/audio.mp3', {
-        debug: true,
-        debugFile: 'transcribe-debug.json',
-        storage: mockStorage,
-      });
-
-      expect(mockStorage.writeTemp).toHaveBeenCalledTimes(2); // Request and response
-    });
-  });
-});
-
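
The edge-case tests above constrain how the deleted transcribeAudio cleaned up its file stream: destroy() is skipped when the stream is already destroyed or missing the method, cleanup errors never surface to the caller, and debug mode produces exactly two writeTemp calls. A rough sketch under those assumptions; the 'whisper-1' model, the options shape, and the debug file naming are inferred, not confirmed by the diff.

import fs from 'fs';
import { OpenAI } from 'openai';

// Hypothetical shapes; the real StorageAdapter lives in src/types.ts.
interface TranscribeOptions {
  debug?: boolean;
  debugFile?: string;
  storage?: { writeTemp(name: string, content: string): Promise<void> };
}

export async function transcribeAudio(
  audioPath: string,
  options: TranscribeOptions = {}
): Promise<{ text: string }> {
  const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
  const stream = fs.createReadStream(audioPath);
  try {
    const result = await client.audio.transcriptions.create({
      file: stream,
      model: 'whisper-1', // assumed; the deleted source may pick differently
    });
    if (options.debug && options.debugFile && options.storage) {
      // Two writes per run (request, then response), matching the
      // toHaveBeenCalledTimes(2) assertion; the real file naming is unknown.
      await options.storage.writeTemp(options.debugFile, JSON.stringify({ audioPath }));
      await options.storage.writeTemp(options.debugFile, JSON.stringify(result));
    }
    return { text: result.text };
  } finally {
    // Defensive cleanup, per the edge-case tests: tolerate a stream that is
    // already destroyed, lacks destroy entirely, or throws from destroy().
    try {
      const s = stream as unknown as { destroyed?: boolean; destroy?: () => void };
      if (!s.destroyed && typeof s.destroy === 'function') {
        s.destroy();
      }
    } catch {
      // Ignore stream cleanup failures; the transcription already succeeded.
    }
  }
}
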
package/tests/ai-openai-error.test.ts
@@ -1,35 +0,0 @@
-import { describe, it, expect } from 'vitest';
-import { OpenAIError } from '../src/ai';
-
-describe('OpenAIError', () => {
-  it('should create error with message', () => {
-    const error = new OpenAIError('Test error');
-
-    expect(error).toBeInstanceOf(Error);
-    expect(error.message).toBe('Test error');
-    expect(error.name).toBe('OpenAIError');
-    expect(error.isTokenLimitError).toBe(false);
-  });
-
-  it('should create error with token limit flag', () => {
-    const error = new OpenAIError('Token limit exceeded', true);
-
-    expect(error.message).toBe('Token limit exceeded');
-    expect(error.isTokenLimitError).toBe(true);
-  });
-
-  it('should have error prototype chain', () => {
-    const error = new OpenAIError('Test');
-
-    expect(error instanceof OpenAIError).toBe(true);
-    expect(error instanceof Error).toBe(true);
-  });
-
-  it('should support error stack traces', () => {
-    const error = new OpenAIError('Test error');
-
-    expect(error.stack).toBeDefined();
-    expect(typeof error.stack).toBe('string');
-  });
-});
-
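
These assertions describe the deleted OpenAIError class almost completely; a minimal reconstruction consistent with them (the explicit prototype fix is the standard pattern for making instanceof work on Error subclasses when TypeScript targets ES5, and is an assumption about the original):

// Custom Error subclass with a name, an isTokenLimitError flag defaulting
// to false, and a prototype fix so `error instanceof OpenAIError` holds.
export class OpenAIError extends Error {
  public readonly isTokenLimitError: boolean;

  constructor(message: string, isTokenLimitError = false) {
    super(message);
    this.name = 'OpenAIError';
    this.isTokenLimitError = isTokenLimitError;
    Object.setPrototypeOf(this, OpenAIError.prototype);
  }
}
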