@hazeljs/ai 0.2.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. package/LICENSE +192 -0
  2. package/README.md +497 -0
  3. package/dist/ai-enhanced.service.d.ts +108 -0
  4. package/dist/ai-enhanced.service.d.ts.map +1 -0
  5. package/dist/ai-enhanced.service.js +345 -0
  6. package/dist/ai-enhanced.service.test.d.ts +2 -0
  7. package/dist/ai-enhanced.service.test.d.ts.map +1 -0
  8. package/dist/ai-enhanced.service.test.js +501 -0
  9. package/dist/ai-enhanced.test.d.ts +2 -0
  10. package/dist/ai-enhanced.test.d.ts.map +1 -0
  11. package/dist/ai-enhanced.test.js +587 -0
  12. package/dist/ai-enhanced.types.d.ts +277 -0
  13. package/dist/ai-enhanced.types.d.ts.map +1 -0
  14. package/dist/ai-enhanced.types.js +2 -0
  15. package/dist/ai.decorator.d.ts +4 -0
  16. package/dist/ai.decorator.d.ts.map +1 -0
  17. package/dist/ai.decorator.js +57 -0
  18. package/dist/ai.decorator.test.d.ts +2 -0
  19. package/dist/ai.decorator.test.d.ts.map +1 -0
  20. package/dist/ai.decorator.test.js +189 -0
  21. package/dist/ai.module.d.ts +12 -0
  22. package/dist/ai.module.d.ts.map +1 -0
  23. package/dist/ai.module.js +44 -0
  24. package/dist/ai.module.test.d.ts +2 -0
  25. package/dist/ai.module.test.d.ts.map +1 -0
  26. package/dist/ai.module.test.js +23 -0
  27. package/dist/ai.service.d.ts +11 -0
  28. package/dist/ai.service.d.ts.map +1 -0
  29. package/dist/ai.service.js +266 -0
  30. package/dist/ai.service.test.d.ts +2 -0
  31. package/dist/ai.service.test.d.ts.map +1 -0
  32. package/dist/ai.service.test.js +222 -0
  33. package/dist/ai.types.d.ts +30 -0
  34. package/dist/ai.types.d.ts.map +1 -0
  35. package/dist/ai.types.js +2 -0
  36. package/dist/context/context.manager.d.ts +69 -0
  37. package/dist/context/context.manager.d.ts.map +1 -0
  38. package/dist/context/context.manager.js +168 -0
  39. package/dist/context/context.manager.test.d.ts +2 -0
  40. package/dist/context/context.manager.test.d.ts.map +1 -0
  41. package/dist/context/context.manager.test.js +180 -0
  42. package/dist/decorators/ai-function.decorator.d.ts +42 -0
  43. package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
  44. package/dist/decorators/ai-function.decorator.js +80 -0
  45. package/dist/decorators/ai-validate.decorator.d.ts +46 -0
  46. package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
  47. package/dist/decorators/ai-validate.decorator.js +83 -0
  48. package/dist/index.d.ts +18 -0
  49. package/dist/index.d.ts.map +1 -0
  50. package/dist/index.js +40 -0
  51. package/dist/prompts/task.prompt.d.ts +12 -0
  52. package/dist/prompts/task.prompt.d.ts.map +1 -0
  53. package/dist/prompts/task.prompt.js +12 -0
  54. package/dist/providers/anthropic.provider.d.ts +48 -0
  55. package/dist/providers/anthropic.provider.d.ts.map +1 -0
  56. package/dist/providers/anthropic.provider.js +194 -0
  57. package/dist/providers/anthropic.provider.test.d.ts +2 -0
  58. package/dist/providers/anthropic.provider.test.d.ts.map +1 -0
  59. package/dist/providers/anthropic.provider.test.js +222 -0
  60. package/dist/providers/cohere.provider.d.ts +57 -0
  61. package/dist/providers/cohere.provider.d.ts.map +1 -0
  62. package/dist/providers/cohere.provider.js +230 -0
  63. package/dist/providers/cohere.provider.test.d.ts +2 -0
  64. package/dist/providers/cohere.provider.test.d.ts.map +1 -0
  65. package/dist/providers/cohere.provider.test.js +267 -0
  66. package/dist/providers/gemini.provider.d.ts +45 -0
  67. package/dist/providers/gemini.provider.d.ts.map +1 -0
  68. package/dist/providers/gemini.provider.js +180 -0
  69. package/dist/providers/gemini.provider.test.d.ts +2 -0
  70. package/dist/providers/gemini.provider.test.d.ts.map +1 -0
  71. package/dist/providers/gemini.provider.test.js +219 -0
  72. package/dist/providers/ollama.provider.d.ts +45 -0
  73. package/dist/providers/ollama.provider.d.ts.map +1 -0
  74. package/dist/providers/ollama.provider.js +232 -0
  75. package/dist/providers/ollama.provider.test.d.ts +2 -0
  76. package/dist/providers/ollama.provider.test.d.ts.map +1 -0
  77. package/dist/providers/ollama.provider.test.js +267 -0
  78. package/dist/providers/openai.provider.d.ts +57 -0
  79. package/dist/providers/openai.provider.d.ts.map +1 -0
  80. package/dist/providers/openai.provider.js +320 -0
  81. package/dist/providers/openai.provider.test.d.ts +2 -0
  82. package/dist/providers/openai.provider.test.d.ts.map +1 -0
  83. package/dist/providers/openai.provider.test.js +364 -0
  84. package/dist/tracking/token.tracker.d.ts +72 -0
  85. package/dist/tracking/token.tracker.d.ts.map +1 -0
  86. package/dist/tracking/token.tracker.js +222 -0
  87. package/dist/tracking/token.tracker.test.d.ts +2 -0
  88. package/dist/tracking/token.tracker.test.d.ts.map +1 -0
  89. package/dist/tracking/token.tracker.test.js +272 -0
  90. package/dist/vector/vector.service.d.ts +50 -0
  91. package/dist/vector/vector.service.d.ts.map +1 -0
  92. package/dist/vector/vector.service.js +163 -0
  93. package/package.json +60 -0
package/dist/providers/ollama.provider.test.d.ts
@@ -0,0 +1,2 @@
+ export {};
+ //# sourceMappingURL=ollama.provider.test.d.ts.map
package/dist/providers/ollama.provider.test.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"ollama.provider.test.d.ts","sourceRoot":"","sources":["../../src/providers/ollama.provider.test.ts"],"names":[],"mappings":""}
package/dist/providers/ollama.provider.test.js
@@ -0,0 +1,267 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ jest.mock('@hazeljs/core', () => ({
+     __esModule: true,
+     default: { info: jest.fn(), debug: jest.fn(), warn: jest.fn(), error: jest.fn() },
+ }));
+ const ollama_provider_1 = require("./ollama.provider");
+ describe('OllamaProvider', () => {
+     let provider;
+     let mockFetch;
+     beforeEach(() => {
+         provider = new ollama_provider_1.OllamaProvider();
+         mockFetch = jest.spyOn(global, 'fetch').mockImplementation(() => {
+             throw new Error('fetch not configured in this test');
+         });
+     });
+     afterEach(() => {
+         mockFetch.mockRestore();
+     });
+     describe('constructor', () => {
+         it('sets name to ollama', () => {
+             expect(provider.name).toBe('ollama');
+         });
+         it('accepts custom baseURL and model', () => {
+             const p = new ollama_provider_1.OllamaProvider({ baseURL: 'http://custom:11434', defaultModel: 'mistral' });
+             expect(p).toBeDefined();
+         });
+         it('uses OLLAMA_BASE_URL env var', () => {
+             process.env.OLLAMA_BASE_URL = 'http://env-host:11434';
+             const p = new ollama_provider_1.OllamaProvider();
+             expect(p).toBeDefined();
+             delete process.env.OLLAMA_BASE_URL;
+         });
+     });
+     describe('getSupportedModels()', () => {
+         it('returns list including llama2', () => {
+             const models = provider.getSupportedModels();
+             expect(models).toContain('llama2');
+             expect(models.length).toBeGreaterThan(0);
+         });
+     });
+     describe('getSupportedEmbeddingModels()', () => {
+         it('returns list including llama2', () => {
+             const models = provider.getSupportedEmbeddingModels();
+             expect(models).toContain('llama2');
+         });
+     });
+     describe('complete()', () => {
+         it('returns a completion response for user message', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 json: () => Promise.resolve({
+                     model: 'llama2',
+                     response: 'Hello world',
+                     done: true,
+                     prompt_eval_count: 10,
+                     eval_count: 20,
+                 }),
+             });
+             const result = await provider.complete({
+                 messages: [{ role: 'user', content: 'Say hello' }],
+                 model: 'llama2',
+             });
+             expect(result.content).toBe('Hello world');
+             expect(result.role).toBe('assistant');
+             expect(result.usage?.promptTokens).toBe(10);
+             expect(result.usage?.completionTokens).toBe(20);
+             expect(result.usage?.totalTokens).toBe(30);
+             expect(result.finishReason).toBe('stop');
+         });
+         it('uses defaultModel when request has no model', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 json: () => Promise.resolve({ model: 'llama2', response: 'ok', done: true }),
+             });
+             const result = await provider.complete({ messages: [{ role: 'user', content: 'hi' }] });
+             expect(result).toBeDefined();
+         });
+         it('returns "length" finishReason when not done', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 json: () => Promise.resolve({ model: 'llama2', response: 'partial', done: false }),
+             });
+             const result = await provider.complete({ messages: [{ role: 'user', content: 'hi' }] });
+             expect(result.finishReason).toBe('length');
+         });
+         it('handles zero token counts gracefully', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 json: () => Promise.resolve({ model: 'llama2', response: 'ok', done: true }),
+             });
+             const result = await provider.complete({ messages: [{ role: 'user', content: 'hi' }] });
+             expect(result.usage?.promptTokens).toBe(0);
+             expect(result.usage?.completionTokens).toBe(0);
+         });
+         it('transforms all message roles into prompt string', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 json: () => Promise.resolve({ model: 'llama2', response: 'ok', done: true }),
+             });
+             await provider.complete({
+                 messages: [
+                     { role: 'system', content: 'System prompt' },
+                     { role: 'user', content: 'User message' },
+                     { role: 'assistant', content: 'Assistant response' },
+                 ],
+             });
+             const callBody = JSON.parse(mockFetch.mock.calls[0][1].body);
+             expect(callBody.prompt).toContain('System: System prompt');
+             expect(callBody.prompt).toContain('User: User message');
+             expect(callBody.prompt).toContain('Assistant: Assistant response');
+         });
+         it('passes temperature and maxTokens to request', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 json: () => Promise.resolve({ model: 'llama2', response: 'ok', done: true }),
+             });
+             await provider.complete({
+                 messages: [{ role: 'user', content: 'hi' }],
+                 temperature: 0.5,
+                 maxTokens: 100,
+                 topP: 0.9,
+             });
+             const callBody = JSON.parse(mockFetch.mock.calls[0][1].body);
+             expect(callBody.temperature).toBe(0.5);
+             expect(callBody.num_predict).toBe(100);
+             expect(callBody.top_p).toBe(0.9);
+         });
+         it('throws on API error response', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: false,
+                 status: 500,
+                 text: () => Promise.resolve('Internal Server Error'),
+             });
+             await expect(provider.complete({ messages: [{ role: 'user', content: 'hi' }] })).rejects.toThrow('Ollama API error: 500');
+         });
+     });
+     describe('streamComplete()', () => {
+         it('yields stream chunks', async () => {
+             const encoder = new TextEncoder();
+             const lines = [
+                 JSON.stringify({ model: 'llama2', response: 'Hello', done: false, prompt_eval_count: 5 }),
+                 JSON.stringify({ model: 'llama2', response: ' world', done: true, eval_count: 10 }),
+             ];
+             let readIdx = 0;
+             const mockReader = {
+                 read: jest.fn().mockImplementation(() => {
+                     if (readIdx < lines.length) {
+                         return Promise.resolve({ done: false, value: encoder.encode(lines[readIdx++] + '\n') });
+                     }
+                     return Promise.resolve({ done: true, value: undefined });
+                 }),
+                 releaseLock: jest.fn(),
+             };
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 body: { getReader: () => mockReader },
+             });
+             const results = [];
+             for await (const chunk of provider.streamComplete({
+                 messages: [{ role: 'user', content: 'hi' }],
+             })) {
+                 results.push(chunk);
+             }
+             expect(results.length).toBeGreaterThan(0);
+         });
+         it('skips invalid JSON lines', async () => {
+             const encoder = new TextEncoder();
+             const lines = [
+                 'not-json\n',
+                 JSON.stringify({ model: 'llama2', response: 'ok', done: true }) + '\n',
+             ];
+             let readIdx = 0;
+             const mockReader = {
+                 read: jest.fn().mockImplementation(() => {
+                     if (readIdx < lines.length) {
+                         return Promise.resolve({ done: false, value: encoder.encode(lines[readIdx++]) });
+                     }
+                     return Promise.resolve({ done: true, value: undefined });
+                 }),
+                 releaseLock: jest.fn(),
+             };
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 body: { getReader: () => mockReader },
+             });
+             const results = [];
+             for await (const chunk of provider.streamComplete({
+                 messages: [{ role: 'user', content: 'hi' }],
+             })) {
+                 results.push(chunk);
+             }
+             // No error thrown; any valid chunks collected
+             expect(Array.isArray(results)).toBe(true);
+         });
+         it('throws when response is not ok', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: false,
+                 status: 503,
+                 text: () => Promise.resolve('Service Unavailable'),
+             });
+             const gen = provider.streamComplete({ messages: [{ role: 'user', content: 'hi' }] });
+             await expect(gen.next()).rejects.toThrow('Ollama API error: 503');
+         });
+         it('throws when response has no body', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 body: null,
+             });
+             const gen = provider.streamComplete({ messages: [{ role: 'user', content: 'hi' }] });
+             await expect(gen.next()).rejects.toThrow('No response body available for streaming');
+         });
+     });
+     describe('embed()', () => {
+         it('returns embeddings for string input', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 json: () => Promise.resolve({ embedding: [0.1, 0.2, 0.3] }),
+             });
+             const result = await provider.embed({ input: 'hello world', model: 'llama2' });
+             expect(result.embeddings).toHaveLength(1);
+             expect(result.embeddings[0]).toEqual([0.1, 0.2, 0.3]);
+             expect(result.usage?.promptTokens).toBe(0);
+         });
+         it('uses first element when input is an array', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 json: () => Promise.resolve({ embedding: [0.5, 0.6] }),
+             });
+             const result = await provider.embed({ input: ['first', 'second'] });
+             expect(result.embeddings).toHaveLength(1);
+             // Only first element used
+             const callBody = JSON.parse(mockFetch.mock.calls[0][1].body);
+             expect(callBody.prompt).toBe('first');
+         });
+         it('uses defaultModel when no model specified', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: true,
+                 json: () => Promise.resolve({ embedding: [0.1] }),
+             });
+             const result = await provider.embed({ input: 'test' });
+             expect(result.model).toBe('llama2');
+         });
+         it('throws on API error', async () => {
+             mockFetch.mockResolvedValue({
+                 ok: false,
+                 status: 404,
+                 text: () => Promise.resolve('Not Found'),
+             });
+             await expect(provider.embed({ input: 'hello' })).rejects.toThrow('Ollama API error: 404');
+         });
+     });
+     describe('isAvailable()', () => {
+         it('returns true when API responds ok', async () => {
+             mockFetch.mockResolvedValue({ ok: true });
+             expect(await provider.isAvailable()).toBe(true);
+         });
+         it('returns false when API is down (fetch rejects)', async () => {
+             mockFetch.mockRejectedValue(new Error('Connection refused'));
+             expect(await provider.isAvailable()).toBe(false);
+         });
+         it('returns false on non-ok response', async () => {
+             mockFetch.mockResolvedValue({ ok: false });
+             expect(await provider.isAvailable()).toBe(false);
+         });
+     });
+ });
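
Taken together, the suite above pins down the OllamaProvider contract: the constructor options, the flattened `System:`/`User:`/`Assistant:` prompt, the `num_predict`/`top_p` request mapping, and the `Ollama API error: <status>` failure mode. For orientation, a minimal sketch of that contract follows; the `@hazeljs/ai` root import is an assumption (the tests themselves require `./ollama.provider` relatively), so treat the wiring, not the behavior, as hypothetical.

```ts
// Minimal sketch of the behavior the tests assert. Import path is assumed.
import { OllamaProvider } from '@hazeljs/ai';

async function demo(): Promise<void> {
  // Options exercised by the constructor tests; OLLAMA_BASE_URL is also honored.
  const provider = new OllamaProvider({
    baseURL: 'http://localhost:11434',
    defaultModel: 'llama2',
  });

  if (!(await provider.isAvailable())) {
    throw new Error('Ollama is not reachable');
  }

  const result = await provider.complete({
    // Roles are flattened into a single prompt string ("System: ...", "User: ...").
    messages: [
      { role: 'system', content: 'You are terse.' },
      { role: 'user', content: 'Say hello' },
    ],
    temperature: 0.5, // sent as temperature
    maxTokens: 100,   // sent as num_predict
    topP: 0.9,        // sent as top_p
  });

  // finishReason is 'stop' when the response reports done: true, else 'length'.
  console.log(result.content, result.finishReason, result.usage?.totalTokens);
}

demo().catch(console.error);
```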
package/dist/providers/openai.provider.d.ts
@@ -0,0 +1,57 @@
+ import { IAIProvider, AIProvider, AICompletionRequest, AICompletionResponse, AIStreamChunk, AIEmbeddingRequest, AIEmbeddingResponse } from '../ai-enhanced.types';
+ /**
+  * OpenAI Provider
+  * Production-ready implementation with full OpenAI API support
+  */
+ export declare class OpenAIProvider implements IAIProvider {
+     readonly name: AIProvider;
+     private client;
+     private defaultModel;
+     constructor(apiKey?: string, config?: {
+         baseURL?: string;
+         defaultModel?: string;
+     });
+     /**
+      * Generate completion
+      */
+     complete(request: AICompletionRequest): Promise<AICompletionResponse>;
+     /**
+      * Generate streaming completion
+      */
+     streamComplete(request: AICompletionRequest): AsyncGenerator<AIStreamChunk>;
+     /**
+      * Generate embeddings
+      */
+     embed(request: AIEmbeddingRequest): Promise<AIEmbeddingResponse>;
+     /** Valid OpenAI TTS voices (updated to include ash, sage, coral) */
+     private static readonly TTS_VOICES;
+     /**
+      * Generate speech from text (TTS)
+      */
+     speech(input: string, options?: {
+         voice?: string;
+         model?: string;
+         format?: 'mp3' | 'opus';
+     }): Promise<Buffer>;
+     /**
+      * Check if provider is available
+      */
+     isAvailable(): Promise<boolean>;
+     /**
+      * Get supported models
+      */
+     getSupportedModels(): string[];
+     /**
+      * Get supported embedding models
+      */
+     getSupportedEmbeddingModels(): string[];
+     /**
+      * Transform messages to OpenAI format
+      */
+     private transformMessages;
+     /**
+      * Handle OpenAI errors
+      */
+     private handleError;
+ }
+ //# sourceMappingURL=openai.provider.d.ts.map
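
The declaration file fixes the provider's public surface. Here is a short consumption sketch against those signatures; only the `@hazeljs/ai` root import path is an assumption, the method shapes are verbatim from the `.d.ts`:

```ts
// Sketch against the .d.ts above; the root import path is an assumption.
import { OpenAIProvider } from '@hazeljs/ai';

async function demo(): Promise<void> {
  const provider = new OpenAIProvider(process.env.OPENAI_API_KEY, {
    defaultModel: 'gpt-4-turbo-preview',
  });

  const reply = await provider.complete({
    messages: [{ role: 'user', content: 'One sentence on TypeScript.' }],
    maxTokens: 64,
  });
  console.log(reply.content, reply.usage?.totalTokens);

  const { embeddings } = await provider.embed({ input: ['hello', 'world'] });
  console.log(embeddings.length, embeddings[0]?.length);

  // speech() lowercases and validates the voice against TTS_VOICES,
  // falling back to 'alloy' for unknown values.
  const audio = await provider.speech('Hello there', { voice: 'coral', format: 'mp3' });
  console.log(audio.byteLength);
}

demo().catch(console.error);
```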
package/dist/providers/openai.provider.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"openai.provider.d.ts","sourceRoot":"","sources":["../../src/providers/openai.provider.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,WAAW,EACX,UAAU,EACV,mBAAmB,EACnB,oBAAoB,EACpB,aAAa,EACb,kBAAkB,EAClB,mBAAmB,EAEpB,MAAM,sBAAsB,CAAC;AAK9B;;;GAGG;AACH,qBAAa,cAAe,YAAW,WAAW;IAChD,QAAQ,CAAC,IAAI,EAAE,UAAU,CAAY;IACrC,OAAO,CAAC,MAAM,CAA8B;IAC5C,OAAO,CAAC,YAAY,CAAS;gBAEjB,MAAM,CAAC,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAC;QAAC,YAAY,CAAC,EAAE,MAAM,CAAA;KAAE;IASjF;;OAEG;IACG,QAAQ,CAAC,OAAO,EAAE,mBAAmB,GAAG,OAAO,CAAC,oBAAoB,CAAC;IAuF3E;;OAEG;IACI,cAAc,CAAC,OAAO,EAAE,mBAAmB,GAAG,cAAc,CAAC,aAAa,CAAC;IA2DlF;;OAEG;IACG,KAAK,CAAC,OAAO,EAAE,kBAAkB,GAAG,OAAO,CAAC,mBAAmB,CAAC;IAgCtE,oEAAoE;IACpE,OAAO,CAAC,MAAM,CAAC,QAAQ,CAAC,UAAU,CAUvB;IAEX;;OAEG;IACG,MAAM,CACV,KAAK,EAAE,MAAM,EACb,OAAO,CAAC,EAAE;QAAE,KAAK,CAAC,EAAE,MAAM,CAAC;QAAC,KAAK,CAAC,EAAE,MAAM,CAAC;QAAC,MAAM,CAAC,EAAE,KAAK,GAAG,MAAM,CAAA;KAAE,GACpE,OAAO,CAAC,MAAM,CAAC;IA8BlB;;OAEG;IACG,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC;IAWrC;;OAEG;IACH,kBAAkB,IAAI,MAAM,EAAE;IAa9B;;OAEG;IACH,2BAA2B,IAAI,MAAM,EAAE;IAIvC;;OAEG;IACH,OAAO,CAAC,iBAAiB;IAsDzB;;OAEG;IACH,OAAO,CAAC,WAAW;CAcpB"}
package/dist/providers/openai.provider.js
@@ -0,0 +1,320 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OpenAIProvider = void 0;
+ const core_1 = __importDefault(require("@hazeljs/core"));
+ const openai_1 = __importDefault(require("openai"));
+ /**
+  * OpenAI Provider
+  * Production-ready implementation with full OpenAI API support
+  */
+ class OpenAIProvider {
+     constructor(apiKey, config) {
+         this.name = 'openai';
+         this.client = new openai_1.default({
+             apiKey: apiKey || process.env.OPENAI_API_KEY,
+             baseURL: config?.baseURL,
+         });
+         this.defaultModel = config?.defaultModel || 'gpt-4-turbo-preview';
+         core_1.default.info('OpenAI provider initialized');
+     }
+     /**
+      * Generate completion
+      */
+     async complete(request) {
+         try {
+             core_1.default.debug(`OpenAI completion request for model: ${request.model || this.defaultModel}`);
+             const messages = this.transformMessages(request.messages);
+             // Build tools array from functions (modern API)
+             const tools = request.functions?.map((fn) => ({
+                 type: 'function',
+                 function: fn,
+             }));
+             const response = await this.client.chat.completions.create({
+                 model: request.model || this.defaultModel,
+                 messages,
+                 temperature: request.temperature ?? 0.7,
+                 max_tokens: request.maxTokens,
+                 top_p: request.topP,
+                 tools: tools && tools.length > 0 ? tools : undefined,
+                 tool_choice: request.functionCall === 'auto'
+                     ? 'auto'
+                     : request.functionCall === 'none'
+                         ? 'none'
+                         : undefined,
+             });
+             const choice = response.choices[0];
+             if (!choice) {
+                 throw new Error('No completion choice returned');
+             }
+             // Extract tool calls from the modern tool_calls response
+             // Filter to function-type calls and cast to access .function safely
+             const rawToolCalls = choice.message.tool_calls;
+             const functionCalls = rawToolCalls?.filter((tc) => tc.type === 'function');
+             const firstToolCall = functionCalls?.[0];
+             const result = {
+                 id: response.id,
+                 content: choice.message.content || '',
+                 role: 'assistant',
+                 model: response.model,
+                 usage: response.usage
+                     ? {
+                         promptTokens: response.usage.prompt_tokens,
+                         completionTokens: response.usage.completion_tokens,
+                         totalTokens: response.usage.total_tokens,
+                     }
+                     : undefined,
+                 functionCall: firstToolCall
+                     ? {
+                         name: firstToolCall.function.name,
+                         arguments: firstToolCall.function.arguments,
+                     }
+                     : undefined,
+                 toolCalls: functionCalls?.map((tc) => ({
+                     id: tc.id,
+                     type: 'function',
+                     function: {
+                         name: tc.function.name,
+                         arguments: tc.function.arguments,
+                     },
+                 })),
+                 finishReason: choice.finish_reason,
+             };
+             core_1.default.debug('OpenAI completion successful', {
+                 tokens: result.usage?.totalTokens,
+                 finishReason: result.finishReason,
+             });
+             return result;
+         }
+         catch (error) {
+             core_1.default.error('OpenAI completion failed:', error);
+             throw this.handleError(error);
+         }
+     }
+     /**
+      * Generate streaming completion
+      */
+     async *streamComplete(request) {
+         try {
+             core_1.default.debug('OpenAI streaming completion started');
+             const messages = this.transformMessages(request.messages);
+             const stream = await this.client.chat.completions.create({
+                 model: request.model || this.defaultModel,
+                 messages,
+                 temperature: request.temperature ?? 0.7,
+                 max_tokens: request.maxTokens,
+                 top_p: request.topP,
+                 stream: true,
+             });
+             let fullContent = '';
+             let chunkId = '';
+             for await (const chunk of stream) {
+                 const delta = chunk.choices[0]?.delta;
+                 const content = delta?.content || '';
+                 if (content) {
+                     fullContent += content;
+                     chunkId = chunk.id;
+                     yield {
+                         id: chunk.id,
+                         content: fullContent,
+                         delta: content,
+                         done: false,
+                     };
+                 }
+                 // Check if stream is done
+                 if (chunk.choices[0]?.finish_reason) {
+                     yield {
+                         id: chunkId,
+                         content: fullContent,
+                         delta: '',
+                         done: true,
+                         usage: chunk.usage
+                             ? {
+                                 promptTokens: chunk.usage.prompt_tokens,
+                                 completionTokens: chunk.usage.completion_tokens,
+                                 totalTokens: chunk.usage.total_tokens,
+                             }
+                             : undefined,
+                     };
+                 }
+             }
+             core_1.default.debug('OpenAI streaming completed');
+         }
+         catch (error) {
+             core_1.default.error('OpenAI streaming failed:', error);
+             throw this.handleError(error);
+         }
+     }
+     /**
+      * Generate embeddings
+      */
+     async embed(request) {
+         try {
+             core_1.default.debug('OpenAI embedding request');
+             const input = Array.isArray(request.input) ? request.input : [request.input];
+             const response = await this.client.embeddings.create({
+                 model: request.model || 'text-embedding-3-small',
+                 input,
+             });
+             const result = {
+                 embeddings: response.data.map((item) => item.embedding),
+                 model: response.model,
+                 usage: {
+                     promptTokens: response.usage.prompt_tokens,
+                     totalTokens: response.usage.total_tokens,
+                 },
+             };
+             core_1.default.debug('OpenAI embedding successful', {
+                 count: result.embeddings.length,
+                 dimensions: result.embeddings[0]?.length,
+             });
+             return result;
+         }
+         catch (error) {
+             core_1.default.error('OpenAI embedding failed:', error);
+             throw this.handleError(error);
+         }
+     }
+     /**
+      * Generate speech from text (TTS)
+      */
+     async speech(input, options) {
+         try {
+             if (input.length > 4096) {
+                 throw new Error('TTS input must be 4096 characters or less. Chunk longer text before calling speech().');
+             }
+             const rawVoice = (options?.voice || 'alloy').toString().trim().toLowerCase();
+             const voice = OpenAIProvider.TTS_VOICES.includes(rawVoice)
+                 ? rawVoice
+                 : 'alloy';
+             const response = await this.client.audio.speech.create({
+                 model: options?.model || 'tts-1',
+                 voice,
+                 input,
+                 response_format: options?.format || 'mp3',
+             });
+             const arrayBuffer = await response.arrayBuffer();
+             return Buffer.from(arrayBuffer);
+         }
+         catch (error) {
+             core_1.default.error('OpenAI TTS failed:', error);
+             throw this.handleError(error);
+         }
+     }
+     /**
+      * Check if provider is available
+      */
+     async isAvailable() {
+         try {
+             // Make a minimal API call to check availability
+             await this.client.models.list();
+             return true;
+         }
+         catch (error) {
+             core_1.default.warn('OpenAI provider not available:', error);
+             return false;
+         }
+     }
+     /**
+      * Get supported models
+      */
+     getSupportedModels() {
+         return [
+             'gpt-4-turbo-preview',
+             'gpt-4-0125-preview',
+             'gpt-4-1106-preview',
+             'gpt-4',
+             'gpt-4-0613',
+             'gpt-3.5-turbo',
+             'gpt-3.5-turbo-0125',
+             'gpt-3.5-turbo-1106',
+         ];
+     }
+     /**
+      * Get supported embedding models
+      */
+     getSupportedEmbeddingModels() {
+         return ['text-embedding-3-small', 'text-embedding-3-large', 'text-embedding-ada-002'];
+     }
+     /**
+      * Transform messages to OpenAI format
+      */
+     transformMessages(messages) {
+         return messages.map((msg) => {
+             // Map legacy 'function' role to modern 'tool' role
+             if (msg.role === 'function' || msg.role === 'tool') {
+                 return {
+                     role: 'tool',
+                     content: msg.content,
+                     tool_call_id: msg.toolCallId || msg.name || 'unknown',
+                 };
+             }
+             if (msg.role === 'assistant' && (msg.functionCall || msg.toolCalls)) {
+                 return {
+                     role: 'assistant',
+                     content: msg.content || null,
+                     tool_calls: msg.toolCalls ||
+                         (msg.functionCall
+                             ? [
+                                 {
+                                     id: msg.functionCall.name,
+                                     type: 'function',
+                                     function: {
+                                         name: msg.functionCall.name,
+                                         arguments: msg.functionCall.arguments,
+                                     },
+                                 },
+                             ]
+                             : undefined),
+                 };
+             }
+             if (msg.role === 'system') {
+                 return {
+                     role: 'system',
+                     content: msg.content,
+                 };
+             }
+             if (msg.role === 'user') {
+                 return {
+                     role: 'user',
+                     content: msg.content,
+                 };
+             }
+             // Default to assistant
+             return {
+                 role: 'assistant',
+                 content: msg.content,
+             };
+         });
+     }
+     /**
+      * Handle OpenAI errors
+      */
+     handleError(error) {
+         if (error && typeof error === 'object' && 'status' in error && 'message' in error) {
+             const apiError = error;
+             const status = apiError.status ?? 'unknown';
+             const message = `OpenAI API Error (${status}): ${apiError.message ?? 'Unknown error'}`;
+             return new Error(message);
+         }
+         if (error instanceof Error) {
+             return error;
+         }
+         return new Error('Unknown OpenAI error');
+     }
+ }
+ exports.OpenAIProvider = OpenAIProvider;
+ /** Valid OpenAI TTS voices (updated to include ash, sage, coral) */
+ OpenAIProvider.TTS_VOICES = [
+     'alloy',
+     'echo',
+     'fable',
+     'onyx',
+     'nova',
+     'shimmer',
+     'ash',
+     'sage',
+     'coral',
+ ];
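
One consumer-facing detail of streamComplete(): each yielded chunk carries the cumulative content so far plus the incremental delta, and a final done: true chunk is emitted once a finish_reason arrives. A consumption sketch follows (same assumed root import as above; the chunk shape is taken from the generator itself):

```ts
// Chunk shape { id, content, delta, done, usage } comes from the generator above.
import { OpenAIProvider } from '@hazeljs/ai'; // import path assumed

async function streamDemo(provider: OpenAIProvider): Promise<void> {
  for await (const chunk of provider.streamComplete({
    messages: [{ role: 'user', content: 'Stream a haiku.' }],
  })) {
    if (!chunk.done) {
      process.stdout.write(chunk.delta); // incremental text only
    } else {
      // usage is forwarded only when the API attaches it to the final chunk,
      // which typically requires opting in; expect undefined otherwise.
      console.log('\nusage:', chunk.usage);
    }
  }
}
```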
package/dist/providers/openai.provider.test.d.ts
@@ -0,0 +1,2 @@
+ export {};
+ //# sourceMappingURL=openai.provider.test.d.ts.map
package/dist/providers/openai.provider.test.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"openai.provider.test.d.ts","sourceRoot":"","sources":["../../src/providers/openai.provider.test.ts"],"names":[],"mappings":""}