@hazeljs/ai 0.2.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +192 -0
- package/README.md +497 -0
- package/dist/ai-enhanced.service.d.ts +108 -0
- package/dist/ai-enhanced.service.d.ts.map +1 -0
- package/dist/ai-enhanced.service.js +345 -0
- package/dist/ai-enhanced.service.test.d.ts +2 -0
- package/dist/ai-enhanced.service.test.d.ts.map +1 -0
- package/dist/ai-enhanced.service.test.js +501 -0
- package/dist/ai-enhanced.test.d.ts +2 -0
- package/dist/ai-enhanced.test.d.ts.map +1 -0
- package/dist/ai-enhanced.test.js +587 -0
- package/dist/ai-enhanced.types.d.ts +277 -0
- package/dist/ai-enhanced.types.d.ts.map +1 -0
- package/dist/ai-enhanced.types.js +2 -0
- package/dist/ai.decorator.d.ts +4 -0
- package/dist/ai.decorator.d.ts.map +1 -0
- package/dist/ai.decorator.js +57 -0
- package/dist/ai.decorator.test.d.ts +2 -0
- package/dist/ai.decorator.test.d.ts.map +1 -0
- package/dist/ai.decorator.test.js +189 -0
- package/dist/ai.module.d.ts +12 -0
- package/dist/ai.module.d.ts.map +1 -0
- package/dist/ai.module.js +44 -0
- package/dist/ai.module.test.d.ts +2 -0
- package/dist/ai.module.test.d.ts.map +1 -0
- package/dist/ai.module.test.js +23 -0
- package/dist/ai.service.d.ts +11 -0
- package/dist/ai.service.d.ts.map +1 -0
- package/dist/ai.service.js +266 -0
- package/dist/ai.service.test.d.ts +2 -0
- package/dist/ai.service.test.d.ts.map +1 -0
- package/dist/ai.service.test.js +222 -0
- package/dist/ai.types.d.ts +30 -0
- package/dist/ai.types.d.ts.map +1 -0
- package/dist/ai.types.js +2 -0
- package/dist/context/context.manager.d.ts +69 -0
- package/dist/context/context.manager.d.ts.map +1 -0
- package/dist/context/context.manager.js +168 -0
- package/dist/context/context.manager.test.d.ts +2 -0
- package/dist/context/context.manager.test.d.ts.map +1 -0
- package/dist/context/context.manager.test.js +180 -0
- package/dist/decorators/ai-function.decorator.d.ts +42 -0
- package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
- package/dist/decorators/ai-function.decorator.js +80 -0
- package/dist/decorators/ai-validate.decorator.d.ts +46 -0
- package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
- package/dist/decorators/ai-validate.decorator.js +83 -0
- package/dist/index.d.ts +18 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +40 -0
- package/dist/prompts/task.prompt.d.ts +12 -0
- package/dist/prompts/task.prompt.d.ts.map +1 -0
- package/dist/prompts/task.prompt.js +12 -0
- package/dist/providers/anthropic.provider.d.ts +48 -0
- package/dist/providers/anthropic.provider.d.ts.map +1 -0
- package/dist/providers/anthropic.provider.js +194 -0
- package/dist/providers/anthropic.provider.test.d.ts +2 -0
- package/dist/providers/anthropic.provider.test.d.ts.map +1 -0
- package/dist/providers/anthropic.provider.test.js +222 -0
- package/dist/providers/cohere.provider.d.ts +57 -0
- package/dist/providers/cohere.provider.d.ts.map +1 -0
- package/dist/providers/cohere.provider.js +230 -0
- package/dist/providers/cohere.provider.test.d.ts +2 -0
- package/dist/providers/cohere.provider.test.d.ts.map +1 -0
- package/dist/providers/cohere.provider.test.js +267 -0
- package/dist/providers/gemini.provider.d.ts +45 -0
- package/dist/providers/gemini.provider.d.ts.map +1 -0
- package/dist/providers/gemini.provider.js +180 -0
- package/dist/providers/gemini.provider.test.d.ts +2 -0
- package/dist/providers/gemini.provider.test.d.ts.map +1 -0
- package/dist/providers/gemini.provider.test.js +219 -0
- package/dist/providers/ollama.provider.d.ts +45 -0
- package/dist/providers/ollama.provider.d.ts.map +1 -0
- package/dist/providers/ollama.provider.js +232 -0
- package/dist/providers/ollama.provider.test.d.ts +2 -0
- package/dist/providers/ollama.provider.test.d.ts.map +1 -0
- package/dist/providers/ollama.provider.test.js +267 -0
- package/dist/providers/openai.provider.d.ts +57 -0
- package/dist/providers/openai.provider.d.ts.map +1 -0
- package/dist/providers/openai.provider.js +320 -0
- package/dist/providers/openai.provider.test.d.ts +2 -0
- package/dist/providers/openai.provider.test.d.ts.map +1 -0
- package/dist/providers/openai.provider.test.js +364 -0
- package/dist/tracking/token.tracker.d.ts +72 -0
- package/dist/tracking/token.tracker.d.ts.map +1 -0
- package/dist/tracking/token.tracker.js +222 -0
- package/dist/tracking/token.tracker.test.d.ts +2 -0
- package/dist/tracking/token.tracker.test.d.ts.map +1 -0
- package/dist/tracking/token.tracker.test.js +272 -0
- package/dist/vector/vector.service.d.ts +50 -0
- package/dist/vector/vector.service.d.ts.map +1 -0
- package/dist/vector/vector.service.js +163 -0
- package/package.json +60 -0
|
@@ -0,0 +1,364 @@
|
|
|
1
|
+
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
// Stub the shared logger from @hazeljs/core so the suite produces no real log output.
jest.mock('@hazeljs/core', () => ({
    __esModule: true,
    default: { info: jest.fn(), debug: jest.fn(), warn: jest.fn(), error: jest.fn() },
}));
// One jest mock per OpenAI SDK surface the provider touches; declared before
// jest.mock('openai') so the factory below can close over them.
const mockChatCreate = jest.fn();
const mockEmbedCreate = jest.fn();
const mockSpeechCreate = jest.fn();
const mockModelsList = jest.fn();
// Replace the OpenAI SDK constructor with a stub exposing only the mocked endpoints.
jest.mock('openai', () => ({
    __esModule: true,
    default: jest.fn().mockImplementation(() => ({
        chat: { completions: { create: mockChatCreate } },
        embeddings: { create: mockEmbedCreate },
        audio: { speech: { create: mockSpeechCreate } },
        models: { list: mockModelsList },
    })),
}));
const openai_provider_1 = require("./openai.provider");
// Minimal valid completion request reused across tests.
const BASE_REQUEST = {
    messages: [{ role: 'user', content: 'Hello OpenAI' }],
    model: 'gpt-4',
};
// Canonical successful chat-completion payload returned by the mocked SDK.
const MOCK_COMPLETION = {
    id: 'chatcmpl-001',
    model: 'gpt-4',
    choices: [
        {
            message: { content: 'OpenAI response', tool_calls: undefined },
            finish_reason: 'stop',
        },
    ],
    usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
};
|
|
36
|
+
// Unit tests for OpenAIProvider: completion, streaming, embeddings, TTS and
// availability probing, all against the mocked OpenAI SDK defined above.
describe('OpenAIProvider', () => {
    let provider;
    beforeEach(() => {
        jest.clearAllMocks();
        provider = new openai_provider_1.OpenAIProvider('test-api-key');
    });
    describe('constructor', () => {
        it('sets name to openai', () => {
            expect(provider.name).toBe('openai');
        });
        it('accepts custom config', () => {
            const p = new openai_provider_1.OpenAIProvider('key', {
                baseURL: 'http://custom',
                defaultModel: 'gpt-3.5-turbo',
            });
            expect(p).toBeDefined();
        });
    });
    describe('getSupportedModels()', () => {
        it('returns list including gpt-4', () => {
            const models = provider.getSupportedModels();
            expect(models).toContain('gpt-4');
        });
    });
    describe('getSupportedEmbeddingModels()', () => {
        it('returns embedding models', () => {
            const models = provider.getSupportedEmbeddingModels();
            expect(models).toContain('text-embedding-3-small');
        });
    });
    describe('complete()', () => {
        it('returns a completion response', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            const result = await provider.complete(BASE_REQUEST);
            // snake_case SDK usage fields are mapped to camelCase on the result.
            expect(result.content).toBe('OpenAI response');
            expect(result.role).toBe('assistant');
            expect(result.usage?.promptTokens).toBe(10);
            expect(result.usage?.completionTokens).toBe(20);
            expect(result.usage?.totalTokens).toBe(30);
            expect(result.finishReason).toBe('stop');
        });
        it('uses default model when not specified', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({ messages: [{ role: 'user', content: 'hi' }] });
            expect(mockChatCreate).toHaveBeenCalledWith(expect.objectContaining({ model: 'gpt-4-turbo-preview' }));
        });
        it('handles tool_calls in response', async () => {
            mockChatCreate.mockResolvedValue({
                ...MOCK_COMPLETION,
                choices: [
                    {
                        message: {
                            content: '',
                            tool_calls: [
                                {
                                    id: 'call_1',
                                    type: 'function',
                                    function: { name: 'getWeather', arguments: '{"city":"NYC"}' },
                                },
                            ],
                        },
                        finish_reason: 'tool_calls',
                    },
                ],
            });
            const result = await provider.complete(BASE_REQUEST);
            // First tool call is also surfaced via the legacy functionCall field.
            expect(result.functionCall?.name).toBe('getWeather');
            expect(result.toolCalls).toHaveLength(1);
        });
        it('passes functions as tools', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({
                ...BASE_REQUEST,
                functions: [
                    {
                        name: 'testFn',
                        description: 'Test',
                        parameters: { type: 'object', properties: {} },
                    },
                ],
            });
            // Legacy `functions` input is translated to the newer `tools` API shape.
            expect(mockChatCreate).toHaveBeenCalledWith(expect.objectContaining({
                tools: expect.arrayContaining([expect.objectContaining({ type: 'function' })]),
            }));
        });
        it('sets tool_choice auto when functionCall is "auto"', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({ ...BASE_REQUEST, functionCall: 'auto' });
            expect(mockChatCreate).toHaveBeenCalledWith(expect.objectContaining({ tool_choice: 'auto' }));
        });
        it('sets tool_choice none when functionCall is "none"', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({ ...BASE_REQUEST, functionCall: 'none' });
            expect(mockChatCreate).toHaveBeenCalledWith(expect.objectContaining({ tool_choice: 'none' }));
        });
        it('handles missing usage in response', async () => {
            mockChatCreate.mockResolvedValue({ ...MOCK_COMPLETION, usage: undefined });
            const result = await provider.complete(BASE_REQUEST);
            expect(result.usage).toBeUndefined();
        });
        it('throws when no choices returned', async () => {
            mockChatCreate.mockResolvedValue({ ...MOCK_COMPLETION, choices: [] });
            await expect(provider.complete(BASE_REQUEST)).rejects.toThrow();
        });
        it('wraps API errors with status code', async () => {
            // Non-Error rejections carrying an HTTP status are wrapped with the code.
            mockChatCreate.mockRejectedValue({ status: 429, message: 'Rate limit' });
            await expect(provider.complete(BASE_REQUEST)).rejects.toThrow('OpenAI API Error (429)');
        });
        it('returns original Error when thrown', async () => {
            mockChatCreate.mockRejectedValue(new Error('Direct error'));
            await expect(provider.complete(BASE_REQUEST)).rejects.toThrow('Direct error');
        });
        it('wraps unknown thrown values', async () => {
            mockChatCreate.mockRejectedValue(null);
            await expect(provider.complete(BASE_REQUEST)).rejects.toThrow('Unknown OpenAI error');
        });
    });
    // Verifies how hazeljs message shapes are mapped onto OpenAI wire roles.
    describe('complete() – message transformation', () => {
        it('transforms system messages', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({
                messages: [{ role: 'system', content: 'Be helpful' }],
            });
            const msgs = mockChatCreate.mock.calls[0][0].messages;
            expect(msgs[0].role).toBe('system');
        });
        it('transforms user messages', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({ messages: [{ role: 'user', content: 'Hello' }] });
            const msgs = mockChatCreate.mock.calls[0][0].messages;
            expect(msgs[0].role).toBe('user');
        });
        it('transforms assistant messages with functionCall to tool_calls', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({
                messages: [
                    {
                        role: 'assistant',
                        content: '',
                        functionCall: { name: 'fn', arguments: '{}' },
                    },
                ],
            });
            const msgs = mockChatCreate.mock.calls[0][0].messages;
            expect(msgs[0].role).toBe('assistant');
            expect(msgs[0].tool_calls).toBeDefined();
        });
        it('transforms function/tool role messages', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({
                messages: [{ role: 'tool', content: 'result', toolCallId: 'call_1' }],
            });
            const msgs = mockChatCreate.mock.calls[0][0].messages;
            expect(msgs[0].role).toBe('tool');
        });
        it('transforms assistant messages with toolCalls', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({
                messages: [
                    {
                        role: 'assistant',
                        content: '',
                        toolCalls: [{ id: 'tc_1', type: 'function', function: { name: 'f', arguments: '{}' } }],
                    },
                ],
            });
            const msgs = mockChatCreate.mock.calls[0][0].messages;
            expect(msgs[0].role).toBe('assistant');
        });
        it('defaults unknown role to assistant', async () => {
            mockChatCreate.mockResolvedValue(MOCK_COMPLETION);
            await provider.complete({
                messages: [{ role: 'unknown', content: 'msg' }],
            });
            const msgs = mockChatCreate.mock.calls[0][0].messages;
            expect(msgs[0].role).toBe('assistant');
        });
    });
    // Streaming tests feed async generators through the mocked create() call.
    describe('streamComplete()', () => {
        it('yields content chunks', async () => {
            async function* mockStream() {
                yield {
                    id: 's1',
                    choices: [{ delta: { content: 'Hello ' }, finish_reason: null }],
                    usage: undefined,
                };
                yield {
                    id: 's1',
                    choices: [{ delta: { content: 'world' }, finish_reason: null }],
                    usage: undefined,
                };
                yield {
                    id: 's1',
                    choices: [{ delta: {}, finish_reason: 'stop' }],
                    usage: { prompt_tokens: 5, completion_tokens: 3, total_tokens: 8 },
                };
            }
            mockChatCreate.mockResolvedValue(mockStream());
            const results = [];
            for await (const chunk of provider.streamComplete(BASE_REQUEST)) {
                results.push(chunk);
            }
            expect(results.length).toBeGreaterThan(0);
        });
        it('includes usage in final chunk when finish_reason is set', async () => {
            async function* mockStream() {
                yield {
                    id: 's2',
                    choices: [{ delta: { content: 'end' }, finish_reason: 'stop' }],
                    usage: { prompt_tokens: 3, completion_tokens: 2, total_tokens: 5 },
                };
            }
            mockChatCreate.mockResolvedValue(mockStream());
            const results = [];
            for await (const chunk of provider.streamComplete(BASE_REQUEST)) {
                results.push(chunk);
            }
            const last = results[results.length - 1];
            expect(last.done).toBe(true);
        });
        it('skips chunks with empty content', async () => {
            async function* mockStream() {
                yield {
                    id: 's3',
                    choices: [{ delta: { content: '' }, finish_reason: null }],
                    usage: undefined,
                };
                yield { id: 's3', choices: [{ delta: {}, finish_reason: 'stop' }], usage: undefined };
            }
            mockChatCreate.mockResolvedValue(mockStream());
            const results = [];
            for await (const chunk of provider.streamComplete(BASE_REQUEST)) {
                results.push(chunk);
            }
            // Only done chunk, no content chunks
            expect(results).toHaveLength(1);
        });
        it('throws on streaming error', async () => {
            mockChatCreate.mockRejectedValue(new Error('Stream failed'));
            // Generator errors only surface when the stream is consumed.
            await expect(async () => {
                for await (const _chunk of provider.streamComplete(BASE_REQUEST)) {
                    // consume
                }
            }).rejects.toThrow();
        });
    });
    describe('embed()', () => {
        it('returns embeddings for string input', async () => {
            mockEmbedCreate.mockResolvedValue({
                data: [{ embedding: [0.1, 0.2, 0.3] }],
                model: 'text-embedding-3-small',
                usage: { prompt_tokens: 5, total_tokens: 5 },
            });
            const result = await provider.embed({ input: 'hello' });
            expect(result.embeddings).toHaveLength(1);
            expect(result.embeddings[0]).toEqual([0.1, 0.2, 0.3]);
        });
        it('returns multiple embeddings for array input', async () => {
            mockEmbedCreate.mockResolvedValue({
                data: [{ embedding: [0.1] }, { embedding: [0.2] }],
                model: 'text-embedding-3-small',
                usage: { prompt_tokens: 10, total_tokens: 10 },
            });
            const result = await provider.embed({ input: ['first', 'second'] });
            expect(result.embeddings).toHaveLength(2);
        });
        it('uses default model text-embedding-3-small', async () => {
            mockEmbedCreate.mockResolvedValue({
                data: [{ embedding: [0.1] }],
                model: 'text-embedding-3-small',
                usage: { prompt_tokens: 3, total_tokens: 3 },
            });
            const result = await provider.embed({ input: 'test' });
            expect(mockEmbedCreate).toHaveBeenCalledWith(expect.objectContaining({ model: 'text-embedding-3-small' }));
            expect(result.model).toBe('text-embedding-3-small');
        });
        it('throws on API failure', async () => {
            mockEmbedCreate.mockRejectedValue({ status: 500, message: 'Server error' });
            await expect(provider.embed({ input: 'test' })).rejects.toThrow();
        });
    });
    describe('speech()', () => {
        it('returns a Buffer from TTS', async () => {
            // The SDK returns a Response-like object; the provider reads arrayBuffer().
            const fakeAudioData = new Uint8Array([1, 2, 3]).buffer;
            mockSpeechCreate.mockResolvedValue({
                arrayBuffer: jest.fn().mockResolvedValue(fakeAudioData),
            });
            const result = await provider.speech('Hello world', { voice: 'alloy' });
            expect(Buffer.isBuffer(result)).toBe(true);
        });
        it('uses default voice alloy', async () => {
            const fakeAudioData = new Uint8Array([1]).buffer;
            mockSpeechCreate.mockResolvedValue({
                arrayBuffer: jest.fn().mockResolvedValue(fakeAudioData),
            });
            await provider.speech('test');
            expect(mockSpeechCreate).toHaveBeenCalledWith(expect.objectContaining({ voice: 'alloy' }));
        });
        it('falls back to alloy for invalid voice', async () => {
            const fakeAudioData = new Uint8Array([1]).buffer;
            mockSpeechCreate.mockResolvedValue({
                arrayBuffer: jest.fn().mockResolvedValue(fakeAudioData),
            });
            await provider.speech('test', { voice: 'invalid-voice' });
            expect(mockSpeechCreate).toHaveBeenCalledWith(expect.objectContaining({ voice: 'alloy' }));
        });
        it('throws when input exceeds 4096 characters', async () => {
            await expect(provider.speech('x'.repeat(4097))).rejects.toThrow('TTS input must be 4096 characters');
        });
        it('uses custom model and format', async () => {
            const fakeAudioData = new Uint8Array([1]).buffer;
            mockSpeechCreate.mockResolvedValue({
                arrayBuffer: jest.fn().mockResolvedValue(fakeAudioData),
            });
            await provider.speech('hello', { model: 'tts-1-hd', format: 'opus' });
            expect(mockSpeechCreate).toHaveBeenCalledWith(expect.objectContaining({ model: 'tts-1-hd', response_format: 'opus' }));
        });
    });
    describe('isAvailable()', () => {
        it('returns true when models.list() succeeds', async () => {
            mockModelsList.mockResolvedValue({ data: [] });
            expect(await provider.isAvailable()).toBe(true);
        });
        it('returns false when models.list() throws', async () => {
            mockModelsList.mockRejectedValue(new Error('Unauthorized'));
            expect(await provider.isAvailable()).toBe(false);
        });
    });
});
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import { TokenUsage, TokenLimitConfig } from '../ai-enhanced.types';
/**
 * Token Usage Tracker
 * Tracks and limits token usage per user/request
 *
 * NOTE(review): this is a generated .d.ts (see sourceMappingURL) — prefer
 * editing the corresponding src/tracking/token.tracker.ts.
 */
export declare class TokenTracker {
    private usageHistory;
    private config;
    private userUsage;
    private readonly TOKEN_COSTS;
    constructor(config?: TokenLimitConfig);
    /**
     * Track token usage
     * @param usage - the usage record to record
     * @param model - optional model name used to derive cost when absent
     */
    track(usage: TokenUsage, model?: string): void;
    /**
     * Check if request is within limits
     * @param userId - user to check daily/monthly totals for (omit to only
     *   check the per-request limit)
     * @param requestTokens - size of the pending request in tokens
     */
    checkLimits(userId?: string, requestTokens?: number): Promise<{
        allowed: boolean;
        reason?: string;
        usage?: {
            today: number;
            month: number;
            limit: {
                daily: number;
                monthly: number;
            };
        };
    }>;
    /**
     * Calculate cost for token usage
     * @returns estimated USD cost; 0 for unknown models
     */
    calculateCost(usage: TokenUsage, model: string): number;
    /**
     * Get usage statistics for a user
     * @param days - lookback window (defaults to 30 in the implementation)
     */
    getUserStats(userId: string, days?: number): {
        totalTokens: number;
        totalCost: number;
        requestCount: number;
        averageTokensPerRequest: number;
        dailyAverage: number;
    };
    /**
     * Get global statistics
     * @param days - lookback window (defaults to 30 in the implementation)
     */
    getGlobalStats(days?: number): {
        totalTokens: number;
        totalCost: number;
        requestCount: number;
        uniqueUsers: number;
        topUsers: Array<{
            userId: string;
            tokens: number;
            cost: number;
        }>;
    };
    /**
     * Clear old usage data
     */
    cleanup(daysToKeep?: number): void;
    /**
     * Export usage data
     */
    exportData(userId?: string): TokenUsage[];
    /**
     * Update configuration
     */
    updateConfig(config: Partial<TokenLimitConfig>): void;
}
//# sourceMappingURL=token.tracker.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"token.tracker.d.ts","sourceRoot":"","sources":["../../src/tracking/token.tracker.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,UAAU,EAAE,gBAAgB,EAAE,MAAM,sBAAsB,CAAC;AAIpE;;;GAGG;AACH,qBACa,YAAY;IACvB,OAAO,CAAC,YAAY,CAAoB;IACxC,OAAO,CAAC,MAAM,CAAmB;IACjC,OAAO,CAAC,SAAS,CAAwC;IAGzD,OAAO,CAAC,QAAQ,CAAC,WAAW,CAO1B;gBAEU,MAAM,CAAC,EAAE,gBAAgB;IAUrC;;OAEG;IACH,KAAK,CAAC,KAAK,EAAE,UAAU,EAAE,KAAK,CAAC,EAAE,MAAM,GAAG,IAAI;IAsB9C;;OAEG;IACG,WAAW,CACf,MAAM,CAAC,EAAE,MAAM,EACf,aAAa,CAAC,EAAE,MAAM,GACrB,OAAO,CAAC;QACT,OAAO,EAAE,OAAO,CAAC;QACjB,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB,KAAK,CAAC,EAAE;YACN,KAAK,EAAE,MAAM,CAAC;YACd,KAAK,EAAE,MAAM,CAAC;YACd,KAAK,EAAE;gBACL,KAAK,EAAE,MAAM,CAAC;gBACd,OAAO,EAAE,MAAM,CAAC;aACjB,CAAC;SACH,CAAC;KACH,CAAC;IA2DF;;OAEG;IACH,aAAa,CAAC,KAAK,EAAE,UAAU,EAAE,KAAK,EAAE,MAAM,GAAG,MAAM;IAavD;;OAEG;IACH,YAAY,CACV,MAAM,EAAE,MAAM,EACd,IAAI,GAAE,MAAW,GAChB;QACD,WAAW,EAAE,MAAM,CAAC;QACpB,SAAS,EAAE,MAAM,CAAC;QAClB,YAAY,EAAE,MAAM,CAAC;QACrB,uBAAuB,EAAE,MAAM,CAAC;QAChC,YAAY,EAAE,MAAM,CAAC;KACtB;IAiBD;;OAEG;IACH,cAAc,CAAC,IAAI,GAAE,MAAW,GAAG;QACjC,WAAW,EAAE,MAAM,CAAC;QACpB,SAAS,EAAE,MAAM,CAAC;QAClB,YAAY,EAAE,MAAM,CAAC;QACrB,WAAW,EAAE,MAAM,CAAC;QACpB,QAAQ,EAAE,KAAK,CAAC;YAAE,MAAM,EAAE,MAAM,CAAC;YAAC,MAAM,EAAE,MAAM,CAAC;YAAC,IAAI,EAAE,MAAM,CAAA;SAAE,CAAC,CAAC;KACnE;IAsCD;;OAEG;IACH,OAAO,CAAC,UAAU,GAAE,MAAW,GAAG,IAAI;IAmBtC;;OAEG;IACH,UAAU,CAAC,MAAM,CAAC,EAAE,MAAM,GAAG,UAAU,EAAE;IAOzC;;OAEG;IACH,YAAY,CAAC,MAAM,EAAE,OAAO,CAAC,gBAAgB,CAAC,GAAG,IAAI;CAItD"}
|
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
"use strict";
// TypeScript-emitted helper: applies class decorators right-to-left, preferring
// Reflect.decorate when available.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// TypeScript-emitted helper: records design-time type metadata via Reflect.metadata.
var __metadata = (this && this.__metadata) || function (k, v) {
    if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
// TypeScript-emitted helper: normalizes CommonJS modules to an ESM-like default export.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.TokenTracker = void 0;
// core_1 provides the Service decorator; core_2 is the default-export logger.
const core_1 = require("@hazeljs/core");
const core_2 = __importDefault(require("@hazeljs/core"));
|
|
18
|
+
/**
|
|
19
|
+
* Token Usage Tracker
|
|
20
|
+
* Tracks and limits token usage per user/request
|
|
21
|
+
*/
|
|
22
|
+
let TokenTracker = class TokenTracker {
|
|
23
|
+
constructor(config) {
|
|
24
|
+
this.usageHistory = [];
|
|
25
|
+
this.userUsage = new Map();
|
|
26
|
+
// Token costs per 1K tokens (as of 2024)
|
|
27
|
+
this.TOKEN_COSTS = {
|
|
28
|
+
'gpt-4-turbo-preview': { prompt: 0.01, completion: 0.03 },
|
|
29
|
+
'gpt-4': { prompt: 0.03, completion: 0.06 },
|
|
30
|
+
'gpt-3.5-turbo': { prompt: 0.0005, completion: 0.0015 },
|
|
31
|
+
'claude-3-opus': { prompt: 0.015, completion: 0.075 },
|
|
32
|
+
'claude-3-sonnet': { prompt: 0.003, completion: 0.015 },
|
|
33
|
+
'claude-3-haiku': { prompt: 0.00025, completion: 0.00125 },
|
|
34
|
+
};
|
|
35
|
+
this.config = {
|
|
36
|
+
maxTokensPerRequest: config?.maxTokensPerRequest || 4096,
|
|
37
|
+
maxTokensPerDay: config?.maxTokensPerDay || 100000,
|
|
38
|
+
maxTokensPerMonth: config?.maxTokensPerMonth || 1000000,
|
|
39
|
+
costPerToken: config?.costPerToken,
|
|
40
|
+
};
|
|
41
|
+
core_2.default.info('Token Tracker initialized', this.config);
|
|
42
|
+
}
|
|
43
|
+
/**
|
|
44
|
+
* Track token usage
|
|
45
|
+
*/
|
|
46
|
+
track(usage, model) {
|
|
47
|
+
// Calculate cost if not provided
|
|
48
|
+
if (!usage.cost && model) {
|
|
49
|
+
usage.cost = this.calculateCost(usage, model);
|
|
50
|
+
}
|
|
51
|
+
this.usageHistory.push(usage);
|
|
52
|
+
// Track per user if userId provided
|
|
53
|
+
if (usage.userId) {
|
|
54
|
+
const userHistory = this.userUsage.get(usage.userId) || [];
|
|
55
|
+
userHistory.push(usage);
|
|
56
|
+
this.userUsage.set(usage.userId, userHistory);
|
|
57
|
+
}
|
|
58
|
+
core_2.default.debug('Token usage tracked', {
|
|
59
|
+
userId: usage.userId,
|
|
60
|
+
totalTokens: usage.totalTokens,
|
|
61
|
+
cost: usage.cost,
|
|
62
|
+
});
|
|
63
|
+
}
|
|
64
|
+
/**
|
|
65
|
+
* Check if request is within limits
|
|
66
|
+
*/
|
|
67
|
+
async checkLimits(userId, requestTokens) {
|
|
68
|
+
// Check request token limit
|
|
69
|
+
if (requestTokens && requestTokens > this.config.maxTokensPerRequest) {
|
|
70
|
+
return {
|
|
71
|
+
allowed: false,
|
|
72
|
+
reason: `Request exceeds token limit (${requestTokens} > ${this.config.maxTokensPerRequest})`,
|
|
73
|
+
};
|
|
74
|
+
}
|
|
75
|
+
if (!userId) {
|
|
76
|
+
return { allowed: true };
|
|
77
|
+
}
|
|
78
|
+
const now = Date.now();
|
|
79
|
+
const oneDayAgo = now - 24 * 60 * 60 * 1000;
|
|
80
|
+
const oneMonthAgo = now - 30 * 24 * 60 * 60 * 1000;
|
|
81
|
+
const userHistory = this.userUsage.get(userId) || [];
|
|
82
|
+
// Calculate daily usage
|
|
83
|
+
const dailyUsage = userHistory
|
|
84
|
+
.filter((u) => u.timestamp > oneDayAgo)
|
|
85
|
+
.reduce((sum, u) => sum + u.totalTokens, 0);
|
|
86
|
+
// Calculate monthly usage
|
|
87
|
+
const monthlyUsage = userHistory
|
|
88
|
+
.filter((u) => u.timestamp > oneMonthAgo)
|
|
89
|
+
.reduce((sum, u) => sum + u.totalTokens, 0);
|
|
90
|
+
const usage = {
|
|
91
|
+
today: dailyUsage,
|
|
92
|
+
month: monthlyUsage,
|
|
93
|
+
limit: {
|
|
94
|
+
daily: this.config.maxTokensPerDay,
|
|
95
|
+
monthly: this.config.maxTokensPerMonth,
|
|
96
|
+
},
|
|
97
|
+
};
|
|
98
|
+
// Check daily limit
|
|
99
|
+
if (dailyUsage >= this.config.maxTokensPerDay) {
|
|
100
|
+
return {
|
|
101
|
+
allowed: false,
|
|
102
|
+
reason: 'Daily token limit exceeded',
|
|
103
|
+
usage,
|
|
104
|
+
};
|
|
105
|
+
}
|
|
106
|
+
// Check monthly limit
|
|
107
|
+
if (monthlyUsage >= this.config.maxTokensPerMonth) {
|
|
108
|
+
return {
|
|
109
|
+
allowed: false,
|
|
110
|
+
reason: 'Monthly token limit exceeded',
|
|
111
|
+
usage,
|
|
112
|
+
};
|
|
113
|
+
}
|
|
114
|
+
return { allowed: true, usage };
|
|
115
|
+
}
|
|
116
|
+
/**
|
|
117
|
+
* Calculate cost for token usage
|
|
118
|
+
*/
|
|
119
|
+
calculateCost(usage, model) {
|
|
120
|
+
const costs = this.TOKEN_COSTS[model];
|
|
121
|
+
if (!costs) {
|
|
122
|
+
core_2.default.warn(`Unknown model for cost calculation: ${model}`);
|
|
123
|
+
return 0;
|
|
124
|
+
}
|
|
125
|
+
const promptCost = (usage.promptTokens / 1000) * costs.prompt;
|
|
126
|
+
const completionCost = (usage.completionTokens / 1000) * costs.completion;
|
|
127
|
+
return promptCost + completionCost;
|
|
128
|
+
}
|
|
129
|
+
/**
|
|
130
|
+
* Get usage statistics for a user
|
|
131
|
+
*/
|
|
132
|
+
getUserStats(userId, days = 30) {
|
|
133
|
+
const cutoff = Date.now() - days * 24 * 60 * 60 * 1000;
|
|
134
|
+
const userHistory = (this.userUsage.get(userId) || []).filter((u) => u.timestamp > cutoff);
|
|
135
|
+
const totalTokens = userHistory.reduce((sum, u) => sum + u.totalTokens, 0);
|
|
136
|
+
const totalCost = userHistory.reduce((sum, u) => sum + (u.cost || 0), 0);
|
|
137
|
+
const requestCount = userHistory.length;
|
|
138
|
+
return {
|
|
139
|
+
totalTokens,
|
|
140
|
+
totalCost,
|
|
141
|
+
requestCount,
|
|
142
|
+
averageTokensPerRequest: requestCount > 0 ? Math.round(totalTokens / requestCount) : 0,
|
|
143
|
+
dailyAverage: Math.round(totalTokens / days),
|
|
144
|
+
};
|
|
145
|
+
}
|
|
146
|
+
/**
|
|
147
|
+
* Get global statistics
|
|
148
|
+
*/
|
|
149
|
+
getGlobalStats(days = 30) {
|
|
150
|
+
const cutoff = Date.now() - days * 24 * 60 * 60 * 1000;
|
|
151
|
+
const recentUsage = this.usageHistory.filter((u) => u.timestamp > cutoff);
|
|
152
|
+
const totalTokens = recentUsage.reduce((sum, u) => sum + u.totalTokens, 0);
|
|
153
|
+
const totalCost = recentUsage.reduce((sum, u) => sum + (u.cost || 0), 0);
|
|
154
|
+
const requestCount = recentUsage.length;
|
|
155
|
+
// Calculate per-user stats
|
|
156
|
+
const userStats = new Map();
|
|
157
|
+
recentUsage.forEach((u) => {
|
|
158
|
+
if (u.userId) {
|
|
159
|
+
const stats = userStats.get(u.userId) || { tokens: 0, cost: 0 };
|
|
160
|
+
stats.tokens += u.totalTokens;
|
|
161
|
+
stats.cost += u.cost || 0;
|
|
162
|
+
userStats.set(u.userId, stats);
|
|
163
|
+
}
|
|
164
|
+
});
|
|
165
|
+
// Get top users
|
|
166
|
+
const topUsers = Array.from(userStats.entries())
|
|
167
|
+
.map(([userId, stats]) => ({
|
|
168
|
+
userId,
|
|
169
|
+
tokens: stats.tokens,
|
|
170
|
+
cost: stats.cost,
|
|
171
|
+
}))
|
|
172
|
+
.sort((a, b) => b.tokens - a.tokens)
|
|
173
|
+
.slice(0, 10);
|
|
174
|
+
return {
|
|
175
|
+
totalTokens,
|
|
176
|
+
totalCost,
|
|
177
|
+
requestCount,
|
|
178
|
+
uniqueUsers: userStats.size,
|
|
179
|
+
topUsers,
|
|
180
|
+
};
|
|
181
|
+
}
|
|
182
|
+
/**
|
|
183
|
+
* Clear old usage data
|
|
184
|
+
*/
|
|
185
|
+
cleanup(daysToKeep = 90) {
|
|
186
|
+
const cutoff = Date.now() - daysToKeep * 24 * 60 * 60 * 1000;
|
|
187
|
+
// Clean global history
|
|
188
|
+
this.usageHistory = this.usageHistory.filter((u) => u.timestamp > cutoff);
|
|
189
|
+
// Clean per-user history
|
|
190
|
+
for (const [userId, history] of this.userUsage.entries()) {
|
|
191
|
+
const filtered = history.filter((u) => u.timestamp > cutoff);
|
|
192
|
+
if (filtered.length === 0) {
|
|
193
|
+
this.userUsage.delete(userId);
|
|
194
|
+
}
|
|
195
|
+
else {
|
|
196
|
+
this.userUsage.set(userId, filtered);
|
|
197
|
+
}
|
|
198
|
+
}
|
|
199
|
+
core_2.default.info(`Cleaned up usage data older than ${daysToKeep} days`);
|
|
200
|
+
}
|
|
201
|
+
/**
|
|
202
|
+
* Export usage data
|
|
203
|
+
*/
|
|
204
|
+
exportData(userId) {
|
|
205
|
+
if (userId) {
|
|
206
|
+
return this.userUsage.get(userId) || [];
|
|
207
|
+
}
|
|
208
|
+
return [...this.usageHistory];
|
|
209
|
+
}
|
|
210
|
+
/**
|
|
211
|
+
* Update configuration
|
|
212
|
+
*/
|
|
213
|
+
updateConfig(config) {
|
|
214
|
+
this.config = { ...this.config, ...config };
|
|
215
|
+
core_2.default.info('Token tracker configuration updated', this.config);
|
|
216
|
+
}
|
|
217
|
+
};
|
|
218
|
+
exports.TokenTracker = TokenTracker;
|
|
219
|
+
exports.TokenTracker = TokenTracker = __decorate([
|
|
220
|
+
(0, core_1.Service)(),
|
|
221
|
+
__metadata("design:paramtypes", [Object])
|
|
222
|
+
], TokenTracker);
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"token.tracker.test.d.ts","sourceRoot":"","sources":["../../src/tracking/token.tracker.test.ts"],"names":[],"mappings":""}
|