@hazeljs/ai 0.2.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +192 -0
- package/README.md +497 -0
- package/dist/ai-enhanced.service.d.ts +108 -0
- package/dist/ai-enhanced.service.d.ts.map +1 -0
- package/dist/ai-enhanced.service.js +345 -0
- package/dist/ai-enhanced.service.test.d.ts +2 -0
- package/dist/ai-enhanced.service.test.d.ts.map +1 -0
- package/dist/ai-enhanced.service.test.js +501 -0
- package/dist/ai-enhanced.test.d.ts +2 -0
- package/dist/ai-enhanced.test.d.ts.map +1 -0
- package/dist/ai-enhanced.test.js +587 -0
- package/dist/ai-enhanced.types.d.ts +277 -0
- package/dist/ai-enhanced.types.d.ts.map +1 -0
- package/dist/ai-enhanced.types.js +2 -0
- package/dist/ai.decorator.d.ts +4 -0
- package/dist/ai.decorator.d.ts.map +1 -0
- package/dist/ai.decorator.js +57 -0
- package/dist/ai.decorator.test.d.ts +2 -0
- package/dist/ai.decorator.test.d.ts.map +1 -0
- package/dist/ai.decorator.test.js +189 -0
- package/dist/ai.module.d.ts +12 -0
- package/dist/ai.module.d.ts.map +1 -0
- package/dist/ai.module.js +44 -0
- package/dist/ai.module.test.d.ts +2 -0
- package/dist/ai.module.test.d.ts.map +1 -0
- package/dist/ai.module.test.js +23 -0
- package/dist/ai.service.d.ts +11 -0
- package/dist/ai.service.d.ts.map +1 -0
- package/dist/ai.service.js +266 -0
- package/dist/ai.service.test.d.ts +2 -0
- package/dist/ai.service.test.d.ts.map +1 -0
- package/dist/ai.service.test.js +222 -0
- package/dist/ai.types.d.ts +30 -0
- package/dist/ai.types.d.ts.map +1 -0
- package/dist/ai.types.js +2 -0
- package/dist/context/context.manager.d.ts +69 -0
- package/dist/context/context.manager.d.ts.map +1 -0
- package/dist/context/context.manager.js +168 -0
- package/dist/context/context.manager.test.d.ts +2 -0
- package/dist/context/context.manager.test.d.ts.map +1 -0
- package/dist/context/context.manager.test.js +180 -0
- package/dist/decorators/ai-function.decorator.d.ts +42 -0
- package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
- package/dist/decorators/ai-function.decorator.js +80 -0
- package/dist/decorators/ai-validate.decorator.d.ts +46 -0
- package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
- package/dist/decorators/ai-validate.decorator.js +83 -0
- package/dist/index.d.ts +18 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +40 -0
- package/dist/prompts/task.prompt.d.ts +12 -0
- package/dist/prompts/task.prompt.d.ts.map +1 -0
- package/dist/prompts/task.prompt.js +12 -0
- package/dist/providers/anthropic.provider.d.ts +48 -0
- package/dist/providers/anthropic.provider.d.ts.map +1 -0
- package/dist/providers/anthropic.provider.js +194 -0
- package/dist/providers/anthropic.provider.test.d.ts +2 -0
- package/dist/providers/anthropic.provider.test.d.ts.map +1 -0
- package/dist/providers/anthropic.provider.test.js +222 -0
- package/dist/providers/cohere.provider.d.ts +57 -0
- package/dist/providers/cohere.provider.d.ts.map +1 -0
- package/dist/providers/cohere.provider.js +230 -0
- package/dist/providers/cohere.provider.test.d.ts +2 -0
- package/dist/providers/cohere.provider.test.d.ts.map +1 -0
- package/dist/providers/cohere.provider.test.js +267 -0
- package/dist/providers/gemini.provider.d.ts +45 -0
- package/dist/providers/gemini.provider.d.ts.map +1 -0
- package/dist/providers/gemini.provider.js +180 -0
- package/dist/providers/gemini.provider.test.d.ts +2 -0
- package/dist/providers/gemini.provider.test.d.ts.map +1 -0
- package/dist/providers/gemini.provider.test.js +219 -0
- package/dist/providers/ollama.provider.d.ts +45 -0
- package/dist/providers/ollama.provider.d.ts.map +1 -0
- package/dist/providers/ollama.provider.js +232 -0
- package/dist/providers/ollama.provider.test.d.ts +2 -0
- package/dist/providers/ollama.provider.test.d.ts.map +1 -0
- package/dist/providers/ollama.provider.test.js +267 -0
- package/dist/providers/openai.provider.d.ts +57 -0
- package/dist/providers/openai.provider.d.ts.map +1 -0
- package/dist/providers/openai.provider.js +320 -0
- package/dist/providers/openai.provider.test.d.ts +2 -0
- package/dist/providers/openai.provider.test.d.ts.map +1 -0
- package/dist/providers/openai.provider.test.js +364 -0
- package/dist/tracking/token.tracker.d.ts +72 -0
- package/dist/tracking/token.tracker.d.ts.map +1 -0
- package/dist/tracking/token.tracker.js +222 -0
- package/dist/tracking/token.tracker.test.d.ts +2 -0
- package/dist/tracking/token.tracker.test.d.ts.map +1 -0
- package/dist/tracking/token.tracker.test.js +272 -0
- package/dist/vector/vector.service.d.ts +50 -0
- package/dist/vector/vector.service.d.ts.map +1 -0
- package/dist/vector/vector.service.js +163 -0
- package/package.json +60 -0
|
@@ -0,0 +1,266 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
|
|
3
|
+
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
|
|
4
|
+
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
|
|
5
|
+
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
|
|
6
|
+
return c > 3 && r && Object.defineProperty(target, key, r), r;
|
|
7
|
+
};
|
|
8
|
+
var __metadata = (this && this.__metadata) || function (k, v) {
|
|
9
|
+
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
|
|
10
|
+
};
|
|
11
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
12
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
13
|
+
};
|
|
14
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
15
|
+
exports.AIService = void 0;
|
|
16
|
+
const core_1 = require("@hazeljs/core");
|
|
17
|
+
const core_2 = __importDefault(require("@hazeljs/core"));
|
|
18
|
+
const openai_1 = __importDefault(require("openai"));
|
|
19
|
+
const prompts_1 = require("@hazeljs/prompts");
|
|
20
|
+
require("./prompts/task.prompt");
|
|
21
|
+
const task_prompt_1 = require("./prompts/task.prompt");
|
|
22
|
+
/**
 * AIService — dispatches AI tasks to a registered LLM provider.
 *
 * Providers registered in the constructor: 'openai', 'ollama', 'custom'.
 * Note that the LLMProvider type also declares 'anthropic', which is NOT
 * registered here — executeTask returns an error result for it.
 *
 * Each provider exposes a single `execute(config, input)` that resolves to an
 * AITaskResult-shaped object: `{ data }` on success, `{ error }` on failure,
 * or `{ stream }` (an async iterable of text chunks) when `config.stream` is set.
 */
let AIService = class AIService {
    constructor() {
        // provider name -> { execute } implementation
        this.providers = new Map();
        // Initialize providers
        this.initializeProviders();
    }
    /**
     * Registers the built-in provider implementations.
     * Called once from the constructor; repeated calls would overwrite the
     * existing entries with fresh closures (same behavior).
     */
    initializeProviders() {
        core_2.default.debug('Initializing AI providers');
        // OpenAI provider
        this.providers.set('openai', {
            execute: async (config, input) => {
                core_2.default.debug('OpenAI provider execute called with config:', {
                    name: config.name,
                    model: config.model,
                    stream: config.stream,
                    provider: config.provider,
                });
                // A new client is constructed per call; the API key is read from the
                // environment at call time, not at service construction.
                const openai = new openai_1.default({
                    apiKey: process.env.OPENAI_API_KEY,
                });
                core_2.default.debug('OpenAI client initialized');
                if (config.stream) {
                    try {
                        core_2.default.debug('Creating OpenAI stream with config:', {
                            model: config.model,
                            temperature: config.temperature,
                            prompt: this.formatPrompt(config, input),
                        });
                        // NOTE(review): formatPrompt is evaluated twice on this path
                        // (once for the debug log above, once for the request below).
                        const stream = await openai.chat.completions.create({
                            model: config.model,
                            messages: [
                                {
                                    role: 'system',
                                    content: this.formatPrompt(config, input),
                                },
                            ],
                            temperature: config.temperature || 0.7,
                            max_tokens: config.maxTokens,
                            stream: true,
                        });
                        core_2.default.debug('OpenAI stream created successfully');
                        return {
                            // Lazily re-yield only the text deltas; empty/absent deltas are skipped.
                            // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
                            stream: (async function* () {
                                try {
                                    core_2.default.debug('Starting to iterate over stream chunks');
                                    for await (const chunk of stream) {
                                        const content = chunk.choices[0]?.delta?.content;
                                        if (content) {
                                            core_2.default.debug('Yielding chunk:', { content });
                                            yield content;
                                        }
                                    }
                                    core_2.default.debug('Finished iterating over stream chunks');
                                }
                                catch (error) {
                                    // Errors surface to whoever consumes the iterator,
                                    // not to the executeTask caller.
                                    core_2.default.error('Error in OpenAI stream:', error);
                                    throw error;
                                }
                            })(),
                        };
                    }
                    catch (error) {
                        core_2.default.error('Error creating OpenAI stream:', error);
                        return { error: error instanceof Error ? error.message : 'Failed to create stream' };
                    }
                }
                // Non-streaming path: single completion request.
                try {
                    const response = await openai.chat.completions.create({
                        model: config.model,
                        messages: [
                            {
                                role: 'system',
                                content: this.formatPrompt(config, input),
                            },
                        ],
                        temperature: config.temperature || 0.7,
                        max_tokens: config.maxTokens,
                    });
                    // NOTE(review): message.content may be null (e.g. tool calls) —
                    // parseResponse would then receive null; confirm desired handling.
                    return this.parseResponse(response.choices[0].message.content, config.outputType);
                }
                catch (error) {
                    core_2.default.error('Error in OpenAI request:', error);
                    return { error: error instanceof Error ? error.message : 'Failed to get response' };
                }
            },
        });
        // Ollama provider
        this.providers.set('ollama', {
            execute: async (config, input) => {
                // NOTE(review): the Ollama endpoint is hard-coded to localhost:11434;
                // consider making it configurable.
                if (config.stream) {
                    const response = await fetch('http://localhost:11434/api/generate', {
                        method: 'POST',
                        headers: { 'Content-Type': 'application/json' },
                        body: JSON.stringify({
                            model: config.model,
                            prompt: this.formatPrompt(config, input),
                            temperature: config.temperature || 0.7,
                            max_tokens: config.maxTokens,
                            stream: true,
                        }),
                    });
                    if (!response.body) {
                        throw new Error('No response body available for streaming');
                    }
                    const reader = response.body.getReader();
                    const decoder = new TextDecoder();
                    return {
                        // Ollama streams newline-delimited JSON objects; yield each
                        // object's `response` field as a text chunk.
                        // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
                        stream: (async function* () {
                            try {
                                while (true) {
                                    const { done, value } = await reader.read();
                                    if (done)
                                        break;
                                    const chunk = decoder.decode(value);
                                    // NOTE(review): a JSON object split across two reads
                                    // would be dropped by the per-chunk parse below.
                                    const lines = chunk.split('\n').filter(Boolean);
                                    for (const line of lines) {
                                        try {
                                            const data = JSON.parse(line);
                                            if (data.response) {
                                                yield data.response;
                                            }
                                        }
                                        catch {
                                            // Skip invalid JSON lines
                                            continue;
                                        }
                                    }
                                }
                            }
                            finally {
                                reader.releaseLock();
                            }
                        })(),
                    };
                }
                // Non-streaming path: single generate request, parse `response` field.
                const response = await fetch('http://localhost:11434/api/generate', {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify({
                        model: config.model,
                        prompt: this.formatPrompt(config, input),
                        temperature: config.temperature || 0.7,
                        max_tokens: config.maxTokens,
                    }),
                });
                const data = (await response.json());
                return this.parseResponse(data.response, config.outputType);
            },
        });
        // Custom provider
        this.providers.set('custom', {
            execute: async (config, input) => {
                if (!config.customProvider) {
                    throw new Error('Custom provider configuration is required');
                }
                const { url, headers, transformRequest, transformResponse } = config.customProvider;
                // Caller-supplied hooks shape the request body and unwrap the response.
                const requestBody = transformRequest ? transformRequest(input) : input;
                const response = await fetch(url, {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json',
                        ...headers,
                    },
                    body: JSON.stringify(requestBody),
                });
                const data = await response.json();
                const transformedData = transformResponse ? transformResponse(data) : data;
                // NOTE(review): for outputType 'json', parseResponse JSON.parses its
                // argument — transformResponse is expected to yield a string here.
                return this.parseResponse(transformedData, config.outputType);
            },
        });
    }
    /**
     * Renders the task prompt. Legacy `{{variable}}` placeholders are
     * normalised to `{variable}` and rendered through PromptTemplate with a
     * context of taskName / description / input plus example hints.
     */
    formatPrompt(config, input) {
        const context = {
            taskName: config.name,
            description: config.prompt,
            inputExample: 'JSON object with input data',
            outputExample: `Expected ${config.outputType} output`,
            input: input,
        };
        // Normalise legacy {{variable}} to {variable} and render via PromptTemplate
        const normalizedTemplate = config.prompt.replace(/\{\{(\w+)\}\}/g, '{$1}');
        const tpl = new prompts_1.PromptTemplate(normalizedTemplate, {
            name: task_prompt_1.AI_TASK_FORMAT_KEY,
        });
        return tpl.render(context);
    }
    /**
     * Coerces a raw provider response string into the configured output type.
     * Returns { data } on success or { error } when coercion throws
     * (e.g. invalid JSON for outputType 'json').
     */
    // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
    parseResponse(response, outputType) {
        try {
            switch (outputType) {
                case 'json':
                    return { data: JSON.parse(response) };
                case 'number':
                    // NOTE(review): Number('not a number') yields NaN, not an error.
                    return { data: Number(response) };
                case 'boolean':
                    // Only the exact (case-insensitive) string 'true' maps to true.
                    return { data: response.toLowerCase() === 'true' };
                default:
                    return { data: response };
            }
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
            return { error: `Failed to parse response: ${errorMessage}` };
        }
    }
    /**
     * Public entry point: looks up the configured provider and runs the task.
     * Never throws — all failures (including unknown providers) are folded
     * into an `{ error }` result.
     */
    async executeTask(config, input) {
        try {
            core_2.default.debug('Executing AI task:', {
                task: config.name,
                provider: config.provider,
                stream: config.stream,
                model: config.model,
            });
            const provider = this.providers.get(config.provider);
            if (!provider) {
                core_2.default.error('Provider not found:', config.provider);
                throw new Error(`Provider ${config.provider} not supported`);
            }
            core_2.default.debug('Found provider, executing task');
            const result = await provider.execute(config, input);
            core_2.default.debug('AI task completed:', {
                task: config.name,
                hasStream: !!result.stream,
                hasError: !!result.error,
            });
            return result;
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : 'Unknown error';
            core_2.default.error('AI task failed:', {
                task: config.name,
                error: errorMessage,
                stack: error instanceof Error ? error.stack : undefined,
            });
            return { error: errorMessage };
        }
    }
};
exports.AIService = AIService;
// Apply the @Service() decorator (compiled form) and re-export the decorated class.
exports.AIService = AIService = __decorate([
    (0, core_1.Service)(),
    __metadata("design:paramtypes", [])
], AIService);
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"ai.service.test.d.ts","sourceRoot":"","sources":["../src/ai.service.test.ts"],"names":[],"mappings":""}
|
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
const ai_service_1 = require("./ai.service");
|
|
4
|
+
// Mock OpenAI
|
|
5
|
+
const mockCreate = jest.fn();
|
|
6
|
+
jest.mock('openai', () => ({
|
|
7
|
+
__esModule: true,
|
|
8
|
+
default: jest.fn().mockImplementation(() => ({
|
|
9
|
+
chat: {
|
|
10
|
+
completions: {
|
|
11
|
+
create: mockCreate,
|
|
12
|
+
},
|
|
13
|
+
},
|
|
14
|
+
})),
|
|
15
|
+
}));
|
|
16
|
+
// Mock fetch
|
|
17
|
+
global.fetch = jest.fn();
|
|
18
|
+
describe('AIService', () => {
|
|
19
|
+
let service;
|
|
20
|
+
beforeEach(() => {
|
|
21
|
+
service = new ai_service_1.AIService();
|
|
22
|
+
jest.clearAllMocks();
|
|
23
|
+
mockCreate.mockReset();
|
|
24
|
+
});
|
|
25
|
+
describe('executeTask', () => {
|
|
26
|
+
const mockConfig = {
|
|
27
|
+
name: 'test-task',
|
|
28
|
+
provider: 'openai',
|
|
29
|
+
model: 'gpt-3.5-turbo',
|
|
30
|
+
prompt: 'Test prompt with {{input}}',
|
|
31
|
+
outputType: 'json',
|
|
32
|
+
temperature: 0.7,
|
|
33
|
+
maxTokens: 100,
|
|
34
|
+
};
|
|
35
|
+
const mockInput = { test: 'data' };
|
|
36
|
+
it('should execute task with OpenAI provider', async () => {
|
|
37
|
+
mockCreate.mockResolvedValueOnce({
|
|
38
|
+
choices: [
|
|
39
|
+
{
|
|
40
|
+
message: {
|
|
41
|
+
content: '{"result": "test"}',
|
|
42
|
+
},
|
|
43
|
+
},
|
|
44
|
+
],
|
|
45
|
+
});
|
|
46
|
+
const result = await service.executeTask(mockConfig, mockInput);
|
|
47
|
+
expect(result).toEqual({ data: { result: 'test' } });
|
|
48
|
+
expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
|
|
49
|
+
model: 'gpt-3.5-turbo',
|
|
50
|
+
messages: [
|
|
51
|
+
expect.objectContaining({
|
|
52
|
+
role: 'system',
|
|
53
|
+
content: expect.stringContaining('Test prompt with'),
|
|
54
|
+
}),
|
|
55
|
+
],
|
|
56
|
+
temperature: 0.7,
|
|
57
|
+
max_tokens: 100,
|
|
58
|
+
}));
|
|
59
|
+
});
|
|
60
|
+
it('should execute task with Ollama provider', async () => {
|
|
61
|
+
global.fetch.mockResolvedValueOnce({
|
|
62
|
+
json: () => Promise.resolve({ response: '{"result": "test"}' }),
|
|
63
|
+
});
|
|
64
|
+
const result = await service.executeTask({
|
|
65
|
+
...mockConfig,
|
|
66
|
+
provider: 'ollama',
|
|
67
|
+
}, mockInput);
|
|
68
|
+
expect(result).toEqual({ data: { result: 'test' } });
|
|
69
|
+
expect(global.fetch).toHaveBeenCalledWith('http://localhost:11434/api/generate', expect.objectContaining({
|
|
70
|
+
method: 'POST',
|
|
71
|
+
headers: { 'Content-Type': 'application/json' },
|
|
72
|
+
body: expect.stringContaining('"model":"gpt-3.5-turbo"'),
|
|
73
|
+
}));
|
|
74
|
+
});
|
|
75
|
+
it('should execute task with custom provider', async () => {
|
|
76
|
+
global.fetch.mockResolvedValueOnce({
|
|
77
|
+
json: () => Promise.resolve({ result: '{"result":"test"}' }),
|
|
78
|
+
});
|
|
79
|
+
const result = await service.executeTask({
|
|
80
|
+
...mockConfig,
|
|
81
|
+
provider: 'custom',
|
|
82
|
+
customProvider: {
|
|
83
|
+
url: 'http://custom-api.com',
|
|
84
|
+
headers: { 'X-API-Key': 'test' },
|
|
85
|
+
transformRequest: (input) => ({ transformed: input }),
|
|
86
|
+
transformResponse: (data) => data.result,
|
|
87
|
+
},
|
|
88
|
+
}, mockInput);
|
|
89
|
+
expect(result).toEqual({ data: { result: 'test' } });
|
|
90
|
+
expect(global.fetch).toHaveBeenCalledWith('http://custom-api.com', expect.objectContaining({
|
|
91
|
+
method: 'POST',
|
|
92
|
+
headers: expect.objectContaining({
|
|
93
|
+
'Content-Type': 'application/json',
|
|
94
|
+
'X-API-Key': 'test',
|
|
95
|
+
}),
|
|
96
|
+
body: JSON.stringify({ transformed: mockInput }),
|
|
97
|
+
}));
|
|
98
|
+
});
|
|
99
|
+
it('should handle unsupported provider', async () => {
|
|
100
|
+
const result = await service.executeTask({
|
|
101
|
+
...mockConfig,
|
|
102
|
+
provider: 'anthropic',
|
|
103
|
+
}, mockInput);
|
|
104
|
+
expect(result).toEqual({
|
|
105
|
+
error: 'Provider anthropic not supported',
|
|
106
|
+
});
|
|
107
|
+
});
|
|
108
|
+
it('should handle OpenAI API errors', async () => {
|
|
109
|
+
mockCreate.mockRejectedValueOnce(new Error('API Error'));
|
|
110
|
+
const result = await service.executeTask(mockConfig, mockInput);
|
|
111
|
+
expect(result).toEqual({
|
|
112
|
+
error: 'API Error',
|
|
113
|
+
});
|
|
114
|
+
});
|
|
115
|
+
it('should handle Ollama API errors', async () => {
|
|
116
|
+
global.fetch.mockRejectedValueOnce(new Error('Network Error'));
|
|
117
|
+
const result = await service.executeTask({
|
|
118
|
+
...mockConfig,
|
|
119
|
+
provider: 'ollama',
|
|
120
|
+
}, mockInput);
|
|
121
|
+
expect(result).toEqual({
|
|
122
|
+
error: 'Network Error',
|
|
123
|
+
});
|
|
124
|
+
});
|
|
125
|
+
it('should handle custom provider errors', async () => {
|
|
126
|
+
global.fetch.mockRejectedValueOnce(new Error('Custom API Error'));
|
|
127
|
+
const result = await service.executeTask({
|
|
128
|
+
...mockConfig,
|
|
129
|
+
provider: 'custom',
|
|
130
|
+
customProvider: {
|
|
131
|
+
url: 'http://custom-api.com',
|
|
132
|
+
},
|
|
133
|
+
}, mockInput);
|
|
134
|
+
expect(result).toEqual({
|
|
135
|
+
error: 'Custom API Error',
|
|
136
|
+
});
|
|
137
|
+
});
|
|
138
|
+
it('should handle invalid JSON response', async () => {
|
|
139
|
+
mockCreate.mockResolvedValueOnce({
|
|
140
|
+
choices: [
|
|
141
|
+
{
|
|
142
|
+
message: {
|
|
143
|
+
content: 'invalid json',
|
|
144
|
+
},
|
|
145
|
+
},
|
|
146
|
+
],
|
|
147
|
+
});
|
|
148
|
+
const result = await service.executeTask(mockConfig, mockInput);
|
|
149
|
+
expect(result.error).toContain('Failed to parse response');
|
|
150
|
+
});
|
|
151
|
+
it('should handle different output types', async () => {
|
|
152
|
+
// Test number output
|
|
153
|
+
mockCreate.mockResolvedValueOnce({
|
|
154
|
+
choices: [
|
|
155
|
+
{
|
|
156
|
+
message: {
|
|
157
|
+
content: '42',
|
|
158
|
+
},
|
|
159
|
+
},
|
|
160
|
+
],
|
|
161
|
+
});
|
|
162
|
+
const numberResult = await service.executeTask({
|
|
163
|
+
...mockConfig,
|
|
164
|
+
outputType: 'number',
|
|
165
|
+
}, mockInput);
|
|
166
|
+
expect(numberResult).toEqual({ data: 42 });
|
|
167
|
+
// Test boolean output
|
|
168
|
+
mockCreate.mockResolvedValueOnce({
|
|
169
|
+
choices: [
|
|
170
|
+
{
|
|
171
|
+
message: {
|
|
172
|
+
content: 'true',
|
|
173
|
+
},
|
|
174
|
+
},
|
|
175
|
+
],
|
|
176
|
+
});
|
|
177
|
+
const booleanResult = await service.executeTask({
|
|
178
|
+
...mockConfig,
|
|
179
|
+
outputType: 'boolean',
|
|
180
|
+
}, mockInput);
|
|
181
|
+
expect(booleanResult).toEqual({ data: true });
|
|
182
|
+
// Test string output
|
|
183
|
+
mockCreate.mockResolvedValueOnce({
|
|
184
|
+
choices: [
|
|
185
|
+
{
|
|
186
|
+
message: {
|
|
187
|
+
content: 'test string',
|
|
188
|
+
},
|
|
189
|
+
},
|
|
190
|
+
],
|
|
191
|
+
});
|
|
192
|
+
const stringResult = await service.executeTask({
|
|
193
|
+
...mockConfig,
|
|
194
|
+
outputType: 'string',
|
|
195
|
+
}, mockInput);
|
|
196
|
+
expect(stringResult).toEqual({ data: 'test string' });
|
|
197
|
+
});
|
|
198
|
+
it('should format prompt with context variables', async () => {
|
|
199
|
+
mockCreate.mockResolvedValueOnce({
|
|
200
|
+
choices: [
|
|
201
|
+
{
|
|
202
|
+
message: {
|
|
203
|
+
content: '{"result": "test"}',
|
|
204
|
+
},
|
|
205
|
+
},
|
|
206
|
+
],
|
|
207
|
+
});
|
|
208
|
+
const config = {
|
|
209
|
+
...mockConfig,
|
|
210
|
+
prompt: 'Task: {{taskName}}\nInput: {{input}}\nDescription: {{description}}',
|
|
211
|
+
};
|
|
212
|
+
await service.executeTask(config, mockInput);
|
|
213
|
+
expect(mockCreate).toHaveBeenCalledWith(expect.objectContaining({
|
|
214
|
+
messages: [
|
|
215
|
+
expect.objectContaining({
|
|
216
|
+
content: expect.stringContaining('Task: test-task'),
|
|
217
|
+
}),
|
|
218
|
+
],
|
|
219
|
+
}));
|
|
220
|
+
});
|
|
221
|
+
});
|
|
222
|
+
});
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
 * Backend used to run an AI task.
 * NOTE(review): 'anthropic' is declared here but AIService only registers
 * 'openai', 'ollama' and 'custom' — executeTask returns an error result for it.
 */
export type LLMProvider = 'openai' | 'ollama' | 'anthropic' | 'custom';
/** Declarative configuration for a single AI task execution. */
export interface AITaskConfig {
    /** Task name; exposed to the prompt context as `taskName`. */
    name: string;
    /** Prompt template; legacy `{{variable}}` placeholders are normalised to `{variable}`. */
    prompt: string;
    /** Provider that will execute the task. */
    provider: LLMProvider;
    /** Model identifier passed through to the provider (e.g. an OpenAI model name). */
    model: string;
    /** How the raw response string is coerced before being returned. */
    outputType: 'string' | 'json' | 'number' | 'boolean';
    /** Sampling temperature; providers default to 0.7 when omitted. */
    temperature?: number;
    /** Maximum tokens forwarded to the provider (max_tokens). */
    maxTokens?: number;
    /** When true, the result carries an async text-chunk stream instead of data. */
    stream?: boolean;
    /** Required when provider is 'custom': describes the HTTP endpoint to call. */
    customProvider?: {
        /** Endpoint URL; requests are POSTed as JSON. */
        url: string;
        /** Extra headers merged over the default Content-Type header. */
        headers?: Record<string, string>;
        /** Maps the task input to the request body (defaults to the input itself). */
        transformRequest?: (input: unknown) => unknown;
        /** Unwraps the JSON response before output-type coercion. */
        transformResponse?: (response: unknown) => unknown;
    };
}
/** Variables made available to the prompt template when rendering a task. */
export interface AITaskContext {
    taskName: string;
    description: string;
    inputExample: string;
    outputExample: string;
    input: unknown;
}
/**
 * Result of a task execution. Exactly one of `data`, `error` or `stream`
 * is populated depending on success, failure, or streaming mode.
 */
export interface AITaskResult<T = unknown> {
    /** Parsed/coerced response on success (non-streaming). */
    data?: T;
    /** Human-readable failure message; executeTask never throws. */
    error?: string;
    /** Async iterable of text chunks when the task was configured to stream. */
    stream?: AsyncIterable<string>;
}
//# sourceMappingURL=ai.types.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"ai.types.d.ts","sourceRoot":"","sources":["../src/ai.types.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,WAAW,GAAG,QAAQ,GAAG,QAAQ,GAAG,WAAW,GAAG,QAAQ,CAAC;AAEvE,MAAM,WAAW,YAAY;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,WAAW,CAAC;IACtB,KAAK,EAAE,MAAM,CAAC;IACd,UAAU,EAAE,QAAQ,GAAG,MAAM,GAAG,QAAQ,GAAG,SAAS,CAAC;IACrD,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,cAAc,CAAC,EAAE;QACf,GAAG,EAAE,MAAM,CAAC;QACZ,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QACjC,gBAAgB,CAAC,EAAE,CAAC,KAAK,EAAE,OAAO,KAAK,OAAO,CAAC;QAC/C,iBAAiB,CAAC,EAAE,CAAC,QAAQ,EAAE,OAAO,KAAK,OAAO,CAAC;KACpD,CAAC;CACH;AAED,MAAM,WAAW,aAAa;IAC5B,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,YAAY,EAAE,MAAM,CAAC;IACrB,aAAa,EAAE,MAAM,CAAC;IACtB,KAAK,EAAE,OAAO,CAAC;CAChB;AAED,MAAM,WAAW,YAAY,CAAC,CAAC,GAAG,OAAO;IACvC,IAAI,CAAC,EAAE,CAAC,CAAC;IACT,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,MAAM,CAAC,EAAE,aAAa,CAAC,MAAM,CAAC,CAAC;CAChC"}
|
package/dist/ai.types.js
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import { AIContext, AIMessage } from '../ai-enhanced.types';
/**
 * AI Context Manager
 * Manages conversation context and token limits
 */
export declare class AIContextManager implements AIContext {
    /** Ordered conversation history (system + user + assistant messages). */
    messages: AIMessage[];
    /** Token budget the context is trimmed to fit within. */
    maxTokens: number;
    /** Running estimate of tokens currently held in `messages`. */
    currentTokens: number;
    // Per-message / per-name token overhead constants used by the estimator.
    // NOTE(review): concrete values live in the compiled .js — confirm there.
    private readonly TOKENS_PER_MESSAGE;
    private readonly TOKENS_PER_NAME;
    constructor(maxTokens?: number);
    /**
     * Add message to context
     */
    addMessage(message: AIMessage): void;
    /**
     * Get all messages
     */
    getMessages(): AIMessage[];
    /**
     * Clear all messages
     */
    clear(): void;
    /**
     * Trim messages to fit within token limit
     * Keeps system messages and removes oldest user/assistant messages
     */
    trimToLimit(): void;
    /**
     * Estimate tokens for a message
     * This is a rough estimation. For accurate counting, use tiktoken library
     */
    private estimateTokens;
    /**
     * Get context statistics
     */
    getStats(): {
        messageCount: number;
        currentTokens: number;
        maxTokens: number;
        utilizationPercent: number;
    };
    /**
     * Set max tokens limit
     */
    setMaxTokens(maxTokens: number): void;
    /**
     * Get system messages
     */
    getSystemMessages(): AIMessage[];
    /**
     * Get conversation messages (user + assistant)
     */
    getConversationMessages(): AIMessage[];
    /**
     * Add system message
     */
    addSystemMessage(content: string): void;
    /**
     * Add user message
     */
    addUserMessage(content: string): void;
    /**
     * Add assistant message
     */
    addAssistantMessage(content: string): void;
}
//# sourceMappingURL=context.manager.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"context.manager.d.ts","sourceRoot":"","sources":["../../src/context/context.manager.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,SAAS,EAAE,SAAS,EAAE,MAAM,sBAAsB,CAAC;AAG5D;;;GAGG;AACH,qBAAa,gBAAiB,YAAW,SAAS;IAChD,QAAQ,EAAE,SAAS,EAAE,CAAM;IAC3B,SAAS,EAAE,MAAM,CAAC;IAClB,aAAa,EAAE,MAAM,CAAK;IAC1B,OAAO,CAAC,QAAQ,CAAC,kBAAkB,CAAK;IACxC,OAAO,CAAC,QAAQ,CAAC,eAAe,CAAK;gBAEzB,SAAS,GAAE,MAAa;IAKpC;;OAEG;IACH,UAAU,CAAC,OAAO,EAAE,SAAS,GAAG,IAAI;IAiBpC;;OAEG;IACH,WAAW,IAAI,SAAS,EAAE;IAI1B;;OAEG;IACH,KAAK,IAAI,IAAI;IAMb;;;OAGG;IACH,WAAW,IAAI,IAAI;IAwCnB;;;OAGG;IACH,OAAO,CAAC,cAAc;IAoBtB;;OAEG;IACH,QAAQ,IAAI;QACV,YAAY,EAAE,MAAM,CAAC;QACrB,aAAa,EAAE,MAAM,CAAC;QACtB,SAAS,EAAE,MAAM,CAAC;QAClB,kBAAkB,EAAE,MAAM,CAAC;KAC5B;IASD;;OAEG;IACH,YAAY,CAAC,SAAS,EAAE,MAAM,GAAG,IAAI;IASrC;;OAEG;IACH,iBAAiB,IAAI,SAAS,EAAE;IAIhC;;OAEG;IACH,uBAAuB,IAAI,SAAS,EAAE;IAItC;;OAEG;IACH,gBAAgB,CAAC,OAAO,EAAE,MAAM,GAAG,IAAI;IAOvC;;OAEG;IACH,cAAc,CAAC,OAAO,EAAE,MAAM,GAAG,IAAI;IAOrC;;OAEG;IACH,mBAAmB,CAAC,OAAO,EAAE,MAAM,GAAG,IAAI;CAM3C"}
|