@hazeljs/ai 0.2.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +192 -0
- package/README.md +497 -0
- package/dist/ai-enhanced.service.d.ts +108 -0
- package/dist/ai-enhanced.service.d.ts.map +1 -0
- package/dist/ai-enhanced.service.js +345 -0
- package/dist/ai-enhanced.service.test.d.ts +2 -0
- package/dist/ai-enhanced.service.test.d.ts.map +1 -0
- package/dist/ai-enhanced.service.test.js +501 -0
- package/dist/ai-enhanced.test.d.ts +2 -0
- package/dist/ai-enhanced.test.d.ts.map +1 -0
- package/dist/ai-enhanced.test.js +587 -0
- package/dist/ai-enhanced.types.d.ts +277 -0
- package/dist/ai-enhanced.types.d.ts.map +1 -0
- package/dist/ai-enhanced.types.js +2 -0
- package/dist/ai.decorator.d.ts +4 -0
- package/dist/ai.decorator.d.ts.map +1 -0
- package/dist/ai.decorator.js +57 -0
- package/dist/ai.decorator.test.d.ts +2 -0
- package/dist/ai.decorator.test.d.ts.map +1 -0
- package/dist/ai.decorator.test.js +189 -0
- package/dist/ai.module.d.ts +12 -0
- package/dist/ai.module.d.ts.map +1 -0
- package/dist/ai.module.js +44 -0
- package/dist/ai.module.test.d.ts +2 -0
- package/dist/ai.module.test.d.ts.map +1 -0
- package/dist/ai.module.test.js +23 -0
- package/dist/ai.service.d.ts +11 -0
- package/dist/ai.service.d.ts.map +1 -0
- package/dist/ai.service.js +266 -0
- package/dist/ai.service.test.d.ts +2 -0
- package/dist/ai.service.test.d.ts.map +1 -0
- package/dist/ai.service.test.js +222 -0
- package/dist/ai.types.d.ts +30 -0
- package/dist/ai.types.d.ts.map +1 -0
- package/dist/ai.types.js +2 -0
- package/dist/context/context.manager.d.ts +69 -0
- package/dist/context/context.manager.d.ts.map +1 -0
- package/dist/context/context.manager.js +168 -0
- package/dist/context/context.manager.test.d.ts +2 -0
- package/dist/context/context.manager.test.d.ts.map +1 -0
- package/dist/context/context.manager.test.js +180 -0
- package/dist/decorators/ai-function.decorator.d.ts +42 -0
- package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
- package/dist/decorators/ai-function.decorator.js +80 -0
- package/dist/decorators/ai-validate.decorator.d.ts +46 -0
- package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
- package/dist/decorators/ai-validate.decorator.js +83 -0
- package/dist/index.d.ts +18 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +40 -0
- package/dist/prompts/task.prompt.d.ts +12 -0
- package/dist/prompts/task.prompt.d.ts.map +1 -0
- package/dist/prompts/task.prompt.js +12 -0
- package/dist/providers/anthropic.provider.d.ts +48 -0
- package/dist/providers/anthropic.provider.d.ts.map +1 -0
- package/dist/providers/anthropic.provider.js +194 -0
- package/dist/providers/anthropic.provider.test.d.ts +2 -0
- package/dist/providers/anthropic.provider.test.d.ts.map +1 -0
- package/dist/providers/anthropic.provider.test.js +222 -0
- package/dist/providers/cohere.provider.d.ts +57 -0
- package/dist/providers/cohere.provider.d.ts.map +1 -0
- package/dist/providers/cohere.provider.js +230 -0
- package/dist/providers/cohere.provider.test.d.ts +2 -0
- package/dist/providers/cohere.provider.test.d.ts.map +1 -0
- package/dist/providers/cohere.provider.test.js +267 -0
- package/dist/providers/gemini.provider.d.ts +45 -0
- package/dist/providers/gemini.provider.d.ts.map +1 -0
- package/dist/providers/gemini.provider.js +180 -0
- package/dist/providers/gemini.provider.test.d.ts +2 -0
- package/dist/providers/gemini.provider.test.d.ts.map +1 -0
- package/dist/providers/gemini.provider.test.js +219 -0
- package/dist/providers/ollama.provider.d.ts +45 -0
- package/dist/providers/ollama.provider.d.ts.map +1 -0
- package/dist/providers/ollama.provider.js +232 -0
- package/dist/providers/ollama.provider.test.d.ts +2 -0
- package/dist/providers/ollama.provider.test.d.ts.map +1 -0
- package/dist/providers/ollama.provider.test.js +267 -0
- package/dist/providers/openai.provider.d.ts +57 -0
- package/dist/providers/openai.provider.d.ts.map +1 -0
- package/dist/providers/openai.provider.js +320 -0
- package/dist/providers/openai.provider.test.d.ts +2 -0
- package/dist/providers/openai.provider.test.d.ts.map +1 -0
- package/dist/providers/openai.provider.test.js +364 -0
- package/dist/tracking/token.tracker.d.ts +72 -0
- package/dist/tracking/token.tracker.d.ts.map +1 -0
- package/dist/tracking/token.tracker.js +222 -0
- package/dist/tracking/token.tracker.test.d.ts +2 -0
- package/dist/tracking/token.tracker.test.d.ts.map +1 -0
- package/dist/tracking/token.tracker.test.js +272 -0
- package/dist/vector/vector.service.d.ts +50 -0
- package/dist/vector/vector.service.d.ts.map +1 -0
- package/dist/vector/vector.service.js +163 -0
- package/package.json +60 -0
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.AIContextManager = void 0;
|
|
7
|
+
const core_1 = __importDefault(require("@hazeljs/core"));
|
|
8
|
+
/**
|
|
9
|
+
* AI Context Manager
|
|
10
|
+
* Manages conversation context and token limits
|
|
11
|
+
*/
|
|
12
|
+
class AIContextManager {
|
|
13
|
+
constructor(maxTokens = 4096) {
|
|
14
|
+
this.messages = [];
|
|
15
|
+
this.currentTokens = 0;
|
|
16
|
+
this.TOKENS_PER_MESSAGE = 4; // Approximate overhead per message
|
|
17
|
+
this.TOKENS_PER_NAME = 1; // Approximate overhead for name field
|
|
18
|
+
this.maxTokens = maxTokens;
|
|
19
|
+
core_1.default.debug(`AI Context Manager initialized with max tokens: ${maxTokens}`);
|
|
20
|
+
}
|
|
21
|
+
/**
|
|
22
|
+
* Add message to context
|
|
23
|
+
*/
|
|
24
|
+
addMessage(message) {
|
|
25
|
+
const tokens = this.estimateTokens(message);
|
|
26
|
+
this.messages.push(message);
|
|
27
|
+
this.currentTokens += tokens;
|
|
28
|
+
core_1.default.debug(`Message added to context`, {
|
|
29
|
+
role: message.role,
|
|
30
|
+
tokens,
|
|
31
|
+
totalTokens: this.currentTokens,
|
|
32
|
+
});
|
|
33
|
+
// Auto-trim if exceeds limit
|
|
34
|
+
if (this.currentTokens > this.maxTokens) {
|
|
35
|
+
this.trimToLimit();
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
/**
|
|
39
|
+
* Get all messages
|
|
40
|
+
*/
|
|
41
|
+
getMessages() {
|
|
42
|
+
return [...this.messages];
|
|
43
|
+
}
|
|
44
|
+
/**
|
|
45
|
+
* Clear all messages
|
|
46
|
+
*/
|
|
47
|
+
clear() {
|
|
48
|
+
this.messages = [];
|
|
49
|
+
this.currentTokens = 0;
|
|
50
|
+
core_1.default.debug('Context cleared');
|
|
51
|
+
}
|
|
52
|
+
/**
|
|
53
|
+
* Trim messages to fit within token limit
|
|
54
|
+
* Keeps system messages and removes oldest user/assistant messages
|
|
55
|
+
*/
|
|
56
|
+
trimToLimit() {
|
|
57
|
+
core_1.default.debug('Trimming context to fit token limit');
|
|
58
|
+
// Separate system messages from conversation messages
|
|
59
|
+
const systemMessages = this.messages.filter((m) => m.role === 'system');
|
|
60
|
+
const conversationMessages = this.messages.filter((m) => m.role !== 'system');
|
|
61
|
+
// Calculate tokens for system messages
|
|
62
|
+
const systemTokens = systemMessages.reduce((sum, msg) => sum + this.estimateTokens(msg), 0);
|
|
63
|
+
// Available tokens for conversation
|
|
64
|
+
const availableTokens = this.maxTokens - systemTokens;
|
|
65
|
+
// Keep most recent messages that fit
|
|
66
|
+
const keptMessages = [];
|
|
67
|
+
let conversationTokens = 0;
|
|
68
|
+
for (let i = conversationMessages.length - 1; i >= 0; i--) {
|
|
69
|
+
const msg = conversationMessages[i];
|
|
70
|
+
const tokens = this.estimateTokens(msg);
|
|
71
|
+
if (conversationTokens + tokens <= availableTokens) {
|
|
72
|
+
keptMessages.unshift(msg);
|
|
73
|
+
conversationTokens += tokens;
|
|
74
|
+
}
|
|
75
|
+
else {
|
|
76
|
+
break;
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
// Combine system messages with kept conversation messages
|
|
80
|
+
this.messages = [...systemMessages, ...keptMessages];
|
|
81
|
+
this.currentTokens = systemTokens + conversationTokens;
|
|
82
|
+
core_1.default.debug('Context trimmed', {
|
|
83
|
+
removedMessages: conversationMessages.length - keptMessages.length,
|
|
84
|
+
remainingMessages: this.messages.length,
|
|
85
|
+
currentTokens: this.currentTokens,
|
|
86
|
+
});
|
|
87
|
+
}
|
|
88
|
+
/**
|
|
89
|
+
* Estimate tokens for a message
|
|
90
|
+
* This is a rough estimation. For accurate counting, use tiktoken library
|
|
91
|
+
*/
|
|
92
|
+
estimateTokens(message) {
|
|
93
|
+
let tokens = this.TOKENS_PER_MESSAGE;
|
|
94
|
+
// Add tokens for content (rough estimate: 1 token ≈ 4 characters)
|
|
95
|
+
tokens += Math.ceil(message.content.length / 4);
|
|
96
|
+
// Add tokens for name if present
|
|
97
|
+
if (message.name) {
|
|
98
|
+
tokens += this.TOKENS_PER_NAME;
|
|
99
|
+
}
|
|
100
|
+
// Add tokens for function call if present
|
|
101
|
+
if (message.functionCall) {
|
|
102
|
+
tokens += Math.ceil(message.functionCall.name.length / 4);
|
|
103
|
+
tokens += Math.ceil(message.functionCall.arguments.length / 4);
|
|
104
|
+
}
|
|
105
|
+
return tokens;
|
|
106
|
+
}
|
|
107
|
+
/**
|
|
108
|
+
* Get context statistics
|
|
109
|
+
*/
|
|
110
|
+
getStats() {
|
|
111
|
+
return {
|
|
112
|
+
messageCount: this.messages.length,
|
|
113
|
+
currentTokens: this.currentTokens,
|
|
114
|
+
maxTokens: this.maxTokens,
|
|
115
|
+
utilizationPercent: Math.round((this.currentTokens / this.maxTokens) * 100),
|
|
116
|
+
};
|
|
117
|
+
}
|
|
118
|
+
/**
|
|
119
|
+
* Set max tokens limit
|
|
120
|
+
*/
|
|
121
|
+
setMaxTokens(maxTokens) {
|
|
122
|
+
this.maxTokens = maxTokens;
|
|
123
|
+
core_1.default.debug(`Max tokens updated to: ${maxTokens}`);
|
|
124
|
+
if (this.currentTokens > maxTokens) {
|
|
125
|
+
this.trimToLimit();
|
|
126
|
+
}
|
|
127
|
+
}
|
|
128
|
+
/**
|
|
129
|
+
* Get system messages
|
|
130
|
+
*/
|
|
131
|
+
getSystemMessages() {
|
|
132
|
+
return this.messages.filter((m) => m.role === 'system');
|
|
133
|
+
}
|
|
134
|
+
/**
|
|
135
|
+
* Get conversation messages (user + assistant)
|
|
136
|
+
*/
|
|
137
|
+
getConversationMessages() {
|
|
138
|
+
return this.messages.filter((m) => m.role === 'user' || m.role === 'assistant');
|
|
139
|
+
}
|
|
140
|
+
/**
|
|
141
|
+
* Add system message
|
|
142
|
+
*/
|
|
143
|
+
addSystemMessage(content) {
|
|
144
|
+
this.addMessage({
|
|
145
|
+
role: 'system',
|
|
146
|
+
content,
|
|
147
|
+
});
|
|
148
|
+
}
|
|
149
|
+
/**
|
|
150
|
+
* Add user message
|
|
151
|
+
*/
|
|
152
|
+
addUserMessage(content) {
|
|
153
|
+
this.addMessage({
|
|
154
|
+
role: 'user',
|
|
155
|
+
content,
|
|
156
|
+
});
|
|
157
|
+
}
|
|
158
|
+
/**
|
|
159
|
+
* Add assistant message
|
|
160
|
+
*/
|
|
161
|
+
addAssistantMessage(content) {
|
|
162
|
+
this.addMessage({
|
|
163
|
+
role: 'assistant',
|
|
164
|
+
content,
|
|
165
|
+
});
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
exports.AIContextManager = AIContextManager;
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"context.manager.test.d.ts","sourceRoot":"","sources":["../../src/context/context.manager.test.ts"],"names":[],"mappings":""}
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
jest.mock('@hazeljs/core', () => ({
|
|
4
|
+
__esModule: true,
|
|
5
|
+
default: { info: jest.fn(), debug: jest.fn(), warn: jest.fn(), error: jest.fn() },
|
|
6
|
+
}));
|
|
7
|
+
const context_manager_1 = require("./context.manager");
|
|
8
|
+
describe('AIContextManager', () => {
|
|
9
|
+
let manager;
|
|
10
|
+
beforeEach(() => {
|
|
11
|
+
manager = new context_manager_1.AIContextManager(200);
|
|
12
|
+
});
|
|
13
|
+
describe('constructor', () => {
|
|
14
|
+
it('initializes with default maxTokens', () => {
|
|
15
|
+
const m = new context_manager_1.AIContextManager();
|
|
16
|
+
expect(m.maxTokens).toBe(4096);
|
|
17
|
+
expect(m.messages).toEqual([]);
|
|
18
|
+
expect(m.currentTokens).toBe(0);
|
|
19
|
+
});
|
|
20
|
+
it('initializes with custom maxTokens', () => {
|
|
21
|
+
const m = new context_manager_1.AIContextManager(1000);
|
|
22
|
+
expect(m.maxTokens).toBe(1000);
|
|
23
|
+
});
|
|
24
|
+
});
|
|
25
|
+
describe('addMessage()', () => {
|
|
26
|
+
it('adds a user message', () => {
|
|
27
|
+
manager.addMessage({ role: 'user', content: 'hello' });
|
|
28
|
+
expect(manager.messages).toHaveLength(1);
|
|
29
|
+
expect(manager.messages[0].content).toBe('hello');
|
|
30
|
+
});
|
|
31
|
+
it('increments currentTokens', () => {
|
|
32
|
+
manager.addMessage({ role: 'user', content: 'hello' });
|
|
33
|
+
expect(manager.currentTokens).toBeGreaterThan(0);
|
|
34
|
+
});
|
|
35
|
+
it('adds message with name field', () => {
|
|
36
|
+
manager.addMessage({ role: 'user', content: 'hi', name: 'Alice' });
|
|
37
|
+
expect(manager.messages[0].name).toBe('Alice');
|
|
38
|
+
});
|
|
39
|
+
it('adds message with functionCall field', () => {
|
|
40
|
+
manager.addMessage({
|
|
41
|
+
role: 'assistant',
|
|
42
|
+
content: '',
|
|
43
|
+
functionCall: { name: 'getWeather', arguments: '{"city":"NYC"}' },
|
|
44
|
+
});
|
|
45
|
+
expect(manager.messages).toHaveLength(1);
|
|
46
|
+
});
|
|
47
|
+
it('auto-trims when content exceeds token limit', () => {
|
|
48
|
+
const tightManager = new context_manager_1.AIContextManager(10);
|
|
49
|
+
tightManager.addMessage({ role: 'user', content: 'a'.repeat(200) });
|
|
50
|
+
expect(tightManager.currentTokens).toBeLessThanOrEqual(10);
|
|
51
|
+
});
|
|
52
|
+
it('adds multiple messages', () => {
|
|
53
|
+
manager.addMessage({ role: 'user', content: 'first' });
|
|
54
|
+
manager.addMessage({ role: 'assistant', content: 'second' });
|
|
55
|
+
expect(manager.messages).toHaveLength(2);
|
|
56
|
+
});
|
|
57
|
+
});
|
|
58
|
+
describe('getMessages()', () => {
|
|
59
|
+
it('returns a shallow copy of messages', () => {
|
|
60
|
+
manager.addMessage({ role: 'user', content: 'test' });
|
|
61
|
+
const msgs = manager.getMessages();
|
|
62
|
+
msgs.push({ role: 'assistant', content: 'extra' });
|
|
63
|
+
expect(manager.messages).toHaveLength(1);
|
|
64
|
+
});
|
|
65
|
+
it('returns empty array when no messages', () => {
|
|
66
|
+
expect(manager.getMessages()).toEqual([]);
|
|
67
|
+
});
|
|
68
|
+
});
|
|
69
|
+
describe('clear()', () => {
|
|
70
|
+
it('clears all messages and resets token count', () => {
|
|
71
|
+
manager.addMessage({ role: 'user', content: 'test' });
|
|
72
|
+
manager.clear();
|
|
73
|
+
expect(manager.messages).toHaveLength(0);
|
|
74
|
+
expect(manager.currentTokens).toBe(0);
|
|
75
|
+
});
|
|
76
|
+
});
|
|
77
|
+
describe('trimToLimit()', () => {
|
|
78
|
+
it('preserves system messages', () => {
|
|
79
|
+
const tightManager = new context_manager_1.AIContextManager(50);
|
|
80
|
+
tightManager.addMessage({ role: 'system', content: 'You are a helpful assistant' });
|
|
81
|
+
tightManager.addMessage({ role: 'user', content: 'message 1 to be trimmed maybe' });
|
|
82
|
+
tightManager.addMessage({ role: 'user', content: 'message 2 to be trimmed maybe' });
|
|
83
|
+
tightManager.trimToLimit();
|
|
84
|
+
const systemMsgs = tightManager.getSystemMessages();
|
|
85
|
+
expect(systemMsgs).toHaveLength(1);
|
|
86
|
+
});
|
|
87
|
+
it('keeps most recent conversation messages', () => {
|
|
88
|
+
const tightManager = new context_manager_1.AIContextManager(30);
|
|
89
|
+
tightManager.addMessage({ role: 'user', content: 'old' });
|
|
90
|
+
tightManager.addMessage({ role: 'user', content: 'new' });
|
|
91
|
+
tightManager.trimToLimit();
|
|
92
|
+
const msgs = tightManager.getConversationMessages();
|
|
93
|
+
if (msgs.length > 0) {
|
|
94
|
+
expect(msgs[msgs.length - 1].content).toBe('new');
|
|
95
|
+
}
|
|
96
|
+
});
|
|
97
|
+
it('trims all conversation messages when system tokens fill the limit', () => {
|
|
98
|
+
const tightManager = new context_manager_1.AIContextManager(5);
|
|
99
|
+
tightManager.addMessage({ role: 'system', content: 'sys' });
|
|
100
|
+
tightManager.addMessage({ role: 'user', content: 'user msg' });
|
|
101
|
+
tightManager.trimToLimit();
|
|
102
|
+
expect(tightManager.getSystemMessages()).toHaveLength(1);
|
|
103
|
+
});
|
|
104
|
+
});
|
|
105
|
+
describe('getStats()', () => {
|
|
106
|
+
it('returns zero stats when empty', () => {
|
|
107
|
+
const stats = manager.getStats();
|
|
108
|
+
expect(stats.messageCount).toBe(0);
|
|
109
|
+
expect(stats.currentTokens).toBe(0);
|
|
110
|
+
expect(stats.maxTokens).toBe(200);
|
|
111
|
+
expect(stats.utilizationPercent).toBe(0);
|
|
112
|
+
});
|
|
113
|
+
it('returns correct utilization after adding messages', () => {
|
|
114
|
+
manager.addMessage({ role: 'user', content: 'hello world' });
|
|
115
|
+
const stats = manager.getStats();
|
|
116
|
+
expect(stats.messageCount).toBe(1);
|
|
117
|
+
expect(stats.utilizationPercent).toBeGreaterThan(0);
|
|
118
|
+
});
|
|
119
|
+
});
|
|
120
|
+
describe('setMaxTokens()', () => {
|
|
121
|
+
it('updates maxTokens', () => {
|
|
122
|
+
manager.setMaxTokens(500);
|
|
123
|
+
expect(manager.maxTokens).toBe(500);
|
|
124
|
+
});
|
|
125
|
+
it('trims when new limit is lower than current usage', () => {
|
|
126
|
+
manager.addMessage({ role: 'user', content: 'a'.repeat(200) });
|
|
127
|
+
manager.setMaxTokens(10);
|
|
128
|
+
expect(manager.currentTokens).toBeLessThanOrEqual(10);
|
|
129
|
+
});
|
|
130
|
+
it('does not trim when new limit is higher', () => {
|
|
131
|
+
manager.addMessage({ role: 'user', content: 'hello' });
|
|
132
|
+
const tokensBefore = manager.currentTokens;
|
|
133
|
+
manager.setMaxTokens(1000);
|
|
134
|
+
expect(manager.currentTokens).toBe(tokensBefore);
|
|
135
|
+
});
|
|
136
|
+
});
|
|
137
|
+
describe('getSystemMessages()', () => {
|
|
138
|
+
it('returns only system messages', () => {
|
|
139
|
+
manager.addMessage({ role: 'system', content: 'system prompt' });
|
|
140
|
+
manager.addMessage({ role: 'user', content: 'user msg' });
|
|
141
|
+
expect(manager.getSystemMessages()).toHaveLength(1);
|
|
142
|
+
expect(manager.getSystemMessages()[0].role).toBe('system');
|
|
143
|
+
});
|
|
144
|
+
it('returns empty array when no system messages', () => {
|
|
145
|
+
manager.addMessage({ role: 'user', content: 'user' });
|
|
146
|
+
expect(manager.getSystemMessages()).toHaveLength(0);
|
|
147
|
+
});
|
|
148
|
+
});
|
|
149
|
+
describe('getConversationMessages()', () => {
|
|
150
|
+
it('returns user and assistant messages only', () => {
|
|
151
|
+
manager.addMessage({ role: 'system', content: 'system' });
|
|
152
|
+
manager.addMessage({ role: 'user', content: 'user' });
|
|
153
|
+
manager.addMessage({ role: 'assistant', content: 'assistant' });
|
|
154
|
+
const conv = manager.getConversationMessages();
|
|
155
|
+
expect(conv).toHaveLength(2);
|
|
156
|
+
expect(conv.every((m) => m.role === 'user' || m.role === 'assistant')).toBe(true);
|
|
157
|
+
});
|
|
158
|
+
});
|
|
159
|
+
describe('addSystemMessage()', () => {
|
|
160
|
+
it('adds a system message', () => {
|
|
161
|
+
manager.addSystemMessage('You are helpful');
|
|
162
|
+
expect(manager.messages[0].role).toBe('system');
|
|
163
|
+
expect(manager.messages[0].content).toBe('You are helpful');
|
|
164
|
+
});
|
|
165
|
+
});
|
|
166
|
+
describe('addUserMessage()', () => {
|
|
167
|
+
it('adds a user message', () => {
|
|
168
|
+
manager.addUserMessage('What is the weather?');
|
|
169
|
+
expect(manager.messages[0].role).toBe('user');
|
|
170
|
+
expect(manager.messages[0].content).toBe('What is the weather?');
|
|
171
|
+
});
|
|
172
|
+
});
|
|
173
|
+
describe('addAssistantMessage()', () => {
|
|
174
|
+
it('adds an assistant message', () => {
|
|
175
|
+
manager.addAssistantMessage('It is sunny');
|
|
176
|
+
expect(manager.messages[0].role).toBe('assistant');
|
|
177
|
+
expect(manager.messages[0].content).toBe('It is sunny');
|
|
178
|
+
});
|
|
179
|
+
});
|
|
180
|
+
});
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import 'reflect-metadata';
import { AIFunctionOptions } from '../ai-enhanced.types';
/**
 * AIFunction decorator for AI-powered methods
 *
 * @example
 * ```typescript
 * @AIFunction({
 *   provider: 'openai',
 *   model: 'gpt-4',
 *   streaming: true
 * })
 * async generateContent(@AIPrompt() prompt: string) {
 *   // Auto-handled by framework
 * }
 * ```
 */
export declare function AIFunction(options: AIFunctionOptions): MethodDecorator;
/**
 * Get AI function metadata
 * @param target - Prototype of the class that owns the decorated method
 * @param propertyKey - Name of the decorated method
 * @returns The stored options, or undefined if the method is not decorated
 */
export declare function getAIFunctionMetadata(target: object, propertyKey: string | symbol): AIFunctionOptions | undefined;
/**
 * Check if method has AI function metadata
 * @param target - Prototype of the class that owns the method
 * @param propertyKey - Name of the method to check
 */
export declare function hasAIFunctionMetadata(target: object, propertyKey: string | symbol): boolean;
/**
 * AIPrompt parameter decorator
 *
 * @example
 * ```typescript
 * async generateContent(@AIPrompt() prompt: string) {
 *   // prompt parameter is marked for AI processing
 * }
 * ```
 */
export declare function AIPrompt(): ParameterDecorator;
/**
 * Get AI prompt parameter metadata
 * @returns Indices of the parameters marked with @AIPrompt (empty if none)
 */
export declare function getAIPromptMetadata(target: object, propertyKey: string | symbol): number[];
//# sourceMappingURL=ai-function.decorator.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"ai-function.decorator.d.ts","sourceRoot":"","sources":["../../src/decorators/ai-function.decorator.ts"],"names":[],"mappings":"AAAA,OAAO,kBAAkB,CAAC;AAC1B,OAAO,EAAE,iBAAiB,EAAE,MAAM,sBAAsB,CAAC;AAMzD;;;;;;;;;;;;;;GAcG;AACH,wBAAgB,UAAU,CAAC,OAAO,EAAE,iBAAiB,GAAG,eAAe,CAgBtE;AAED;;GAEG;AACH,wBAAgB,qBAAqB,CACnC,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,MAAM,GAAG,MAAM,GAC3B,iBAAiB,GAAG,SAAS,CAE/B;AAED;;GAEG;AACH,wBAAgB,qBAAqB,CAAC,MAAM,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,GAAG,OAAO,CAE3F;AAED;;;;;;;;;GASG;AACH,wBAAgB,QAAQ,IAAI,kBAAkB,CAM7C;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,MAAM,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,EAAE,CAK1F"}
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.AIFunction = AIFunction;
|
|
7
|
+
exports.getAIFunctionMetadata = getAIFunctionMetadata;
|
|
8
|
+
exports.hasAIFunctionMetadata = hasAIFunctionMetadata;
|
|
9
|
+
exports.AIPrompt = AIPrompt;
|
|
10
|
+
exports.getAIPromptMetadata = getAIPromptMetadata;
|
|
11
|
+
require("reflect-metadata");
|
|
12
|
+
const core_1 = __importDefault(require("@hazeljs/core"));
|
|
13
|
+
const AI_FUNCTION_METADATA_KEY = 'hazel:ai:function';
|
|
14
|
+
const AI_PROMPT_METADATA_KEY = 'hazel:ai:prompt';
|
|
15
|
+
/**
|
|
16
|
+
* AIFunction decorator for AI-powered methods
|
|
17
|
+
*
|
|
18
|
+
* @example
|
|
19
|
+
* ```typescript
|
|
20
|
+
* @AIFunction({
|
|
21
|
+
* provider: 'openai',
|
|
22
|
+
* model: 'gpt-4',
|
|
23
|
+
* streaming: true
|
|
24
|
+
* })
|
|
25
|
+
* async generateContent(@AIPrompt() prompt: string) {
|
|
26
|
+
* // Auto-handled by framework
|
|
27
|
+
* }
|
|
28
|
+
* ```
|
|
29
|
+
*/
|
|
30
|
+
function AIFunction(options) {
|
|
31
|
+
return (target, propertyKey, descriptor) => {
|
|
32
|
+
const defaults = {
|
|
33
|
+
streaming: false,
|
|
34
|
+
temperature: 0.7,
|
|
35
|
+
maxTokens: 1000,
|
|
36
|
+
...options,
|
|
37
|
+
};
|
|
38
|
+
Reflect.defineMetadata(AI_FUNCTION_METADATA_KEY, defaults, target, propertyKey);
|
|
39
|
+
core_1.default.debug(`AIFunction decorator applied to ${target.constructor.name}.${String(propertyKey)}`);
|
|
40
|
+
return descriptor;
|
|
41
|
+
};
|
|
42
|
+
}
|
|
43
|
+
/**
|
|
44
|
+
* Get AI function metadata
|
|
45
|
+
*/
|
|
46
|
+
function getAIFunctionMetadata(target, propertyKey) {
|
|
47
|
+
return Reflect.getMetadata(AI_FUNCTION_METADATA_KEY, target, propertyKey);
|
|
48
|
+
}
|
|
49
|
+
/**
|
|
50
|
+
* Check if method has AI function metadata
|
|
51
|
+
*/
|
|
52
|
+
function hasAIFunctionMetadata(target, propertyKey) {
|
|
53
|
+
return Reflect.hasMetadata(AI_FUNCTION_METADATA_KEY, target, propertyKey);
|
|
54
|
+
}
|
|
55
|
+
/**
|
|
56
|
+
* AIPrompt parameter decorator
|
|
57
|
+
*
|
|
58
|
+
* @example
|
|
59
|
+
* ```typescript
|
|
60
|
+
* async generateContent(@AIPrompt() prompt: string) {
|
|
61
|
+
* // prompt parameter is marked for AI processing
|
|
62
|
+
* }
|
|
63
|
+
* ```
|
|
64
|
+
*/
|
|
65
|
+
function AIPrompt() {
|
|
66
|
+
return (target, propertyKey, parameterIndex) => {
|
|
67
|
+
const existingParams = Reflect.getMetadata(AI_PROMPT_METADATA_KEY, target, propertyKey) || [];
|
|
68
|
+
existingParams[parameterIndex] = 'prompt';
|
|
69
|
+
Reflect.defineMetadata(AI_PROMPT_METADATA_KEY, existingParams, target, propertyKey);
|
|
70
|
+
};
|
|
71
|
+
}
|
|
72
|
+
/**
|
|
73
|
+
* Get AI prompt parameter metadata
|
|
74
|
+
*/
|
|
75
|
+
function getAIPromptMetadata(target, propertyKey) {
|
|
76
|
+
const params = Reflect.getMetadata(AI_PROMPT_METADATA_KEY, target, propertyKey) || [];
|
|
77
|
+
return params
|
|
78
|
+
.map((p, index) => (p === 'prompt' ? index : -1))
|
|
79
|
+
.filter((i) => i !== -1);
|
|
80
|
+
}
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import 'reflect-metadata';
import { AIValidationOptions } from '../ai-enhanced.types';
/**
 * AIValidate decorator for AI-powered validation
 *
 * @example
 * ```typescript
 * @AIValidate({
 *   provider: 'openai',
 *   instruction: 'Validate if this is a professional email'
 * })
 * export class ContactDto {
 *   @IsEmail()
 *   email: string;
 * }
 * ```
 */
export declare function AIValidate(options: AIValidationOptions): ClassDecorator;
/**
 * Get AI validation metadata
 * @param target - The decorated class (constructor function)
 * @returns The stored options, or undefined if the class is not decorated
 */
export declare function getAIValidationMetadata(target: object): AIValidationOptions | undefined;
/**
 * Check if class has AI validation metadata
 * @param target - The class (constructor function) to check
 */
export declare function hasAIValidationMetadata(target: object): boolean;
/**
 * AIValidateProperty decorator for property-level validation
 *
 * @example
 * ```typescript
 * export class UserDto {
 *   @AIValidateProperty({
 *     provider: 'openai',
 *     instruction: 'Check if this username is appropriate'
 *   })
 *   username: string;
 * }
 * ```
 */
export declare function AIValidateProperty(options: AIValidationOptions): PropertyDecorator;
/**
 * Get AI property validation metadata
 * @param target - Prototype of the class that owns the property
 * @param propertyKey - Name of the decorated property
 * @returns The stored options, or undefined if the property is not decorated
 */
export declare function getAIPropertyValidationMetadata(target: object, propertyKey: string | symbol): AIValidationOptions | undefined;
//# sourceMappingURL=ai-validate.decorator.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"ai-validate.decorator.d.ts","sourceRoot":"","sources":["../../src/decorators/ai-validate.decorator.ts"],"names":[],"mappings":"AAAA,OAAO,kBAAkB,CAAC;AAC1B,OAAO,EAAE,mBAAmB,EAAE,MAAM,sBAAsB,CAAC;AAK3D;;;;;;;;;;;;;;GAcG;AACH,wBAAgB,UAAU,CAAC,OAAO,EAAE,mBAAmB,GAAG,cAAc,CAYvE;AAED;;GAEG;AACH,wBAAgB,uBAAuB,CAAC,MAAM,EAAE,MAAM,GAAG,mBAAmB,GAAG,SAAS,CAEvF;AAED;;GAEG;AACH,wBAAgB,uBAAuB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAE/D;AAED;;;;;;;;;;;;;GAaG;AACH,wBAAgB,kBAAkB,CAAC,OAAO,EAAE,mBAAmB,GAAG,iBAAiB,CAalF;AAED;;GAEG;AACH,wBAAgB,+BAA+B,CAC7C,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,MAAM,GAAG,MAAM,GAC3B,mBAAmB,GAAG,SAAS,CAEjC"}
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.AIValidate = AIValidate;
|
|
7
|
+
exports.getAIValidationMetadata = getAIValidationMetadata;
|
|
8
|
+
exports.hasAIValidationMetadata = hasAIValidationMetadata;
|
|
9
|
+
exports.AIValidateProperty = AIValidateProperty;
|
|
10
|
+
exports.getAIPropertyValidationMetadata = getAIPropertyValidationMetadata;
|
|
11
|
+
require("reflect-metadata");
|
|
12
|
+
const core_1 = __importDefault(require("@hazeljs/core"));
|
|
13
|
+
const AI_VALIDATE_METADATA_KEY = 'hazel:ai:validate';
|
|
14
|
+
/**
|
|
15
|
+
* AIValidate decorator for AI-powered validation
|
|
16
|
+
*
|
|
17
|
+
* @example
|
|
18
|
+
* ```typescript
|
|
19
|
+
* @AIValidate({
|
|
20
|
+
* provider: 'openai',
|
|
21
|
+
* instruction: 'Validate if this is a professional email'
|
|
22
|
+
* })
|
|
23
|
+
* export class ContactDto {
|
|
24
|
+
* @IsEmail()
|
|
25
|
+
* email: string;
|
|
26
|
+
* }
|
|
27
|
+
* ```
|
|
28
|
+
*/
|
|
29
|
+
function AIValidate(options) {
|
|
30
|
+
return (target) => {
|
|
31
|
+
const defaults = {
|
|
32
|
+
model: 'gpt-3.5-turbo',
|
|
33
|
+
failOnInvalid: true,
|
|
34
|
+
...options,
|
|
35
|
+
};
|
|
36
|
+
Reflect.defineMetadata(AI_VALIDATE_METADATA_KEY, defaults, target);
|
|
37
|
+
const className = target.name || 'Unknown';
|
|
38
|
+
core_1.default.debug(`AIValidate decorator applied to ${className}`);
|
|
39
|
+
};
|
|
40
|
+
}
|
|
41
|
+
/**
|
|
42
|
+
* Get AI validation metadata
|
|
43
|
+
*/
|
|
44
|
+
function getAIValidationMetadata(target) {
|
|
45
|
+
return Reflect.getMetadata(AI_VALIDATE_METADATA_KEY, target);
|
|
46
|
+
}
|
|
47
|
+
/**
|
|
48
|
+
* Check if class has AI validation metadata
|
|
49
|
+
*/
|
|
50
|
+
function hasAIValidationMetadata(target) {
|
|
51
|
+
return Reflect.hasMetadata(AI_VALIDATE_METADATA_KEY, target);
|
|
52
|
+
}
|
|
53
|
+
/**
|
|
54
|
+
* AIValidateProperty decorator for property-level validation
|
|
55
|
+
*
|
|
56
|
+
* @example
|
|
57
|
+
* ```typescript
|
|
58
|
+
* export class UserDto {
|
|
59
|
+
* @AIValidateProperty({
|
|
60
|
+
* provider: 'openai',
|
|
61
|
+
* instruction: 'Check if this username is appropriate'
|
|
62
|
+
* })
|
|
63
|
+
* username: string;
|
|
64
|
+
* }
|
|
65
|
+
* ```
|
|
66
|
+
*/
|
|
67
|
+
function AIValidateProperty(options) {
|
|
68
|
+
return (target, propertyKey) => {
|
|
69
|
+
const defaults = {
|
|
70
|
+
model: 'gpt-3.5-turbo',
|
|
71
|
+
failOnInvalid: true,
|
|
72
|
+
...options,
|
|
73
|
+
};
|
|
74
|
+
Reflect.defineMetadata(`${AI_VALIDATE_METADATA_KEY}:${String(propertyKey)}`, defaults, target);
|
|
75
|
+
core_1.default.debug(`AIValidateProperty decorator applied to ${target.constructor.name}.${String(propertyKey)}`);
|
|
76
|
+
};
|
|
77
|
+
}
|
|
78
|
+
/**
|
|
79
|
+
* Get AI property validation metadata
|
|
80
|
+
*/
|
|
81
|
+
function getAIPropertyValidationMetadata(target, propertyKey) {
|
|
82
|
+
return Reflect.getMetadata(`${AI_VALIDATE_METADATA_KEY}:${String(propertyKey)}`, target);
|
|
83
|
+
}
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
 * @hazeljs/ai - AI integration module for HazelJS
 */
// Module, services, and task decorator
export { AIModule } from './ai.module';
export { AIService } from './ai.service';
export { AIEnhancedService } from './ai-enhanced.service';
export type { AITaskConfig, AITaskContext, AITaskResult } from './ai.types';
export { AITask } from './ai.decorator';
// Provider implementations
export { OpenAIProvider } from './providers/openai.provider';
export { AnthropicProvider } from './providers/anthropic.provider';
export { GeminiProvider } from './providers/gemini.provider';
export { CohereProvider } from './providers/cohere.provider';
export { OllamaProvider } from './providers/ollama.provider';
// Method/parameter and validation decorators plus their metadata accessors
export { AIFunction, AIPrompt, getAIFunctionMetadata, hasAIFunctionMetadata, getAIPromptMetadata, } from './decorators/ai-function.decorator';
export { AIValidate, AIValidateProperty, getAIValidationMetadata, hasAIValidationMetadata, getAIPropertyValidationMetadata, } from './decorators/ai-validate.decorator';
// Vector store service
export { VectorService } from './vector/vector.service';
// Shared type surface (AIFunction type re-exported as AIFunctionType to
// avoid clashing with the AIFunction decorator above)
export { type AIProvider, type AIModelConfig, type AIMessageRole, type AIMessage, type AICompletionRequest, type AICompletionResponse, type AIStreamChunk, type AIFunction as AIFunctionType, type AIEmbeddingRequest, type AIEmbeddingResponse, type IAIProvider, type VectorDatabase, type VectorStoreConfig, type VectorDocument, type VectorSearchRequest, type VectorSearchResult, type AIContext, type TokenUsage, type TokenLimitConfig, type AIFunctionOptions, type AIValidationOptions, } from './ai-enhanced.types';
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;GAEG;AAGH,OAAO,EAAE,QAAQ,EAAE,MAAM,aAAa,CAAC;AACvC,OAAO,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AACzC,OAAO,EAAE,iBAAiB,EAAE,MAAM,uBAAuB,CAAC;AAC1D,YAAY,EAAE,YAAY,EAAE,aAAa,EAAE,YAAY,EAAE,MAAM,YAAY,CAAC;AAC5E,OAAO,EAAE,MAAM,EAAE,MAAM,gBAAgB,CAAC;AAGxC,OAAO,EAAE,cAAc,EAAE,MAAM,6BAA6B,CAAC;AAC7D,OAAO,EAAE,iBAAiB,EAAE,MAAM,gCAAgC,CAAC;AACnE,OAAO,EAAE,cAAc,EAAE,MAAM,6BAA6B,CAAC;AAC7D,OAAO,EAAE,cAAc,EAAE,MAAM,6BAA6B,CAAC;AAC7D,OAAO,EAAE,cAAc,EAAE,MAAM,6BAA6B,CAAC;AAC7D,OAAO,EACL,UAAU,EACV,QAAQ,EACR,qBAAqB,EACrB,qBAAqB,EACrB,mBAAmB,GACpB,MAAM,oCAAoC,CAAC;AAC5C,OAAO,EACL,UAAU,EACV,kBAAkB,EAClB,uBAAuB,EACvB,uBAAuB,EACvB,+BAA+B,GAChC,MAAM,oCAAoC,CAAC;AAC5C,OAAO,EAAE,aAAa,EAAE,MAAM,yBAAyB,CAAC;AACxD,OAAO,EACL,KAAK,UAAU,EACf,KAAK,aAAa,EAClB,KAAK,aAAa,EAClB,KAAK,SAAS,EACd,KAAK,mBAAmB,EACxB,KAAK,oBAAoB,EACzB,KAAK,aAAa,EAClB,KAAK,UAAU,IAAI,cAAc,EACjC,KAAK,kBAAkB,EACvB,KAAK,mBAAmB,EACxB,KAAK,WAAW,EAChB,KAAK,cAAc,EACnB,KAAK,iBAAiB,EACtB,KAAK,cAAc,EACnB,KAAK,mBAAmB,EACxB,KAAK,kBAAkB,EACvB,KAAK,SAAS,EACd,KAAK,UAAU,EACf,KAAK,gBAAgB,EACrB,KAAK,iBAAiB,EACtB,KAAK,mBAAmB,GACzB,MAAM,qBAAqB,CAAC"}
|