@hazeljs/ai 0.2.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +496 -0
  2. package/dist/ai-enhanced.service.d.ts +108 -0
  3. package/dist/ai-enhanced.service.d.ts.map +1 -0
  4. package/dist/ai-enhanced.service.js +345 -0
  5. package/dist/ai-enhanced.types.d.ts +269 -0
  6. package/dist/ai-enhanced.types.d.ts.map +1 -0
  7. package/dist/ai-enhanced.types.js +2 -0
  8. package/dist/ai.decorator.d.ts +4 -0
  9. package/dist/ai.decorator.d.ts.map +1 -0
  10. package/dist/ai.decorator.js +57 -0
  11. package/dist/ai.module.d.ts +12 -0
  12. package/dist/ai.module.d.ts.map +1 -0
  13. package/dist/ai.module.js +44 -0
  14. package/dist/ai.service.d.ts +10 -0
  15. package/dist/ai.service.d.ts.map +1 -0
  16. package/dist/ai.service.js +261 -0
  17. package/dist/ai.types.d.ts +30 -0
  18. package/dist/ai.types.d.ts.map +1 -0
  19. package/dist/ai.types.js +2 -0
  20. package/dist/context/context.manager.d.ts +69 -0
  21. package/dist/context/context.manager.d.ts.map +1 -0
  22. package/dist/context/context.manager.js +168 -0
  23. package/dist/decorators/ai-function.decorator.d.ts +42 -0
  24. package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
  25. package/dist/decorators/ai-function.decorator.js +80 -0
  26. package/dist/decorators/ai-validate.decorator.d.ts +46 -0
  27. package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
  28. package/dist/decorators/ai-validate.decorator.js +83 -0
  29. package/dist/index.d.ts +17 -0
  30. package/dist/index.d.ts.map +1 -0
  31. package/dist/index.js +38 -0
  32. package/dist/providers/anthropic.provider.d.ts +48 -0
  33. package/dist/providers/anthropic.provider.d.ts.map +1 -0
  34. package/dist/providers/anthropic.provider.js +194 -0
  35. package/dist/providers/cohere.provider.d.ts +57 -0
  36. package/dist/providers/cohere.provider.d.ts.map +1 -0
  37. package/dist/providers/cohere.provider.js +230 -0
  38. package/dist/providers/gemini.provider.d.ts +45 -0
  39. package/dist/providers/gemini.provider.d.ts.map +1 -0
  40. package/dist/providers/gemini.provider.js +180 -0
  41. package/dist/providers/ollama.provider.d.ts +45 -0
  42. package/dist/providers/ollama.provider.d.ts.map +1 -0
  43. package/dist/providers/ollama.provider.js +232 -0
  44. package/dist/providers/openai.provider.d.ts +47 -0
  45. package/dist/providers/openai.provider.d.ts.map +1 -0
  46. package/dist/providers/openai.provider.js +273 -0
  47. package/dist/tracking/token.tracker.d.ts +72 -0
  48. package/dist/tracking/token.tracker.d.ts.map +1 -0
  49. package/dist/tracking/token.tracker.js +222 -0
  50. package/dist/vector/vector.service.d.ts +50 -0
  51. package/dist/vector/vector.service.d.ts.map +1 -0
  52. package/dist/vector/vector.service.js +163 -0
  53. package/package.json +52 -0
package/dist/ai.module.d.ts
@@ -0,0 +1,12 @@
+ import { AIProvider } from './ai-enhanced.types';
+ export interface AIModuleOptions {
+     defaultProvider?: AIProvider;
+     providers?: AIProvider[];
+     apiKeys?: Partial<Record<AIProvider, string>>;
+ }
+ export declare class AIModule {
+     private static options;
+     static register(options: AIModuleOptions): typeof AIModule;
+     static getOptions(): AIModuleOptions;
+ }
+ //# sourceMappingURL=ai.module.d.ts.map
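The declaration above is the module's whole configuration surface: a default provider, an allow-list of providers, and per-provider API keys. A minimal registration sketch, assuming the AIProvider union includes the string values the key map in ai.module.js handles ('openai', 'anthropic', 'gemini', 'cohere'):

import { AIModule } from '@hazeljs/ai';

// register() stores the options statically and returns the class itself,
// so the configured module can be passed wherever a module is expected.
const ConfiguredAIModule = AIModule.register({
  defaultProvider: 'openai',
  providers: ['openai', 'anthropic'],
  apiKeys: { openai: process.env.OPENAI_API_KEY ?? '' },
});

console.log(AIModule.getOptions().defaultProvider); // 'openai'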
package/dist/ai.module.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"ai.module.d.ts","sourceRoot":"","sources":["../src/ai.module.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,UAAU,EAAE,MAAM,qBAAqB,CAAC;AAEjD,MAAM,WAAW,eAAe;IAC9B,eAAe,CAAC,EAAE,UAAU,CAAC;IAC7B,SAAS,CAAC,EAAE,UAAU,EAAE,CAAC;IACzB,OAAO,CAAC,EAAE,OAAO,CAAC,MAAM,CAAC,UAAU,EAAE,MAAM,CAAC,CAAC,CAAC;CAC/C;AAED,qBAIa,QAAQ;IACnB,OAAO,CAAC,MAAM,CAAC,OAAO,CAAuB;IAE7C,MAAM,CAAC,QAAQ,CAAC,OAAO,EAAE,eAAe,GAAG,OAAO,QAAQ;IAqB1D,MAAM,CAAC,UAAU,IAAI,eAAe;CAGrC"}
package/dist/ai.module.js
@@ -0,0 +1,44 @@
+ "use strict";
+ var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
+     var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
+     if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
+     else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
+     return c > 3 && r && Object.defineProperty(target, key, r), r;
+ };
+ var AIModule_1;
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.AIModule = void 0;
+ const core_1 = require("@hazeljs/core");
+ const ai_service_1 = require("./ai.service");
+ const ai_enhanced_service_1 = require("./ai-enhanced.service");
+ let AIModule = AIModule_1 = class AIModule {
+     static register(options) {
+         AIModule_1.options = options;
+         // Set API keys in environment if provided (allows runtime configuration)
+         if (options.apiKeys) {
+             const keyMap = {
+                 openai: 'OPENAI_API_KEY',
+                 anthropic: 'ANTHROPIC_API_KEY',
+                 gemini: 'GEMINI_API_KEY',
+                 cohere: 'COHERE_API_KEY',
+             };
+             for (const [provider, key] of Object.entries(options.apiKeys)) {
+                 if (key && keyMap[provider]) {
+                     process.env[keyMap[provider]] = key;
+                 }
+             }
+         }
+         return AIModule_1;
+     }
+     static getOptions() {
+         return AIModule_1.options;
+     }
+ };
+ exports.AIModule = AIModule;
+ AIModule.options = {};
+ exports.AIModule = AIModule = AIModule_1 = __decorate([
+     (0, core_1.HazelModule)({
+         providers: [ai_service_1.AIService, ai_enhanced_service_1.AIEnhancedService],
+         exports: [ai_service_1.AIService, ai_enhanced_service_1.AIEnhancedService],
+     })
+ ], AIModule);
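Worth noting in the implementation: register() copies each supplied key into the conventional environment variable (OPENAI_API_KEY and friends), so providers that read process.env at construction time pick the keys up with no further wiring. A small sketch of the observable effect (the key value is a placeholder):

import { AIModule } from '@hazeljs/ai';

AIModule.register({ apiKeys: { openai: 'sk-placeholder' } });

// The key is now visible process-wide, which is convenient for runtime
// configuration but is also global mutable state shared by everything
// running in the process.
console.log(process.env.OPENAI_API_KEY); // 'sk-placeholder'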
package/dist/ai.service.d.ts
@@ -0,0 +1,10 @@
+ import { AITaskConfig, AITaskResult } from './ai.types';
+ export declare class AIService {
+     private providers;
+     constructor();
+     private initializeProviders;
+     private formatPrompt;
+     private parseResponse;
+     executeTask(config: AITaskConfig, input: unknown): Promise<AITaskResult>;
+ }
+ //# sourceMappingURL=ai.service.d.ts.map
package/dist/ai.service.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"ai.service.d.ts","sourceRoot":"","sources":["../src/ai.service.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,YAAY,EAAiB,YAAY,EAAE,MAAM,YAAY,CAAC;AAYvE,qBACa,SAAS;IACpB,OAAO,CAAC,SAAS,CAAsC;;IAOvD,OAAO,CAAC,mBAAmB;IAqL3B,OAAO,CAAC,YAAY;IAepB,OAAO,CAAC,aAAa;IAkBf,WAAW,CAAC,MAAM,EAAE,YAAY,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,YAAY,CAAC;CAkC/E"}
package/dist/ai.service.js
@@ -0,0 +1,261 @@
+ "use strict";
+ var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
+     var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
+     if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
+     else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
+     return c > 3 && r && Object.defineProperty(target, key, r), r;
+ };
+ var __metadata = (this && this.__metadata) || function (k, v) {
+     if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
+ };
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.AIService = void 0;
+ const core_1 = require("@hazeljs/core");
+ const core_2 = __importDefault(require("@hazeljs/core"));
+ const openai_1 = __importDefault(require("openai"));
+ let AIService = class AIService {
+     constructor() {
+         this.providers = new Map();
+         // Initialize providers
+         this.initializeProviders();
+     }
+     initializeProviders() {
+         core_2.default.debug('Initializing AI providers');
+         // OpenAI provider
+         this.providers.set('openai', {
+             execute: async (config, input) => {
+                 core_2.default.debug('OpenAI provider execute called with config:', {
+                     name: config.name,
+                     model: config.model,
+                     stream: config.stream,
+                     provider: config.provider,
+                 });
+                 const openai = new openai_1.default({
+                     apiKey: process.env.OPENAI_API_KEY,
+                 });
+                 core_2.default.debug('OpenAI client initialized');
+                 if (config.stream) {
+                     try {
+                         core_2.default.debug('Creating OpenAI stream with config:', {
+                             model: config.model,
+                             temperature: config.temperature,
+                             prompt: this.formatPrompt(config, input),
+                         });
+                         const stream = await openai.chat.completions.create({
+                             model: config.model,
+                             messages: [
+                                 {
+                                     role: 'system',
+                                     content: this.formatPrompt(config, input),
+                                 },
+                             ],
+                             temperature: config.temperature || 0.7,
+                             max_tokens: config.maxTokens,
+                             stream: true,
+                         });
+                         core_2.default.debug('OpenAI stream created successfully');
+                         return {
+                             // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
+                             stream: (async function* () {
+                                 try {
+                                     core_2.default.debug('Starting to iterate over stream chunks');
+                                     for await (const chunk of stream) {
+                                         const content = chunk.choices[0]?.delta?.content;
+                                         if (content) {
+                                             core_2.default.debug('Yielding chunk:', { content });
+                                             yield content;
+                                         }
+                                     }
+                                     core_2.default.debug('Finished iterating over stream chunks');
+                                 }
+                                 catch (error) {
+                                     core_2.default.error('Error in OpenAI stream:', error);
+                                     throw error;
+                                 }
+                             })(),
+                         };
+                     }
+                     catch (error) {
+                         core_2.default.error('Error creating OpenAI stream:', error);
+                         return { error: error instanceof Error ? error.message : 'Failed to create stream' };
+                     }
+                 }
+                 try {
+                     const response = await openai.chat.completions.create({
+                         model: config.model,
+                         messages: [
+                             {
+                                 role: 'system',
+                                 content: this.formatPrompt(config, input),
+                             },
+                         ],
+                         temperature: config.temperature || 0.7,
+                         max_tokens: config.maxTokens,
+                     });
+                     return this.parseResponse(response.choices[0].message.content, config.outputType);
+                 }
+                 catch (error) {
+                     core_2.default.error('Error in OpenAI request:', error);
+                     return { error: error instanceof Error ? error.message : 'Failed to get response' };
+                 }
+             },
+         });
+         // Ollama provider
+         this.providers.set('ollama', {
+             execute: async (config, input) => {
+                 if (config.stream) {
+                     const response = await fetch('http://localhost:11434/api/generate', {
+                         method: 'POST',
+                         headers: { 'Content-Type': 'application/json' },
+                         body: JSON.stringify({
+                             model: config.model,
+                             prompt: this.formatPrompt(config, input),
+                             temperature: config.temperature || 0.7,
+                             max_tokens: config.maxTokens,
+                             stream: true,
+                         }),
+                     });
+                     if (!response.body) {
+                         throw new Error('No response body available for streaming');
+                     }
+                     const reader = response.body.getReader();
+                     const decoder = new TextDecoder();
+                     return {
+                         // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
+                         stream: (async function* () {
+                             try {
+                                 while (true) {
+                                     const { done, value } = await reader.read();
+                                     if (done)
+                                         break;
+                                     const chunk = decoder.decode(value);
+                                     const lines = chunk.split('\n').filter(Boolean);
+                                     for (const line of lines) {
+                                         try {
+                                             const data = JSON.parse(line);
+                                             if (data.response) {
+                                                 yield data.response;
+                                             }
+                                         }
+                                         catch {
+                                             // Skip invalid JSON lines
+                                             continue;
+                                         }
+                                     }
+                                 }
+                             }
+                             finally {
+                                 reader.releaseLock();
+                             }
+                         })(),
+                     };
+                 }
+                 const response = await fetch('http://localhost:11434/api/generate', {
+                     method: 'POST',
+                     headers: { 'Content-Type': 'application/json' },
+                     body: JSON.stringify({
+                         model: config.model,
+                         prompt: this.formatPrompt(config, input),
+                         temperature: config.temperature || 0.7,
+                         max_tokens: config.maxTokens,
+                     }),
+                 });
+                 const data = (await response.json());
+                 return this.parseResponse(data.response, config.outputType);
+             },
+         });
+         // Custom provider
+         this.providers.set('custom', {
+             execute: async (config, input) => {
+                 if (!config.customProvider) {
+                     throw new Error('Custom provider configuration is required');
+                 }
+                 const { url, headers, transformRequest, transformResponse } = config.customProvider;
+                 const requestBody = transformRequest ? transformRequest(input) : input;
+                 const response = await fetch(url, {
+                     method: 'POST',
+                     headers: {
+                         'Content-Type': 'application/json',
+                         ...headers,
+                     },
+                     body: JSON.stringify(requestBody),
+                 });
+                 const data = await response.json();
+                 const transformedData = transformResponse ? transformResponse(data) : data;
+                 return this.parseResponse(transformedData, config.outputType);
+             },
+         });
+     }
+     // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
+     formatPrompt(config, input) {
+         const context = {
+             taskName: config.name,
+             description: config.prompt,
+             inputExample: 'JSON object with input data',
+             outputExample: `Expected ${config.outputType} output`,
+             input: input,
+         };
+         return config.prompt.replace(/{{(\w+)}}/g, (_, key) => {
+             return String(context[key] || '');
+         });
+     }
+     // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
+     parseResponse(response, outputType) {
+         try {
+             switch (outputType) {
+                 case 'json':
+                     return { data: JSON.parse(response) };
+                 case 'number':
+                     return { data: Number(response) };
+                 case 'boolean':
+                     return { data: response.toLowerCase() === 'true' };
+                 default:
+                     return { data: response };
+             }
+         }
+         catch (error) {
+             const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+             return { error: `Failed to parse response: ${errorMessage}` };
+         }
+     }
+     async executeTask(config, input) {
+         try {
+             core_2.default.debug('Executing AI task:', {
+                 task: config.name,
+                 provider: config.provider,
+                 stream: config.stream,
+                 model: config.model,
+             });
+             const provider = this.providers.get(config.provider);
+             if (!provider) {
+                 core_2.default.error('Provider not found:', config.provider);
+                 throw new Error(`Provider ${config.provider} not supported`);
+             }
+             core_2.default.debug('Found provider, executing task');
+             const result = await provider.execute(config, input);
+             core_2.default.debug('AI task completed:', {
+                 task: config.name,
+                 hasStream: !!result.stream,
+                 hasError: !!result.error,
+             });
+             return result;
+         }
+         catch (error) {
+             const errorMessage = error instanceof Error ? error.message : 'Unknown error';
+             core_2.default.error('AI task failed:', {
+                 task: config.name,
+                 error: errorMessage,
+                 stack: error instanceof Error ? error.stack : undefined,
+             });
+             return { error: errorMessage };
+         }
+     }
+ };
+ exports.AIService = AIService;
+ exports.AIService = AIService = __decorate([
+     (0, core_1.Injectable)(),
+     __metadata("design:paramtypes", [])
+ ], AIService);
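executeTask looks up the provider registered under config.provider, formats the prompt by substituting {{placeholders}} ({{input}}, {{taskName}}, and the other AITaskContext fields) via String(), and returns an AITaskResult carrying exactly one of data, error, or stream. A hedged usage sketch (the task name and prompt are invented; a string input is used because an object would interpolate as '[object Object]'):

import { AIService, AITaskConfig } from '@hazeljs/ai';

const ai = new AIService();

const config: AITaskConfig = {
  name: 'summarize',
  prompt: 'Summarize the following text: {{input}}',
  provider: 'openai',
  model: 'gpt-4',
  outputType: 'string',
  stream: true,
};

async function run(): Promise<void> {
  const result = await ai.executeTask(config, 'Long article text goes here');
  if (result.stream) {
    // Streaming path: the provider yields plain string chunks.
    for await (const chunk of result.stream) {
      process.stdout.write(chunk);
    }
  } else if (result.error) {
    console.error(result.error); // failures are returned, not thrown
  } else {
    console.log(result.data);
  }
}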
package/dist/ai.types.d.ts
@@ -0,0 +1,30 @@
+ export type LLMProvider = 'openai' | 'ollama' | 'anthropic' | 'custom';
+ export interface AITaskConfig {
+     name: string;
+     prompt: string;
+     provider: LLMProvider;
+     model: string;
+     outputType: 'string' | 'json' | 'number' | 'boolean';
+     temperature?: number;
+     maxTokens?: number;
+     stream?: boolean;
+     customProvider?: {
+         url: string;
+         headers?: Record<string, string>;
+         transformRequest?: (input: unknown) => unknown;
+         transformResponse?: (response: unknown) => unknown;
+     };
+ }
+ export interface AITaskContext {
+     taskName: string;
+     description: string;
+     inputExample: string;
+     outputExample: string;
+     input: unknown;
+ }
+ export interface AITaskResult<T = unknown> {
+     data?: T;
+     error?: string;
+     stream?: AsyncIterable<string>;
+ }
+ //# sourceMappingURL=ai.types.d.ts.map
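outputType selects the parseResponse branch seen earlier: 'json' runs JSON.parse, 'number' runs Number(), 'boolean' lower-cases and compares to 'true', and 'string' passes through. The customProvider hooks let a task target any JSON-over-HTTP endpoint; a sketch with a hypothetical endpoint and response shape:

import { AITaskConfig } from '@hazeljs/ai';

const customTask: AITaskConfig = {
  name: 'moderate',
  prompt: 'Classify: {{input}}',
  provider: 'custom',
  model: 'in-house-moderation-v1', // hypothetical model id
  outputType: 'boolean',
  customProvider: {
    url: 'https://models.example.internal/v1/classify', // hypothetical URL
    headers: { Authorization: `Bearer ${process.env.MODERATION_TOKEN ?? ''}` },
    // Shape the outgoing body for the endpoint.
    transformRequest: (input) => ({ text: String(input) }),
    // Return the string that parseResponse will coerce ('true' becomes true).
    transformResponse: (response) => (response as { verdict: string }).verdict,
  },
};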
package/dist/ai.types.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"ai.types.d.ts","sourceRoot":"","sources":["../src/ai.types.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,WAAW,GAAG,QAAQ,GAAG,QAAQ,GAAG,WAAW,GAAG,QAAQ,CAAC;AAEvE,MAAM,WAAW,YAAY;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,WAAW,CAAC;IACtB,KAAK,EAAE,MAAM,CAAC;IACd,UAAU,EAAE,QAAQ,GAAG,MAAM,GAAG,QAAQ,GAAG,SAAS,CAAC;IACrD,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB,cAAc,CAAC,EAAE;QACf,GAAG,EAAE,MAAM,CAAC;QACZ,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QACjC,gBAAgB,CAAC,EAAE,CAAC,KAAK,EAAE,OAAO,KAAK,OAAO,CAAC;QAC/C,iBAAiB,CAAC,EAAE,CAAC,QAAQ,EAAE,OAAO,KAAK,OAAO,CAAC;KACpD,CAAC;CACH;AAED,MAAM,WAAW,aAAa;IAC5B,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,YAAY,EAAE,MAAM,CAAC;IACrB,aAAa,EAAE,MAAM,CAAC;IACtB,KAAK,EAAE,OAAO,CAAC;CAChB;AAED,MAAM,WAAW,YAAY,CAAC,CAAC,GAAG,OAAO;IACvC,IAAI,CAAC,EAAE,CAAC,CAAC;IACT,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,MAAM,CAAC,EAAE,aAAa,CAAC,MAAM,CAAC,CAAC;CAChC"}
package/dist/ai.types.js
@@ -0,0 +1,2 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
package/dist/context/context.manager.d.ts
@@ -0,0 +1,69 @@
+ import { AIContext, AIMessage } from '../ai-enhanced.types';
+ /**
+  * AI Context Manager
+  * Manages conversation context and token limits
+  */
+ export declare class AIContextManager implements AIContext {
+     messages: AIMessage[];
+     maxTokens: number;
+     currentTokens: number;
+     private readonly TOKENS_PER_MESSAGE;
+     private readonly TOKENS_PER_NAME;
+     constructor(maxTokens?: number);
+     /**
+      * Add message to context
+      */
+     addMessage(message: AIMessage): void;
+     /**
+      * Get all messages
+      */
+     getMessages(): AIMessage[];
+     /**
+      * Clear all messages
+      */
+     clear(): void;
+     /**
+      * Trim messages to fit within token limit
+      * Keeps system messages and removes oldest user/assistant messages
+      */
+     trimToLimit(): void;
+     /**
+      * Estimate tokens for a message
+      * This is a rough estimation. For accurate counting, use tiktoken library
+      */
+     private estimateTokens;
+     /**
+      * Get context statistics
+      */
+     getStats(): {
+         messageCount: number;
+         currentTokens: number;
+         maxTokens: number;
+         utilizationPercent: number;
+     };
+     /**
+      * Set max tokens limit
+      */
+     setMaxTokens(maxTokens: number): void;
+     /**
+      * Get system messages
+      */
+     getSystemMessages(): AIMessage[];
+     /**
+      * Get conversation messages (user + assistant)
+      */
+     getConversationMessages(): AIMessage[];
+     /**
+      * Add system message
+      */
+     addSystemMessage(content: string): void;
+     /**
+      * Add user message
+      */
+     addUserMessage(content: string): void;
+     /**
+      * Add assistant message
+      */
+     addAssistantMessage(content: string): void;
+ }
+ //# sourceMappingURL=context.manager.d.ts.map
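The estimator behind these declarations charges a flat 4-token overhead per message, roughly one token per four characters of content, and 1 extra token when a name is present. The arithmetic for a short message, worked out:

// 'Hello, world!' is 13 characters:
// 4 (per-message overhead) + ceil(13 / 4) = 4 + 4 = 8 estimated tokens.
const content = 'Hello, world!';
const estimated = 4 + Math.ceil(content.length / 4);
console.log(estimated); // 8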
package/dist/context/context.manager.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"context.manager.d.ts","sourceRoot":"","sources":["../../src/context/context.manager.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,SAAS,EAAE,SAAS,EAAE,MAAM,sBAAsB,CAAC;AAG5D;;;GAGG;AACH,qBAAa,gBAAiB,YAAW,SAAS;IAChD,QAAQ,EAAE,SAAS,EAAE,CAAM;IAC3B,SAAS,EAAE,MAAM,CAAC;IAClB,aAAa,EAAE,MAAM,CAAK;IAC1B,OAAO,CAAC,QAAQ,CAAC,kBAAkB,CAAK;IACxC,OAAO,CAAC,QAAQ,CAAC,eAAe,CAAK;gBAEzB,SAAS,GAAE,MAAa;IAKpC;;OAEG;IACH,UAAU,CAAC,OAAO,EAAE,SAAS,GAAG,IAAI;IAiBpC;;OAEG;IACH,WAAW,IAAI,SAAS,EAAE;IAI1B;;OAEG;IACH,KAAK,IAAI,IAAI;IAMb;;;OAGG;IACH,WAAW,IAAI,IAAI;IAwCnB;;;OAGG;IACH,OAAO,CAAC,cAAc;IAoBtB;;OAEG;IACH,QAAQ,IAAI;QACV,YAAY,EAAE,MAAM,CAAC;QACrB,aAAa,EAAE,MAAM,CAAC;QACtB,SAAS,EAAE,MAAM,CAAC;QAClB,kBAAkB,EAAE,MAAM,CAAC;KAC5B;IASD;;OAEG;IACH,YAAY,CAAC,SAAS,EAAE,MAAM,GAAG,IAAI;IASrC;;OAEG;IACH,iBAAiB,IAAI,SAAS,EAAE;IAIhC;;OAEG;IACH,uBAAuB,IAAI,SAAS,EAAE;IAItC;;OAEG;IACH,gBAAgB,CAAC,OAAO,EAAE,MAAM,GAAG,IAAI;IAOvC;;OAEG;IACH,cAAc,CAAC,OAAO,EAAE,MAAM,GAAG,IAAI;IAOrC;;OAEG;IACH,mBAAmB,CAAC,OAAO,EAAE,MAAM,GAAG,IAAI;CAM3C"}
package/dist/context/context.manager.js
@@ -0,0 +1,168 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.AIContextManager = void 0;
+ const core_1 = __importDefault(require("@hazeljs/core"));
+ /**
+  * AI Context Manager
+  * Manages conversation context and token limits
+  */
+ class AIContextManager {
+     constructor(maxTokens = 4096) {
+         this.messages = [];
+         this.currentTokens = 0;
+         this.TOKENS_PER_MESSAGE = 4; // Approximate overhead per message
+         this.TOKENS_PER_NAME = 1; // Approximate overhead for name field
+         this.maxTokens = maxTokens;
+         core_1.default.debug(`AI Context Manager initialized with max tokens: ${maxTokens}`);
+     }
+     /**
+      * Add message to context
+      */
+     addMessage(message) {
+         const tokens = this.estimateTokens(message);
+         this.messages.push(message);
+         this.currentTokens += tokens;
+         core_1.default.debug(`Message added to context`, {
+             role: message.role,
+             tokens,
+             totalTokens: this.currentTokens,
+         });
+         // Auto-trim if exceeds limit
+         if (this.currentTokens > this.maxTokens) {
+             this.trimToLimit();
+         }
+     }
+     /**
+      * Get all messages
+      */
+     getMessages() {
+         return [...this.messages];
+     }
+     /**
+      * Clear all messages
+      */
+     clear() {
+         this.messages = [];
+         this.currentTokens = 0;
+         core_1.default.debug('Context cleared');
+     }
+     /**
+      * Trim messages to fit within token limit
+      * Keeps system messages and removes oldest user/assistant messages
+      */
+     trimToLimit() {
+         core_1.default.debug('Trimming context to fit token limit');
+         // Separate system messages from conversation messages
+         const systemMessages = this.messages.filter((m) => m.role === 'system');
+         const conversationMessages = this.messages.filter((m) => m.role !== 'system');
+         // Calculate tokens for system messages
+         const systemTokens = systemMessages.reduce((sum, msg) => sum + this.estimateTokens(msg), 0);
+         // Available tokens for conversation
+         const availableTokens = this.maxTokens - systemTokens;
+         // Keep most recent messages that fit
+         const keptMessages = [];
+         let conversationTokens = 0;
+         for (let i = conversationMessages.length - 1; i >= 0; i--) {
+             const msg = conversationMessages[i];
+             const tokens = this.estimateTokens(msg);
+             if (conversationTokens + tokens <= availableTokens) {
+                 keptMessages.unshift(msg);
+                 conversationTokens += tokens;
+             }
+             else {
+                 break;
+             }
+         }
+         // Combine system messages with kept conversation messages
+         this.messages = [...systemMessages, ...keptMessages];
+         this.currentTokens = systemTokens + conversationTokens;
+         core_1.default.debug('Context trimmed', {
+             removedMessages: conversationMessages.length - keptMessages.length,
+             remainingMessages: this.messages.length,
+             currentTokens: this.currentTokens,
+         });
+     }
+     /**
+      * Estimate tokens for a message
+      * This is a rough estimation. For accurate counting, use tiktoken library
+      */
+     estimateTokens(message) {
+         let tokens = this.TOKENS_PER_MESSAGE;
+         // Add tokens for content (rough estimate: 1 token ≈ 4 characters)
+         tokens += Math.ceil(message.content.length / 4);
+         // Add tokens for name if present
+         if (message.name) {
+             tokens += this.TOKENS_PER_NAME;
+         }
+         // Add tokens for function call if present
+         if (message.functionCall) {
+             tokens += Math.ceil(message.functionCall.name.length / 4);
+             tokens += Math.ceil(message.functionCall.arguments.length / 4);
+         }
+         return tokens;
+     }
+     /**
+      * Get context statistics
+      */
+     getStats() {
+         return {
+             messageCount: this.messages.length,
+             currentTokens: this.currentTokens,
+             maxTokens: this.maxTokens,
+             utilizationPercent: Math.round((this.currentTokens / this.maxTokens) * 100),
+         };
+     }
+     /**
+      * Set max tokens limit
+      */
+     setMaxTokens(maxTokens) {
+         this.maxTokens = maxTokens;
+         core_1.default.debug(`Max tokens updated to: ${maxTokens}`);
+         if (this.currentTokens > maxTokens) {
+             this.trimToLimit();
+         }
+     }
+     /**
+      * Get system messages
+      */
+     getSystemMessages() {
+         return this.messages.filter((m) => m.role === 'system');
+     }
+     /**
+      * Get conversation messages (user + assistant)
+      */
+     getConversationMessages() {
+         return this.messages.filter((m) => m.role === 'user' || m.role === 'assistant');
+     }
+     /**
+      * Add system message
+      */
+     addSystemMessage(content) {
+         this.addMessage({
+             role: 'system',
+             content,
+         });
+     }
+     /**
+      * Add user message
+      */
+     addUserMessage(content) {
+         this.addMessage({
+             role: 'user',
+             content,
+         });
+     }
+     /**
+      * Add assistant message
+      */
+     addAssistantMessage(content) {
+         this.addMessage({
+             role: 'assistant',
+             content,
+         });
+     }
+ }
+ exports.AIContextManager = AIContextManager;
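A usage sketch of the manager (the import path assumes a root re-export via dist/index.js): messages are appended with their estimated cost, and once currentTokens exceeds maxTokens the oldest user/assistant messages are evicted while system messages always survive.

import { AIContextManager } from '@hazeljs/ai';

// A deliberately tiny budget so trimming is easy to observe.
const ctx = new AIContextManager(32);

ctx.addSystemMessage('You are a terse assistant.'); // never trimmed
ctx.addUserMessage('An early question that will be evicted first.');
ctx.addAssistantMessage('An early answer.');
ctx.addUserMessage('The most recent question.');

// trimToLimit() already ran inside addMessage(); only the system message
// plus the newest turns that fit within the 32-token budget remain.
console.log(ctx.getStats());
console.log(ctx.getConversationMessages().length);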
package/dist/decorators/ai-function.decorator.d.ts
@@ -0,0 +1,42 @@
+ import 'reflect-metadata';
+ import { AIFunctionOptions } from '../ai-enhanced.types';
+ /**
+  * AIFunction decorator for AI-powered methods
+  *
+  * @example
+  * ```typescript
+  * @AIFunction({
+  *   provider: 'openai',
+  *   model: 'gpt-4',
+  *   streaming: true
+  * })
+  * async generateContent(@AIPrompt() prompt: string) {
+  *   // Auto-handled by framework
+  * }
+  * ```
+  */
+ export declare function AIFunction(options: AIFunctionOptions): MethodDecorator;
+ /**
+  * Get AI function metadata
+  */
+ export declare function getAIFunctionMetadata(target: object, propertyKey: string | symbol): AIFunctionOptions | undefined;
+ /**
+  * Check if method has AI function metadata
+  */
+ export declare function hasAIFunctionMetadata(target: object, propertyKey: string | symbol): boolean;
+ /**
+  * AIPrompt parameter decorator
+  *
+  * @example
+  * ```typescript
+  * async generateContent(@AIPrompt() prompt: string) {
+  *   // prompt parameter is marked for AI processing
+  * }
+  * ```
+  */
+ export declare function AIPrompt(): ParameterDecorator;
+ /**
+  * Get AI prompt parameter metadata
+  */
+ export declare function getAIPromptMetadata(target: object, propertyKey: string | symbol): number[];
+ //# sourceMappingURL=ai-function.decorator.d.ts.map
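Beyond the decorators' own @example blocks, the exported helpers make the wiring inspectable at runtime. A sketch of reading the metadata back (the service class is invented; the options shape follows the decorator's own example):

import 'reflect-metadata';
import {
  AIFunction,
  AIPrompt,
  getAIFunctionMetadata,
  hasAIFunctionMetadata,
  getAIPromptMetadata,
} from '@hazeljs/ai';

class ContentService {
  @AIFunction({ provider: 'openai', model: 'gpt-4', streaming: true })
  async generateContent(@AIPrompt() prompt: string): Promise<string | void> {
    // Body intentionally empty: handled by the framework per the docs above.
  }
}

const proto = ContentService.prototype;
console.log(hasAIFunctionMetadata(proto, 'generateContent')); // true
console.log(getAIFunctionMetadata(proto, 'generateContent')); // { provider: 'openai', model: 'gpt-4', streaming: true }
console.log(getAIPromptMetadata(proto, 'generateContent'));   // indexes of @AIPrompt parameters, e.g. [0]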
package/dist/decorators/ai-function.decorator.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"ai-function.decorator.d.ts","sourceRoot":"","sources":["../../src/decorators/ai-function.decorator.ts"],"names":[],"mappings":"AAAA,OAAO,kBAAkB,CAAC;AAC1B,OAAO,EAAE,iBAAiB,EAAE,MAAM,sBAAsB,CAAC;AAMzD;;;;;;;;;;;;;;GAcG;AACH,wBAAgB,UAAU,CAAC,OAAO,EAAE,iBAAiB,GAAG,eAAe,CAgBtE;AAED;;GAEG;AACH,wBAAgB,qBAAqB,CACnC,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,MAAM,GAAG,MAAM,GAC3B,iBAAiB,GAAG,SAAS,CAE/B;AAED;;GAEG;AACH,wBAAgB,qBAAqB,CAAC,MAAM,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,GAAG,OAAO,CAE3F;AAED;;;;;;;;;GASG;AACH,wBAAgB,QAAQ,IAAI,kBAAkB,CAM7C;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,MAAM,EAAE,MAAM,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,EAAE,CAK1F"}