@hazeljs/ai 0.2.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +496 -0
  2. package/dist/ai-enhanced.service.d.ts +108 -0
  3. package/dist/ai-enhanced.service.d.ts.map +1 -0
  4. package/dist/ai-enhanced.service.js +345 -0
  5. package/dist/ai-enhanced.types.d.ts +269 -0
  6. package/dist/ai-enhanced.types.d.ts.map +1 -0
  7. package/dist/ai-enhanced.types.js +2 -0
  8. package/dist/ai.decorator.d.ts +4 -0
  9. package/dist/ai.decorator.d.ts.map +1 -0
  10. package/dist/ai.decorator.js +57 -0
  11. package/dist/ai.module.d.ts +12 -0
  12. package/dist/ai.module.d.ts.map +1 -0
  13. package/dist/ai.module.js +44 -0
  14. package/dist/ai.service.d.ts +10 -0
  15. package/dist/ai.service.d.ts.map +1 -0
  16. package/dist/ai.service.js +261 -0
  17. package/dist/ai.types.d.ts +30 -0
  18. package/dist/ai.types.d.ts.map +1 -0
  19. package/dist/ai.types.js +2 -0
  20. package/dist/context/context.manager.d.ts +69 -0
  21. package/dist/context/context.manager.d.ts.map +1 -0
  22. package/dist/context/context.manager.js +168 -0
  23. package/dist/decorators/ai-function.decorator.d.ts +42 -0
  24. package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
  25. package/dist/decorators/ai-function.decorator.js +80 -0
  26. package/dist/decorators/ai-validate.decorator.d.ts +46 -0
  27. package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
  28. package/dist/decorators/ai-validate.decorator.js +83 -0
  29. package/dist/index.d.ts +17 -0
  30. package/dist/index.d.ts.map +1 -0
  31. package/dist/index.js +38 -0
  32. package/dist/providers/anthropic.provider.d.ts +48 -0
  33. package/dist/providers/anthropic.provider.d.ts.map +1 -0
  34. package/dist/providers/anthropic.provider.js +194 -0
  35. package/dist/providers/cohere.provider.d.ts +57 -0
  36. package/dist/providers/cohere.provider.d.ts.map +1 -0
  37. package/dist/providers/cohere.provider.js +230 -0
  38. package/dist/providers/gemini.provider.d.ts +45 -0
  39. package/dist/providers/gemini.provider.d.ts.map +1 -0
  40. package/dist/providers/gemini.provider.js +180 -0
  41. package/dist/providers/ollama.provider.d.ts +45 -0
  42. package/dist/providers/ollama.provider.d.ts.map +1 -0
  43. package/dist/providers/ollama.provider.js +232 -0
  44. package/dist/providers/openai.provider.d.ts +47 -0
  45. package/dist/providers/openai.provider.d.ts.map +1 -0
  46. package/dist/providers/openai.provider.js +273 -0
  47. package/dist/tracking/token.tracker.d.ts +72 -0
  48. package/dist/tracking/token.tracker.d.ts.map +1 -0
  49. package/dist/tracking/token.tracker.js +222 -0
  50. package/dist/vector/vector.service.d.ts +50 -0
  51. package/dist/vector/vector.service.d.ts.map +1 -0
  52. package/dist/vector/vector.service.js +163 -0
  53. package/package.json +52 -0
package/dist/providers/ollama.provider.js
@@ -0,0 +1,232 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OllamaProvider = void 0;
+ const core_1 = __importDefault(require("@hazeljs/core"));
+ /**
+  * Ollama Provider
+  * Production-ready implementation for local LLM support via Ollama
+  * Supports models like Llama 2, Mistral, CodeLlama, and other open-source models
+  */
+ class OllamaProvider {
+     constructor(config) {
+         this.name = 'ollama';
+         this.baseURL = config?.baseURL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
+         this.defaultModel = config?.defaultModel || 'llama2';
+         core_1.default.info(`Ollama provider initialized with base URL: ${this.baseURL}`);
+     }
+     /**
+      * Transform messages to Ollama prompt format
+      */
+     transformMessages(messages) {
+         return messages
+             .map((msg) => {
+             const role = msg.role === 'assistant' ? 'Assistant' : msg.role === 'system' ? 'System' : 'User';
+             return `${role}: ${msg.content}`;
+         })
+             .join('\n\n');
+     }
+     /**
+      * Generate completion
+      */
+     async complete(request) {
+         try {
+             const model = request.model || this.defaultModel;
+             core_1.default.debug(`Ollama completion request for model: ${model}`);
+             const prompt = this.transformMessages(request.messages);
+             const ollamaRequest = {
+                 model,
+                 prompt,
+                 stream: false,
+                 temperature: request.temperature ?? 0.7,
+                 num_predict: request.maxTokens,
+                 top_p: request.topP,
+             };
+             const response = await fetch(`${this.baseURL}/api/generate`, {
+                 method: 'POST',
+                 headers: { 'Content-Type': 'application/json' },
+                 body: JSON.stringify(ollamaRequest),
+             });
+             if (!response.ok) {
+                 const errorText = await response.text();
+                 throw new Error(`Ollama API error: ${response.status} ${errorText}`);
+             }
+             const data = (await response.json());
+             return {
+                 id: `ollama-${Date.now()}`,
+                 content: data.response,
+                 role: 'assistant',
+                 model: data.model,
+                 usage: {
+                     promptTokens: data.prompt_eval_count || 0,
+                     completionTokens: data.eval_count || 0,
+                     totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0),
+                 },
+                 finishReason: data.done ? 'stop' : 'length',
+             };
+         }
+         catch (error) {
+             core_1.default.error('Ollama completion error:', error);
+             throw error;
+         }
+     }
+     /**
+      * Generate streaming completion
+      */
+     async *streamComplete(request) {
+         try {
+             const model = request.model || this.defaultModel;
+             core_1.default.debug(`Ollama streaming completion request for model: ${model}`);
+             const prompt = this.transformMessages(request.messages);
+             const ollamaRequest = {
+                 model,
+                 prompt,
+                 stream: true,
+                 temperature: request.temperature ?? 0.7,
+                 num_predict: request.maxTokens,
+                 top_p: request.topP,
+             };
+             const response = await fetch(`${this.baseURL}/api/generate`, {
+                 method: 'POST',
+                 headers: { 'Content-Type': 'application/json' },
+                 body: JSON.stringify(ollamaRequest),
+             });
+             if (!response.ok) {
+                 const errorText = await response.text();
+                 throw new Error(`Ollama API error: ${response.status} ${errorText}`);
+             }
+             if (!response.body) {
+                 throw new Error('No response body available for streaming');
+             }
+             const reader = response.body.getReader();
+             const decoder = new TextDecoder();
+             let fullContent = '';
+             let totalPromptTokens = 0;
+             let totalCompletionTokens = 0;
+             const chunkId = `ollama-${Date.now()}`;
+             try {
+                 while (true) {
+                     const { done, value } = await reader.read();
+                     if (done)
+                         break;
+                     const chunk = decoder.decode(value, { stream: true });
+                     const lines = chunk.split('\n').filter(Boolean);
+                     for (const line of lines) {
+                         try {
+                             const data = JSON.parse(line);
+                             if (data.response) {
+                                 fullContent += data.response;
+                                 totalPromptTokens = data.prompt_eval_count || totalPromptTokens;
+                                 totalCompletionTokens = data.eval_count || totalCompletionTokens;
+                                 yield {
+                                     id: chunkId,
+                                     content: fullContent,
+                                     delta: data.response,
+                                     done: data.done || false,
+                                     usage: {
+                                         promptTokens: totalPromptTokens,
+                                         completionTokens: totalCompletionTokens,
+                                         totalTokens: totalPromptTokens + totalCompletionTokens,
+                                     },
+                                 };
+                             }
+                             if (data.done) {
+                                 return;
+                             }
+                         }
+                         catch {
+                             // Skip invalid JSON lines
+                             continue;
+                         }
+                     }
+                 }
+             }
+             finally {
+                 reader.releaseLock();
+             }
+         }
+         catch (error) {
+             core_1.default.error('Ollama streaming error:', error);
+             throw error;
+         }
+     }
+     /**
+      * Generate embeddings
+      */
+     async embed(request) {
+         try {
+             const model = request.model || this.defaultModel;
+             core_1.default.debug(`Ollama embedding request for model: ${model}`);
+             const input = Array.isArray(request.input) ? request.input[0] : request.input;
+             const response = await fetch(`${this.baseURL}/api/embeddings`, {
+                 method: 'POST',
+                 headers: { 'Content-Type': 'application/json' },
+                 body: JSON.stringify({
+                     model,
+                     prompt: input,
+                 }),
+             });
+             if (!response.ok) {
+                 const errorText = await response.text();
+                 throw new Error(`Ollama API error: ${response.status} ${errorText}`);
+             }
+             const data = (await response.json());
+             return {
+                 embeddings: [data.embedding],
+                 model,
+                 usage: {
+                     promptTokens: 0, // Ollama doesn't provide token usage for embeddings
+                     totalTokens: 0,
+                 },
+             };
+         }
+         catch (error) {
+             core_1.default.error('Ollama embedding error:', error);
+             throw error;
+         }
+     }
+     /**
+      * Check if provider is available
+      */
+     async isAvailable() {
+         try {
+             const response = await fetch(`${this.baseURL}/api/tags`, {
+                 method: 'GET',
+             });
+             return response.ok;
+         }
+         catch {
+             return false;
+         }
+     }
+     /**
+      * Get supported models
+      * Note: This returns common models, but Ollama supports any model you pull
+      */
+     getSupportedModels() {
+         return [
+             'llama2',
+             'llama2:13b',
+             'llama2:70b',
+             'mistral',
+             'mixtral',
+             'codellama',
+             'neural-chat',
+             'starling-lm',
+             'phi',
+             'orca-mini',
+             'vicuna',
+             'wizardcoder',
+             'wizard-vicuna',
+         ];
+     }
+     /**
+      * Get supported embedding models
+      */
+     getSupportedEmbeddingModels() {
+         return ['llama2', 'mistral', 'nomic-embed-text'];
+     }
+ }
+ exports.OllamaProvider = OllamaProvider;
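
A minimal usage sketch of the OllamaProvider added above, assuming a local Ollama server on the default port with the llama2 model already pulled, and assuming the class is re-exported from the package root (the import path is illustrative):

import { OllamaProvider } from '@hazeljs/ai'; // illustrative import path

const ollama = new OllamaProvider({ baseURL: 'http://localhost:11434' });

async function main() {
    // Guard on availability first (this hits GET /api/tags on the local server).
    if (!(await ollama.isAvailable())) {
        throw new Error('Ollama server is not reachable');
    }
    const response = await ollama.complete({
        model: 'llama2',
        messages: [
            { role: 'system', content: 'You are a concise assistant.' },
            { role: 'user', content: 'Summarize what a vector database does.' },
        ],
        temperature: 0.2,
        maxTokens: 128,
    });
    console.log(response.content, response.usage);
}

main().catch(console.error);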
package/dist/providers/openai.provider.d.ts
@@ -0,0 +1,47 @@
+ import { IAIProvider, AIProvider, AICompletionRequest, AICompletionResponse, AIStreamChunk, AIEmbeddingRequest, AIEmbeddingResponse } from '../ai-enhanced.types';
+ /**
+  * OpenAI Provider
+  * Production-ready implementation with full OpenAI API support
+  */
+ export declare class OpenAIProvider implements IAIProvider {
+     readonly name: AIProvider;
+     private client;
+     private defaultModel;
+     constructor(apiKey?: string, config?: {
+         baseURL?: string;
+         defaultModel?: string;
+     });
+     /**
+      * Generate completion
+      */
+     complete(request: AICompletionRequest): Promise<AICompletionResponse>;
+     /**
+      * Generate streaming completion
+      */
+     streamComplete(request: AICompletionRequest): AsyncGenerator<AIStreamChunk>;
+     /**
+      * Generate embeddings
+      */
+     embed(request: AIEmbeddingRequest): Promise<AIEmbeddingResponse>;
+     /**
+      * Check if provider is available
+      */
+     isAvailable(): Promise<boolean>;
+     /**
+      * Get supported models
+      */
+     getSupportedModels(): string[];
+     /**
+      * Get supported embedding models
+      */
+     getSupportedEmbeddingModels(): string[];
+     /**
+      * Transform messages to OpenAI format
+      */
+     private transformMessages;
+     /**
+      * Handle OpenAI errors
+      */
+     private handleError;
+ }
+ //# sourceMappingURL=openai.provider.d.ts.map
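
Because every provider implements the same IAIProvider contract declared above, calling code can stay provider-agnostic. A minimal sketch under that assumption — completeWithFallback is a hypothetical helper, not part of this package, and the import path assumes the types are re-exported from the package root:

import { IAIProvider, AICompletionRequest, AICompletionResponse } from '@hazeljs/ai';

// Hypothetical helper: try providers in order and use the first one that responds.
async function completeWithFallback(
    providers: IAIProvider[],
    request: AICompletionRequest,
): Promise<AICompletionResponse> {
    for (const provider of providers) {
        if (await provider.isAvailable()) {
            return provider.complete(request);
        }
    }
    throw new Error('No AI provider is currently available');
}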
package/dist/providers/openai.provider.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"openai.provider.d.ts","sourceRoot":"","sources":["../../src/providers/openai.provider.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,WAAW,EACX,UAAU,EACV,mBAAmB,EACnB,oBAAoB,EACpB,aAAa,EACb,kBAAkB,EAClB,mBAAmB,EAEpB,MAAM,sBAAsB,CAAC;AAK9B;;;GAGG;AACH,qBAAa,cAAe,YAAW,WAAW;IAChD,QAAQ,CAAC,IAAI,EAAE,UAAU,CAAY;IACrC,OAAO,CAAC,MAAM,CAA8B;IAC5C,OAAO,CAAC,YAAY,CAAS;gBAEjB,MAAM,CAAC,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE;QAAE,OAAO,CAAC,EAAE,MAAM,CAAC;QAAC,YAAY,CAAC,EAAE,MAAM,CAAA;KAAE;IASjF;;OAEG;IACG,QAAQ,CAAC,OAAO,EAAE,mBAAmB,GAAG,OAAO,CAAC,oBAAoB,CAAC;IA8E3E;;OAEG;IACI,cAAc,CAAC,OAAO,EAAE,mBAAmB,GAAG,cAAc,CAAC,aAAa,CAAC;IA2DlF;;OAEG;IACG,KAAK,CAAC,OAAO,EAAE,kBAAkB,GAAG,OAAO,CAAC,mBAAmB,CAAC;IAgCtE;;OAEG;IACG,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC;IAWrC;;OAEG;IACH,kBAAkB,IAAI,MAAM,EAAE;IAa9B;;OAEG;IACH,2BAA2B,IAAI,MAAM,EAAE;IAIvC;;OAEG;IACH,OAAO,CAAC,iBAAiB;IAgDzB;;OAEG;IACH,OAAO,CAAC,WAAW;CAcpB"}
package/dist/providers/openai.provider.js
@@ -0,0 +1,273 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+     return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OpenAIProvider = void 0;
+ const core_1 = __importDefault(require("@hazeljs/core"));
+ const openai_1 = __importDefault(require("openai"));
+ /**
+  * OpenAI Provider
+  * Production-ready implementation with full OpenAI API support
+  */
+ class OpenAIProvider {
+     constructor(apiKey, config) {
+         this.name = 'openai';
+         this.client = new openai_1.default({
+             apiKey: apiKey || process.env.OPENAI_API_KEY,
+             baseURL: config?.baseURL,
+         });
+         this.defaultModel = config?.defaultModel || 'gpt-4-turbo-preview';
+         core_1.default.info('OpenAI provider initialized');
+     }
+     /**
+      * Generate completion
+      */
+     async complete(request) {
+         try {
+             core_1.default.debug(`OpenAI completion request for model: ${request.model || this.defaultModel}`);
+             const messages = this.transformMessages(request.messages);
+             // Build tools array from functions (modern API)
+             const tools = request.functions?.map((fn) => ({
+                 type: 'function',
+                 function: fn,
+             }));
+             const response = await this.client.chat.completions.create({
+                 model: request.model || this.defaultModel,
+                 messages,
+                 temperature: request.temperature ?? 0.7,
+                 max_tokens: request.maxTokens,
+                 top_p: request.topP,
+                 tools: tools && tools.length > 0 ? tools : undefined,
+                 tool_choice: request.functionCall === 'auto' ? 'auto' : request.functionCall === 'none' ? 'none' : undefined,
+             });
+             const choice = response.choices[0];
+             if (!choice) {
+                 throw new Error('No completion choice returned');
+             }
+             // Extract tool calls from the modern tool_calls response
+             // Filter to function-type calls and cast to access .function safely
+             const rawToolCalls = choice.message.tool_calls;
+             const functionCalls = rawToolCalls?.filter(tc => tc.type === 'function');
+             const firstToolCall = functionCalls?.[0];
+             const result = {
+                 id: response.id,
+                 content: choice.message.content || '',
+                 role: 'assistant',
+                 model: response.model,
+                 usage: response.usage
+                     ? {
+                         promptTokens: response.usage.prompt_tokens,
+                         completionTokens: response.usage.completion_tokens,
+                         totalTokens: response.usage.total_tokens,
+                     }
+                     : undefined,
+                 functionCall: firstToolCall
+                     ? {
+                         name: firstToolCall.function.name,
+                         arguments: firstToolCall.function.arguments,
+                     }
+                     : undefined,
+                 toolCalls: functionCalls?.map(tc => ({
+                     id: tc.id,
+                     type: 'function',
+                     function: {
+                         name: tc.function.name,
+                         arguments: tc.function.arguments,
+                     },
+                 })),
+                 finishReason: choice.finish_reason,
+             };
+             core_1.default.debug('OpenAI completion successful', {
+                 tokens: result.usage?.totalTokens,
+                 finishReason: result.finishReason,
+             });
+             return result;
+         }
+         catch (error) {
+             core_1.default.error('OpenAI completion failed:', error);
+             throw this.handleError(error);
+         }
+     }
+     /**
+      * Generate streaming completion
+      */
+     async *streamComplete(request) {
+         try {
+             core_1.default.debug('OpenAI streaming completion started');
+             const messages = this.transformMessages(request.messages);
+             const stream = await this.client.chat.completions.create({
+                 model: request.model || this.defaultModel,
+                 messages,
+                 temperature: request.temperature ?? 0.7,
+                 max_tokens: request.maxTokens,
+                 top_p: request.topP,
+                 stream: true,
+             });
+             let fullContent = '';
+             let chunkId = '';
+             for await (const chunk of stream) {
+                 const delta = chunk.choices[0]?.delta;
+                 const content = delta?.content || '';
+                 if (content) {
+                     fullContent += content;
+                     chunkId = chunk.id;
+                     yield {
+                         id: chunk.id,
+                         content: fullContent,
+                         delta: content,
+                         done: false,
+                     };
+                 }
+                 // Check if stream is done
+                 if (chunk.choices[0]?.finish_reason) {
+                     yield {
+                         id: chunkId,
+                         content: fullContent,
+                         delta: '',
+                         done: true,
+                         usage: chunk.usage
+                             ? {
+                                 promptTokens: chunk.usage.prompt_tokens,
+                                 completionTokens: chunk.usage.completion_tokens,
+                                 totalTokens: chunk.usage.total_tokens,
+                             }
+                             : undefined,
+                     };
+                 }
+             }
+             core_1.default.debug('OpenAI streaming completed');
+         }
+         catch (error) {
+             core_1.default.error('OpenAI streaming failed:', error);
+             throw this.handleError(error);
+         }
+     }
+     /**
+      * Generate embeddings
+      */
+     async embed(request) {
+         try {
+             core_1.default.debug('OpenAI embedding request');
+             const input = Array.isArray(request.input) ? request.input : [request.input];
+             const response = await this.client.embeddings.create({
+                 model: request.model || 'text-embedding-3-small',
+                 input,
+             });
+             const result = {
+                 embeddings: response.data.map((item) => item.embedding),
+                 model: response.model,
+                 usage: {
+                     promptTokens: response.usage.prompt_tokens,
+                     totalTokens: response.usage.total_tokens,
+                 },
+             };
+             core_1.default.debug('OpenAI embedding successful', {
+                 count: result.embeddings.length,
+                 dimensions: result.embeddings[0]?.length,
+             });
+             return result;
+         }
+         catch (error) {
+             core_1.default.error('OpenAI embedding failed:', error);
+             throw this.handleError(error);
+         }
+     }
+     /**
+      * Check if provider is available
+      */
+     async isAvailable() {
+         try {
+             // Make a minimal API call to check availability
+             await this.client.models.list();
+             return true;
+         }
+         catch (error) {
+             core_1.default.warn('OpenAI provider not available:', error);
+             return false;
+         }
+     }
+     /**
+      * Get supported models
+      */
+     getSupportedModels() {
+         return [
+             'gpt-4-turbo-preview',
+             'gpt-4-0125-preview',
+             'gpt-4-1106-preview',
+             'gpt-4',
+             'gpt-4-0613',
+             'gpt-3.5-turbo',
+             'gpt-3.5-turbo-0125',
+             'gpt-3.5-turbo-1106',
+         ];
+     }
+     /**
+      * Get supported embedding models
+      */
+     getSupportedEmbeddingModels() {
+         return ['text-embedding-3-small', 'text-embedding-3-large', 'text-embedding-ada-002'];
+     }
+     /**
+      * Transform messages to OpenAI format
+      */
+     transformMessages(messages) {
+         return messages.map((msg) => {
+             // Map legacy 'function' role to modern 'tool' role
+             if (msg.role === 'function' || msg.role === 'tool') {
+                 return {
+                     role: 'tool',
+                     content: msg.content,
+                     tool_call_id: msg.toolCallId || msg.name || 'unknown',
+                 };
+             }
+             if (msg.role === 'assistant' && (msg.functionCall || msg.toolCalls)) {
+                 return {
+                     role: 'assistant',
+                     content: msg.content || null,
+                     tool_calls: msg.toolCalls || (msg.functionCall ? [{
+                             id: msg.functionCall.name,
+                             type: 'function',
+                             function: {
+                                 name: msg.functionCall.name,
+                                 arguments: msg.functionCall.arguments,
+                             },
+                         }] : undefined),
+                 };
+             }
+             if (msg.role === 'system') {
+                 return {
+                     role: 'system',
+                     content: msg.content,
+                 };
+             }
+             if (msg.role === 'user') {
+                 return {
+                     role: 'user',
+                     content: msg.content,
+                 };
+             }
+             // Default to assistant
+             return {
+                 role: 'assistant',
+                 content: msg.content,
+             };
+         });
+     }
+     /**
+      * Handle OpenAI errors
+      */
+     handleError(error) {
+         if (error && typeof error === 'object' && 'status' in error && 'message' in error) {
+             const apiError = error;
+             const status = apiError.status ?? 'unknown';
+             const message = `OpenAI API Error (${status}): ${apiError.message ?? 'Unknown error'}`;
+             return new Error(message);
+         }
+         if (error instanceof Error) {
+             return error;
+         }
+         return new Error('Unknown OpenAI error');
+     }
+ }
+ exports.OpenAIProvider = OpenAIProvider;
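
A short sketch of consuming streamComplete from the implementation above, assuming OPENAI_API_KEY is set and the class is re-exported from the package root (import path illustrative). Each yielded chunk carries the incremental delta alongside the accumulated content, so a consumer can print only the new text; the final chunk's usage may be undefined, since OpenAI does not always report usage on streamed responses:

import { OpenAIProvider } from '@hazeljs/ai'; // illustrative import path

const openai = new OpenAIProvider(process.env.OPENAI_API_KEY);

async function streamDemo() {
    for await (const chunk of openai.streamComplete({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: 'Write a haiku about type safety.' }],
    })) {
        process.stdout.write(chunk.delta); // print only the newly streamed text
        if (chunk.done) {
            console.log('\nTokens used:', chunk.usage?.totalTokens ?? 'n/a');
        }
    }
}

streamDemo().catch(console.error);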
package/dist/tracking/token.tracker.d.ts
@@ -0,0 +1,72 @@
+ import { TokenUsage, TokenLimitConfig } from '../ai-enhanced.types';
+ /**
+  * Token Usage Tracker
+  * Tracks and limits token usage per user/request
+  */
+ export declare class TokenTracker {
+     private usageHistory;
+     private config;
+     private userUsage;
+     private readonly TOKEN_COSTS;
+     constructor(config?: TokenLimitConfig);
+     /**
+      * Track token usage
+      */
+     track(usage: TokenUsage, model?: string): void;
+     /**
+      * Check if request is within limits
+      */
+     checkLimits(userId?: string, requestTokens?: number): Promise<{
+         allowed: boolean;
+         reason?: string;
+         usage?: {
+             today: number;
+             month: number;
+             limit: {
+                 daily: number;
+                 monthly: number;
+             };
+         };
+     }>;
+     /**
+      * Calculate cost for token usage
+      */
+     calculateCost(usage: TokenUsage, model: string): number;
+     /**
+      * Get usage statistics for a user
+      */
+     getUserStats(userId: string, days?: number): {
+         totalTokens: number;
+         totalCost: number;
+         requestCount: number;
+         averageTokensPerRequest: number;
+         dailyAverage: number;
+     };
+     /**
+      * Get global statistics
+      */
+     getGlobalStats(days?: number): {
+         totalTokens: number;
+         totalCost: number;
+         requestCount: number;
+         uniqueUsers: number;
+         topUsers: Array<{
+             userId: string;
+             tokens: number;
+             cost: number;
+         }>;
+     };
+     /**
+      * Clear old usage data
+      */
+     cleanup(daysToKeep?: number): void;
+     /**
+      * Export usage data
+      */
+     exportData(userId?: string): TokenUsage[];
+     /**
+      * Update configuration
+      */
+     updateConfig(config: Partial<TokenLimitConfig>): void;
+ }
+ //# sourceMappingURL=token.tracker.d.ts.map
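
The tracker is designed to be consulted before a request and fed afterwards. A minimal sketch, assuming the class is re-exported from the package root; TokenUsage's full shape (including any per-user attribution field) lives in ai-enhanced.types, so only the token-count fields visible in this diff are used here:

import { TokenTracker } from '@hazeljs/ai'; // illustrative import path

const tracker = new TokenTracker();

async function guardedCompletion(userId: string) {
    // Ask whether this user may spend roughly 500 more tokens under the daily/monthly limits.
    const { allowed, reason } = await tracker.checkLimits(userId, 500);
    if (!allowed) {
        throw new Error(`Token limit reached: ${reason}`);
    }
    // ...run the completion, then record what it actually cost:
    tracker.track({ promptTokens: 420, completionTokens: 80, totalTokens: 500 }, 'gpt-3.5-turbo');
    // Per-user stats over the last 7 days (assumes tracked usage is attributable to the user).
    console.log(tracker.getUserStats(userId, 7));
}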
package/dist/tracking/token.tracker.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"token.tracker.d.ts","sourceRoot":"","sources":["../../src/tracking/token.tracker.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,UAAU,EAAE,gBAAgB,EAAE,MAAM,sBAAsB,CAAC;AAIpE;;;GAGG;AACH,qBACa,YAAY;IACvB,OAAO,CAAC,YAAY,CAAoB;IACxC,OAAO,CAAC,MAAM,CAAmB;IACjC,OAAO,CAAC,SAAS,CAAwC;IAGzD,OAAO,CAAC,QAAQ,CAAC,WAAW,CAO1B;gBAEU,MAAM,CAAC,EAAE,gBAAgB;IAUrC;;OAEG;IACH,KAAK,CAAC,KAAK,EAAE,UAAU,EAAE,KAAK,CAAC,EAAE,MAAM,GAAG,IAAI;IAsB9C;;OAEG;IACG,WAAW,CACf,MAAM,CAAC,EAAE,MAAM,EACf,aAAa,CAAC,EAAE,MAAM,GACrB,OAAO,CAAC;QACT,OAAO,EAAE,OAAO,CAAC;QACjB,MAAM,CAAC,EAAE,MAAM,CAAC;QAChB,KAAK,CAAC,EAAE;YACN,KAAK,EAAE,MAAM,CAAC;YACd,KAAK,EAAE,MAAM,CAAC;YACd,KAAK,EAAE;gBACL,KAAK,EAAE,MAAM,CAAC;gBACd,OAAO,EAAE,MAAM,CAAC;aACjB,CAAC;SACH,CAAC;KACH,CAAC;IA2DF;;OAEG;IACH,aAAa,CAAC,KAAK,EAAE,UAAU,EAAE,KAAK,EAAE,MAAM,GAAG,MAAM;IAavD;;OAEG;IACH,YAAY,CACV,MAAM,EAAE,MAAM,EACd,IAAI,GAAE,MAAW,GAChB;QACD,WAAW,EAAE,MAAM,CAAC;QACpB,SAAS,EAAE,MAAM,CAAC;QAClB,YAAY,EAAE,MAAM,CAAC;QACrB,uBAAuB,EAAE,MAAM,CAAC;QAChC,YAAY,EAAE,MAAM,CAAC;KACtB;IAiBD;;OAEG;IACH,cAAc,CAAC,IAAI,GAAE,MAAW,GAAG;QACjC,WAAW,EAAE,MAAM,CAAC;QACpB,SAAS,EAAE,MAAM,CAAC;QAClB,YAAY,EAAE,MAAM,CAAC;QACrB,WAAW,EAAE,MAAM,CAAC;QACpB,QAAQ,EAAE,KAAK,CAAC;YAAE,MAAM,EAAE,MAAM,CAAC;YAAC,MAAM,EAAE,MAAM,CAAC;YAAC,IAAI,EAAE,MAAM,CAAA;SAAE,CAAC,CAAC;KACnE;IAsCD;;OAEG;IACH,OAAO,CAAC,UAAU,GAAE,MAAW,GAAG,IAAI;IAmBtC;;OAEG;IACH,UAAU,CAAC,MAAM,CAAC,EAAE,MAAM,GAAG,UAAU,EAAE;IAOzC;;OAEG;IACH,YAAY,CAAC,MAAM,EAAE,OAAO,CAAC,gBAAgB,CAAC,GAAG,IAAI;CAItD"}