@hazeljs/ai 0.2.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +192 -0
- package/README.md +497 -0
- package/dist/ai-enhanced.service.d.ts +108 -0
- package/dist/ai-enhanced.service.d.ts.map +1 -0
- package/dist/ai-enhanced.service.js +345 -0
- package/dist/ai-enhanced.service.test.d.ts +2 -0
- package/dist/ai-enhanced.service.test.d.ts.map +1 -0
- package/dist/ai-enhanced.service.test.js +501 -0
- package/dist/ai-enhanced.test.d.ts +2 -0
- package/dist/ai-enhanced.test.d.ts.map +1 -0
- package/dist/ai-enhanced.test.js +587 -0
- package/dist/ai-enhanced.types.d.ts +277 -0
- package/dist/ai-enhanced.types.d.ts.map +1 -0
- package/dist/ai-enhanced.types.js +2 -0
- package/dist/ai.decorator.d.ts +4 -0
- package/dist/ai.decorator.d.ts.map +1 -0
- package/dist/ai.decorator.js +57 -0
- package/dist/ai.decorator.test.d.ts +2 -0
- package/dist/ai.decorator.test.d.ts.map +1 -0
- package/dist/ai.decorator.test.js +189 -0
- package/dist/ai.module.d.ts +12 -0
- package/dist/ai.module.d.ts.map +1 -0
- package/dist/ai.module.js +44 -0
- package/dist/ai.module.test.d.ts +2 -0
- package/dist/ai.module.test.d.ts.map +1 -0
- package/dist/ai.module.test.js +23 -0
- package/dist/ai.service.d.ts +11 -0
- package/dist/ai.service.d.ts.map +1 -0
- package/dist/ai.service.js +266 -0
- package/dist/ai.service.test.d.ts +2 -0
- package/dist/ai.service.test.d.ts.map +1 -0
- package/dist/ai.service.test.js +222 -0
- package/dist/ai.types.d.ts +30 -0
- package/dist/ai.types.d.ts.map +1 -0
- package/dist/ai.types.js +2 -0
- package/dist/context/context.manager.d.ts +69 -0
- package/dist/context/context.manager.d.ts.map +1 -0
- package/dist/context/context.manager.js +168 -0
- package/dist/context/context.manager.test.d.ts +2 -0
- package/dist/context/context.manager.test.d.ts.map +1 -0
- package/dist/context/context.manager.test.js +180 -0
- package/dist/decorators/ai-function.decorator.d.ts +42 -0
- package/dist/decorators/ai-function.decorator.d.ts.map +1 -0
- package/dist/decorators/ai-function.decorator.js +80 -0
- package/dist/decorators/ai-validate.decorator.d.ts +46 -0
- package/dist/decorators/ai-validate.decorator.d.ts.map +1 -0
- package/dist/decorators/ai-validate.decorator.js +83 -0
- package/dist/index.d.ts +18 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +40 -0
- package/dist/prompts/task.prompt.d.ts +12 -0
- package/dist/prompts/task.prompt.d.ts.map +1 -0
- package/dist/prompts/task.prompt.js +12 -0
- package/dist/providers/anthropic.provider.d.ts +48 -0
- package/dist/providers/anthropic.provider.d.ts.map +1 -0
- package/dist/providers/anthropic.provider.js +194 -0
- package/dist/providers/anthropic.provider.test.d.ts +2 -0
- package/dist/providers/anthropic.provider.test.d.ts.map +1 -0
- package/dist/providers/anthropic.provider.test.js +222 -0
- package/dist/providers/cohere.provider.d.ts +57 -0
- package/dist/providers/cohere.provider.d.ts.map +1 -0
- package/dist/providers/cohere.provider.js +230 -0
- package/dist/providers/cohere.provider.test.d.ts +2 -0
- package/dist/providers/cohere.provider.test.d.ts.map +1 -0
- package/dist/providers/cohere.provider.test.js +267 -0
- package/dist/providers/gemini.provider.d.ts +45 -0
- package/dist/providers/gemini.provider.d.ts.map +1 -0
- package/dist/providers/gemini.provider.js +180 -0
- package/dist/providers/gemini.provider.test.d.ts +2 -0
- package/dist/providers/gemini.provider.test.d.ts.map +1 -0
- package/dist/providers/gemini.provider.test.js +219 -0
- package/dist/providers/ollama.provider.d.ts +45 -0
- package/dist/providers/ollama.provider.d.ts.map +1 -0
- package/dist/providers/ollama.provider.js +232 -0
- package/dist/providers/ollama.provider.test.d.ts +2 -0
- package/dist/providers/ollama.provider.test.d.ts.map +1 -0
- package/dist/providers/ollama.provider.test.js +267 -0
- package/dist/providers/openai.provider.d.ts +57 -0
- package/dist/providers/openai.provider.d.ts.map +1 -0
- package/dist/providers/openai.provider.js +320 -0
- package/dist/providers/openai.provider.test.d.ts +2 -0
- package/dist/providers/openai.provider.test.d.ts.map +1 -0
- package/dist/providers/openai.provider.test.js +364 -0
- package/dist/tracking/token.tracker.d.ts +72 -0
- package/dist/tracking/token.tracker.d.ts.map +1 -0
- package/dist/tracking/token.tracker.js +222 -0
- package/dist/tracking/token.tracker.test.d.ts +2 -0
- package/dist/tracking/token.tracker.test.d.ts.map +1 -0
- package/dist/tracking/token.tracker.test.js +272 -0
- package/dist/vector/vector.service.d.ts +50 -0
- package/dist/vector/vector.service.d.ts.map +1 -0
- package/dist/vector/vector.service.js +163 -0
- package/package.json +60 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"use strict";
/**
 * @hazeljs/ai - AI integration module for HazelJS
 *
 * Barrel module: re-exports the public surface of the package.
 * Compiled CommonJS emit from TypeScript `export { X } from '...'` syntax;
 * the `Object.defineProperty` getter pattern preserves live-binding
 * semantics, so do not replace it with plain property assignments.
 */
Object.defineProperty(exports, "__esModule", { value: true });
// Pre-declare every re-exported name (tsc emit ordering requirement).
exports.VectorService = exports.getAIPropertyValidationMetadata = exports.hasAIValidationMetadata = exports.getAIValidationMetadata = exports.AIValidateProperty = exports.AIValidate = exports.getAIPromptMetadata = exports.hasAIFunctionMetadata = exports.getAIFunctionMetadata = exports.AIPrompt = exports.AIFunction = exports.OllamaProvider = exports.CohereProvider = exports.GeminiProvider = exports.AnthropicProvider = exports.OpenAIProvider = exports.AITask = exports.AIEnhancedService = exports.AIService = exports.AIModule = void 0;
// AI Module: module wiring, base/enhanced services, and the @AITask decorator.
var ai_module_1 = require("./ai.module");
Object.defineProperty(exports, "AIModule", { enumerable: true, get: function () { return ai_module_1.AIModule; } });
var ai_service_1 = require("./ai.service");
Object.defineProperty(exports, "AIService", { enumerable: true, get: function () { return ai_service_1.AIService; } });
var ai_enhanced_service_1 = require("./ai-enhanced.service");
Object.defineProperty(exports, "AIEnhancedService", { enumerable: true, get: function () { return ai_enhanced_service_1.AIEnhancedService; } });
var ai_decorator_1 = require("./ai.decorator");
Object.defineProperty(exports, "AITask", { enumerable: true, get: function () { return ai_decorator_1.AITask; } });
// Enhanced AI: one provider class per backend (OpenAI, Anthropic, Gemini, Cohere, Ollama).
var openai_provider_1 = require("./providers/openai.provider");
Object.defineProperty(exports, "OpenAIProvider", { enumerable: true, get: function () { return openai_provider_1.OpenAIProvider; } });
var anthropic_provider_1 = require("./providers/anthropic.provider");
Object.defineProperty(exports, "AnthropicProvider", { enumerable: true, get: function () { return anthropic_provider_1.AnthropicProvider; } });
var gemini_provider_1 = require("./providers/gemini.provider");
Object.defineProperty(exports, "GeminiProvider", { enumerable: true, get: function () { return gemini_provider_1.GeminiProvider; } });
var cohere_provider_1 = require("./providers/cohere.provider");
Object.defineProperty(exports, "CohereProvider", { enumerable: true, get: function () { return cohere_provider_1.CohereProvider; } });
var ollama_provider_1 = require("./providers/ollama.provider");
Object.defineProperty(exports, "OllamaProvider", { enumerable: true, get: function () { return ollama_provider_1.OllamaProvider; } });
// Function/prompt decorators and their metadata accessors.
var ai_function_decorator_1 = require("./decorators/ai-function.decorator");
Object.defineProperty(exports, "AIFunction", { enumerable: true, get: function () { return ai_function_decorator_1.AIFunction; } });
Object.defineProperty(exports, "AIPrompt", { enumerable: true, get: function () { return ai_function_decorator_1.AIPrompt; } });
Object.defineProperty(exports, "getAIFunctionMetadata", { enumerable: true, get: function () { return ai_function_decorator_1.getAIFunctionMetadata; } });
Object.defineProperty(exports, "hasAIFunctionMetadata", { enumerable: true, get: function () { return ai_function_decorator_1.hasAIFunctionMetadata; } });
Object.defineProperty(exports, "getAIPromptMetadata", { enumerable: true, get: function () { return ai_function_decorator_1.getAIPromptMetadata; } });
// Validation decorators and their metadata accessors.
var ai_validate_decorator_1 = require("./decorators/ai-validate.decorator");
Object.defineProperty(exports, "AIValidate", { enumerable: true, get: function () { return ai_validate_decorator_1.AIValidate; } });
Object.defineProperty(exports, "AIValidateProperty", { enumerable: true, get: function () { return ai_validate_decorator_1.AIValidateProperty; } });
Object.defineProperty(exports, "getAIValidationMetadata", { enumerable: true, get: function () { return ai_validate_decorator_1.getAIValidationMetadata; } });
Object.defineProperty(exports, "hasAIValidationMetadata", { enumerable: true, get: function () { return ai_validate_decorator_1.hasAIValidationMetadata; } });
Object.defineProperty(exports, "getAIPropertyValidationMetadata", { enumerable: true, get: function () { return ai_validate_decorator_1.getAIPropertyValidationMetadata; } });
// Vector/embedding storage service.
var vector_service_1 = require("./vector/vector.service");
Object.defineProperty(exports, "VectorService", { enumerable: true, get: function () { return vector_service_1.VectorService; } });
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import { PromptTemplate } from '@hazeljs/prompts';
/**
 * Registry key under which the AI task-format prompt template is registered.
 */
export declare const AI_TASK_FORMAT_KEY = "ai:task:format";
/**
 * Variables accepted by the AI task-format prompt template.
 *
 * NOTE(review): the compiled template string in task.prompt.js only
 * interpolates `{description}`; the other four variables declared here are
 * not referenced by it — confirm whether the template is intentionally
 * minimal or should use all five.
 */
export interface AITaskFormatVariables {
    // Human-readable name of the task being formatted.
    taskName: string;
    // Task description; the only variable the current template interpolates.
    description: string;
    // Serialized task input.
    input: string;
    // Example of a valid input.
    inputExample: string;
    // Example of the expected output.
    outputExample: string;
}
declare const template: PromptTemplate<AITaskFormatVariables>;
export { template as aiTaskFormatPrompt };
//# sourceMappingURL=task.prompt.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"task.prompt.d.ts","sourceRoot":"","sources":["../../src/prompts/task.prompt.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,cAAc,EAAkB,MAAM,kBAAkB,CAAC;AAElE,eAAO,MAAM,kBAAkB,mBAAmB,CAAC;AAEnD,MAAM,WAAW,qBAAqB;IACpC,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,KAAK,EAAE,MAAM,CAAC;IACd,YAAY,EAAE,MAAM,CAAC;IACrB,aAAa,EAAE,MAAM,CAAC;CACvB;AAED,QAAA,MAAM,QAAQ,uCAKZ,CAAC;AAIH,OAAO,EAAE,QAAQ,IAAI,kBAAkB,EAAE,CAAC"}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
"use strict";
/**
 * AI task-format prompt template.
 *
 * Defines the template used to format @AITask prompts and registers it in
 * the shared PromptRegistry at module load time (importing this module has
 * that side effect).
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.aiTaskFormatPrompt = exports.AI_TASK_FORMAT_KEY = void 0;
const prompts_1 = require("@hazeljs/prompts");
// Registry key for the task-format template.
exports.AI_TASK_FORMAT_KEY = 'ai:task:format';
// NOTE(review): the declaration file advertises five template variables
// (taskName, description, input, inputExample, outputExample) but this
// template only interpolates `{description}` — confirm intended.
const template = new prompts_1.PromptTemplate(`{description}`, {
    name: 'AI Task Format',
    description: 'Formats an AI task prompt by substituting context variables into the template string',
    version: '1.0.0',
});
exports.aiTaskFormatPrompt = template;
// Module-load side effect: make the template discoverable by key.
prompts_1.PromptRegistry.register(exports.AI_TASK_FORMAT_KEY, template);
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import { IAIProvider, AIProvider, AICompletionRequest, AICompletionResponse, AIStreamChunk, AIEmbeddingRequest, AIEmbeddingResponse } from '../ai-enhanced.types';
/**
 * Anthropic Claude AI Provider
 *
 * Production-ready implementation using Anthropic SDK.
 *
 * Setup:
 * 1. Install the SDK: `npm install @anthropic-ai/sdk`
 * 2. Set ANTHROPIC_API_KEY environment variable
 * 3. Use the provider in your application
 *
 * Supported models:
 * - claude-3-5-sonnet-20241022: Latest and most intelligent model
 * - claude-3-opus-20240229: Most powerful for complex tasks
 * - claude-3-sonnet-20240229: Balanced performance
 * - claude-3-haiku-20240307: Fast and cost-effective
 *
 * Note: Anthropic does not provide embeddings API. Use OpenAI or Cohere for embeddings.
 */
export declare class AnthropicProvider implements IAIProvider {
    // Provider discriminator; the implementation assigns 'anthropic'.
    readonly name: AIProvider;
    // Resolved from the constructor argument or ANTHROPIC_API_KEY.
    private apiKey;
    // Underlying @anthropic-ai/sdk client instance.
    private anthropic;
    // NOTE(review): stored by the constructor but never read in the
    // compiled implementation — custom endpoints appear to have no effect.
    private endpoint;
    constructor(apiKey?: string, endpoint?: string);
    /**
     * Generate completion
     */
    complete(request: AICompletionRequest): Promise<AICompletionResponse>;
    /**
     * Generate streaming completion
     */
    streamComplete(request: AICompletionRequest): AsyncGenerator<AIStreamChunk>;
    /**
     * Generate embeddings
     * Note: Anthropic doesn't provide embeddings API
     */
    embed(_request: AIEmbeddingRequest): Promise<AIEmbeddingResponse>;
    /**
     * Check if provider is available
     */
    isAvailable(): Promise<boolean>;
    /**
     * Get supported models
     */
    getSupportedModels(): string[];
}
//# sourceMappingURL=anthropic.provider.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"anthropic.provider.d.ts","sourceRoot":"","sources":["../../src/providers/anthropic.provider.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,WAAW,EACX,UAAU,EACV,mBAAmB,EACnB,oBAAoB,EACpB,aAAa,EACb,kBAAkB,EAClB,mBAAmB,EACpB,MAAM,sBAAsB,CAAC;AAI9B;;;;;;;;;;;;;;;;;GAiBG;AACH,qBAAa,iBAAkB,YAAW,WAAW;IACnD,QAAQ,CAAC,IAAI,EAAE,UAAU,CAAe;IACxC,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,SAAS,CAAY;IAC7B,OAAO,CAAC,QAAQ,CAAS;gBAEb,MAAM,CAAC,EAAE,MAAM,EAAE,QAAQ,CAAC,EAAE,MAAM;IAY9C;;OAEG;IACG,QAAQ,CAAC,OAAO,EAAE,mBAAmB,GAAG,OAAO,CAAC,oBAAoB,CAAC;IAiD3E;;OAEG;IACI,cAAc,CAAC,OAAO,EAAE,mBAAmB,GAAG,cAAc,CAAC,aAAa,CAAC;IAsElF;;;OAGG;IACG,KAAK,CAAC,QAAQ,EAAE,kBAAkB,GAAG,OAAO,CAAC,mBAAmB,CAAC;IAIvE;;OAEG;IACG,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC;IAoBrC;;OAEG;IACH,kBAAkB,IAAI,MAAM,EAAE;CAW/B"}
|
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
"use strict";
// Standard tsc CommonJS interop shim for default imports.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.AnthropicProvider = void 0;
const core_1 = __importDefault(require("@hazeljs/core"));
const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
/**
 * Anthropic Claude AI Provider
 *
 * Production-ready implementation using Anthropic SDK.
 *
 * Setup:
 * 1. Install the SDK: `npm install @anthropic-ai/sdk`
 * 2. Set ANTHROPIC_API_KEY environment variable
 * 3. Use the provider in your application
 *
 * Supported models:
 * - claude-3-5-sonnet-20241022: Latest and most intelligent model
 * - claude-3-opus-20240229: Most powerful for complex tasks
 * - claude-3-sonnet-20240229: Balanced performance
 * - claude-3-haiku-20240307: Fast and cost-effective
 *
 * Note: Anthropic does not provide embeddings API. Use OpenAI or Cohere for embeddings.
 */
class AnthropicProvider {
    /**
     * @param apiKey   Explicit API key; falls back to ANTHROPIC_API_KEY, then ''.
     * @param endpoint Optional API endpoint override.
     *                 NOTE(review): stored but never read below — the SDK client
     *                 is constructed without a baseURL, so a custom endpoint has
     *                 no effect. Confirm whether it should be forwarded.
     */
    constructor(apiKey, endpoint) {
        this.name = 'anthropic';
        this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY || '';
        this.endpoint = endpoint || 'https://api.anthropic.com/v1';
        if (!this.apiKey) {
            // Warn but still construct the client; requests will fail later.
            core_1.default.warn('Anthropic API key not provided. Set ANTHROPIC_API_KEY environment variable.');
        }
        this.anthropic = new sdk_1.default({ apiKey: this.apiKey });
        core_1.default.info('Anthropic provider initialized');
    }
    /**
     * Generate completion
     *
     * Sends a non-streaming Messages API request. System-role messages are
     * pulled out of the conversation and joined into the dedicated `system`
     * field (Anthropic does not accept system messages in the message list).
     * API failures are rethrown as a generic Error with the original message.
     */
    async complete(request) {
        const modelName = request.model || 'claude-3-5-sonnet-20241022';
        core_1.default.debug(`Anthropic completion request for model: ${modelName}`);
        try {
            // Separate system messages from conversation messages
            const systemMessages = request.messages.filter((m) => m.role === 'system');
            const conversationMessages = request.messages.filter((m) => m.role !== 'system');
            const systemPrompt = systemMessages.map((m) => m.content).join('\n\n');
            // Create message request
            const response = await this.anthropic.messages.create({
                model: modelName,
                // Messages API requires max_tokens; 4096 is the local default cap.
                max_tokens: request.maxTokens || 4096,
                temperature: request.temperature,
                // Empty string -> undefined so the field is omitted entirely.
                system: systemPrompt || undefined,
                messages: conversationMessages.map((m) => ({
                    role: m.role,
                    content: m.content,
                })),
            });
            // Extract text content: concatenate text blocks, ignore other
            // block types (e.g. tool_use).
            const textContent = response.content
                .filter((block) => block.type === 'text')
                .map((block) => block.text || '')
                .join('');
            return {
                id: response.id,
                content: textContent,
                role: 'assistant',
                model: response.model,
                usage: {
                    promptTokens: response.usage.input_tokens,
                    completionTokens: response.usage.output_tokens,
                    totalTokens: response.usage.input_tokens + response.usage.output_tokens,
                },
                // Null stop_reason is normalized to 'end_turn'.
                finishReason: response.stop_reason || 'end_turn',
            };
        }
        catch (error) {
            core_1.default.error('Anthropic completion error:', error);
            throw new Error(`Anthropic API error: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Generate streaming completion
     *
     * Yields one chunk per text delta (chunk.content is the CUMULATIVE text
     * so far, chunk.delta the increment) and a final chunk with done=true
     * and token usage when the message_stop event arrives. Non-text deltas
     * are skipped silently.
     */
    async *streamComplete(request) {
        const modelName = request.model || 'claude-3-5-sonnet-20241022';
        core_1.default.debug('Anthropic streaming completion started');
        try {
            // Separate system messages from conversation messages
            const systemMessages = request.messages.filter((m) => m.role === 'system');
            const conversationMessages = request.messages.filter((m) => m.role !== 'system');
            const systemPrompt = systemMessages.map((m) => m.content).join('\n\n');
            // Create streaming request
            const stream = await this.anthropic.messages.stream({
                model: modelName,
                max_tokens: request.maxTokens || 4096,
                temperature: request.temperature,
                system: systemPrompt || undefined,
                messages: conversationMessages.map((m) => ({
                    role: m.role,
                    content: m.content,
                })),
            });
            let fullContent = '';
            let messageId = '';
            let inputTokens = 0;
            let outputTokens = 0;
            for await (const event of stream) {
                if (event.type === 'message_start') {
                    // Capture id and prompt-token count from the opening event.
                    messageId = event.message.id;
                    inputTokens = event.message.usage.input_tokens;
                }
                else if (event.type === 'content_block_delta') {
                    if (event.delta.type === 'text_delta') {
                        const text = event.delta.text;
                        fullContent += text;
                        yield {
                            // Fallback id when no message_start was observed.
                            id: messageId || `claude-stream-${Date.now()}`,
                            content: fullContent,
                            delta: text,
                            done: false,
                        };
                    }
                }
                else if (event.type === 'message_delta') {
                    // Completion-token count arrives on the trailing delta event.
                    outputTokens = event.usage.output_tokens;
                }
                else if (event.type === 'message_stop') {
                    // Terminal chunk: empty delta, done flag, aggregated usage.
                    yield {
                        id: messageId || `claude-stream-${Date.now()}`,
                        content: fullContent,
                        delta: '',
                        done: true,
                        usage: {
                            promptTokens: inputTokens,
                            completionTokens: outputTokens,
                            totalTokens: inputTokens + outputTokens,
                        },
                    };
                }
            }
            core_1.default.debug('Anthropic streaming completed');
        }
        catch (error) {
            core_1.default.error('Anthropic streaming error:', error);
            throw new Error(`Anthropic streaming error: ${error instanceof Error ? error.message : 'Unknown error'}`);
        }
    }
    /**
     * Generate embeddings
     * Note: Anthropic doesn't provide embeddings API
     *
     * Always rejects; callers should route embedding requests to another
     * provider.
     */
    async embed(_request) {
        throw new Error('Anthropic does not support embeddings. Use OpenAI or Cohere instead.');
    }
    /**
     * Check if provider is available
     *
     * Performs a real (billed) 10-token request against the cheapest model
     * to verify credentials and connectivity; any failure maps to false.
     */
    async isAvailable() {
        if (!this.apiKey) {
            core_1.default.warn('Anthropic API key not configured');
            return false;
        }
        try {
            // Test with a minimal request using fastest model
            await this.anthropic.messages.create({
                model: 'claude-3-haiku-20240307',
                max_tokens: 10,
                messages: [{ role: 'user', content: 'test' }],
            });
            return true;
        }
        catch (error) {
            core_1.default.error('Anthropic availability check failed:', error);
            return false;
        }
    }
    /**
     * Get supported models
     *
     * Static list; not fetched from the API, so it can drift from what
     * Anthropic actually serves.
     */
    getSupportedModels() {
        return [
            'claude-3-5-sonnet-20241022',
            'claude-3-5-sonnet-20240620',
            'claude-3-opus-20240229',
            'claude-3-sonnet-20240229',
            'claude-3-haiku-20240307',
            'claude-2.1',
            'claude-2.0',
        ];
    }
}
exports.AnthropicProvider = AnthropicProvider;
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"anthropic.provider.test.d.ts","sourceRoot":"","sources":["../../src/providers/anthropic.provider.test.ts"],"names":[],"mappings":""}
|
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
"use strict";
/**
 * Unit tests for AnthropicProvider.
 *
 * Both @hazeljs/core (logger) and @anthropic-ai/sdk are replaced with jest
 * mocks; jest hoists the jest.mock calls above the require below, so the
 * provider always receives the mocked SDK client.
 */
Object.defineProperty(exports, "__esModule", { value: true });
jest.mock('@hazeljs/core', () => ({
    __esModule: true,
    default: { info: jest.fn(), debug: jest.fn(), warn: jest.fn(), error: jest.fn() },
}));
// Shared handles to the mocked SDK methods so individual tests can program
// and inspect them.
const mockMessagesCreate = jest.fn();
const mockMessagesStream = jest.fn();
jest.mock('@anthropic-ai/sdk', () => ({
    __esModule: true,
    default: jest.fn().mockImplementation(() => ({
        messages: {
            create: mockMessagesCreate,
            stream: mockMessagesStream,
        },
    })),
}));
const anthropic_provider_1 = require("./anthropic.provider");
// Minimal well-formed completion request reused across tests.
const BASE_REQUEST = {
    messages: [{ role: 'user', content: 'Hello' }],
    model: 'claude-3-5-sonnet-20241022',
};
// Canonical Messages API response shape used as the default mock result.
const MOCK_RESPONSE = {
    id: 'msg_001',
    content: [{ type: 'text', text: 'Hello there!' }],
    model: 'claude-3-5-sonnet-20241022',
    stop_reason: 'end_turn',
    usage: { input_tokens: 10, output_tokens: 15 },
};
describe('AnthropicProvider', () => {
    let provider;
    beforeEach(() => {
        jest.clearAllMocks();
        provider = new anthropic_provider_1.AnthropicProvider('test-api-key');
    });
    describe('constructor', () => {
        it('sets name to anthropic', () => {
            expect(provider.name).toBe('anthropic');
        });
        // NOTE(review): this test makes no assertion — it only checks the
        // constructor does not throw. If ANTHROPIC_API_KEY is set in the test
        // environment, no warning is emitted at all; consider clearing the
        // env var and asserting on the mocked logger's warn().
        it('warns when no API key provided', () => {
            new anthropic_provider_1.AnthropicProvider();
            // Constructor runs without throwing
        });
        // NOTE(review): mutates process.env and deletes the var afterwards,
        // which clobbers any pre-existing ANTHROPIC_API_KEY value.
        it('uses ANTHROPIC_API_KEY env var', () => {
            process.env.ANTHROPIC_API_KEY = 'env-key';
            const p = new anthropic_provider_1.AnthropicProvider();
            expect(p).toBeDefined();
            delete process.env.ANTHROPIC_API_KEY;
        });
    });
    describe('getSupportedModels()', () => {
        it('returns a list of claude models', () => {
            const models = provider.getSupportedModels();
            expect(models).toContain('claude-3-5-sonnet-20241022');
            expect(models.length).toBeGreaterThan(0);
        });
    });
    describe('complete()', () => {
        it('returns a completion response', async () => {
            mockMessagesCreate.mockResolvedValue(MOCK_RESPONSE);
            const result = await provider.complete(BASE_REQUEST);
            expect(result.content).toBe('Hello there!');
            expect(result.role).toBe('assistant');
            expect(result.usage?.promptTokens).toBe(10);
            expect(result.usage?.completionTokens).toBe(15);
            expect(result.usage?.totalTokens).toBe(25);
            expect(result.finishReason).toBe('end_turn');
        });
        it('uses default model when not specified', async () => {
            mockMessagesCreate.mockResolvedValue(MOCK_RESPONSE);
            await provider.complete({ messages: [{ role: 'user', content: 'hi' }] });
            expect(mockMessagesCreate).toHaveBeenCalledWith(expect.objectContaining({ model: 'claude-3-5-sonnet-20241022' }));
        });
        it('passes maxTokens when specified', async () => {
            mockMessagesCreate.mockResolvedValue(MOCK_RESPONSE);
            await provider.complete({ ...BASE_REQUEST, maxTokens: 500 });
            expect(mockMessagesCreate).toHaveBeenCalledWith(expect.objectContaining({ max_tokens: 500 }));
        });
        // System messages must be lifted into the dedicated `system` field and
        // removed from the message list.
        it('separates system messages from conversation', async () => {
            mockMessagesCreate.mockResolvedValue(MOCK_RESPONSE);
            await provider.complete({
                messages: [
                    { role: 'system', content: 'You are helpful.' },
                    { role: 'user', content: 'Hello' },
                ],
            });
            expect(mockMessagesCreate).toHaveBeenCalledWith(expect.objectContaining({
                system: 'You are helpful.',
                messages: [{ role: 'user', content: 'Hello' }],
            }));
        });
        // Null stop_reason falls back to 'end_turn'.
        it('handles response with no stop_reason', async () => {
            mockMessagesCreate.mockResolvedValue({ ...MOCK_RESPONSE, stop_reason: null });
            const result = await provider.complete(BASE_REQUEST);
            expect(result.finishReason).toBe('end_turn');
        });
        it('concatenates multiple text content blocks', async () => {
            mockMessagesCreate.mockResolvedValue({
                ...MOCK_RESPONSE,
                content: [
                    { type: 'text', text: 'Part 1. ' },
                    { type: 'text', text: 'Part 2.' },
                ],
            });
            const result = await provider.complete(BASE_REQUEST);
            expect(result.content).toBe('Part 1. Part 2.');
        });
        it('ignores non-text content blocks', async () => {
            mockMessagesCreate.mockResolvedValue({
                ...MOCK_RESPONSE,
                content: [
                    { type: 'tool_use', id: 'tool_1' },
                    { type: 'text', text: 'Some text' },
                ],
            });
            const result = await provider.complete(BASE_REQUEST);
            expect(result.content).toBe('Some text');
        });
        it('throws wrapped error on API failure', async () => {
            mockMessagesCreate.mockRejectedValue(new Error('Rate limit exceeded'));
            await expect(provider.complete(BASE_REQUEST)).rejects.toThrow('Anthropic API error: Rate limit exceeded');
        });
        // Non-Error rejections are normalized to 'Unknown error'.
        it('handles non-Error thrown objects', async () => {
            mockMessagesCreate.mockRejectedValue('string error');
            await expect(provider.complete(BASE_REQUEST)).rejects.toThrow('Anthropic API error: Unknown error');
        });
        it('sets undefined system when no system messages', async () => {
            mockMessagesCreate.mockResolvedValue(MOCK_RESPONSE);
            await provider.complete({ messages: [{ role: 'user', content: 'hi' }] });
            expect(mockMessagesCreate).toHaveBeenCalledWith(expect.objectContaining({ system: undefined }));
        });
    });
    describe('streamComplete()', () => {
        // Wraps a fixed event list in an async generator, mimicking the
        // SDK's streaming interface closely enough for the provider's
        // for-await loop.
        function makeStreamEvents(events) {
            return (async function* () {
                for (const ev of events) {
                    yield ev;
                }
            })();
        }
        it('yields chunks for message_start and content_block_delta events', async () => {
            mockMessagesStream.mockReturnValue(makeStreamEvents([
                { type: 'message_start', message: { id: 'msg_1', usage: { input_tokens: 5 } } },
                { type: 'content_block_delta', delta: { type: 'text_delta', text: 'Hello' } },
                { type: 'content_block_delta', delta: { type: 'text_delta', text: ' world' } },
                { type: 'message_delta', usage: { output_tokens: 10 } },
                { type: 'message_stop' },
            ]));
            const results = [];
            for await (const chunk of provider.streamComplete(BASE_REQUEST)) {
                results.push(chunk);
            }
            // 2 content deltas + 1 message_stop
            expect(results.length).toBeGreaterThan(0);
        });
        it('includes usage in final chunk', async () => {
            mockMessagesStream.mockReturnValue(makeStreamEvents([
                { type: 'message_start', message: { id: 'msg_2', usage: { input_tokens: 8 } } },
                { type: 'message_delta', usage: { output_tokens: 12 } },
                { type: 'message_stop' },
            ]));
            const results = [];
            for await (const chunk of provider.streamComplete(BASE_REQUEST)) {
                results.push(chunk);
            }
            const finalChunk = results[results.length - 1];
            expect(finalChunk.done).toBe(true);
            expect(finalChunk.usage).toBeDefined();
        });
        it('ignores non-text_delta content blocks', async () => {
            mockMessagesStream.mockReturnValue(makeStreamEvents([
                { type: 'content_block_delta', delta: { type: 'input_json_delta', partial_json: '{}' } },
                { type: 'message_stop' },
            ]));
            const results = [];
            for await (const chunk of provider.streamComplete(BASE_REQUEST)) {
                results.push(chunk);
            }
            // Only message_stop chunk
            expect(results).toHaveLength(1);
        });
        it('uses default model for streaming', async () => {
            mockMessagesStream.mockReturnValue(makeStreamEvents([{ type: 'message_stop' }]));
            for await (const _chunk of provider.streamComplete({
                messages: [{ role: 'user', content: 'hi' }],
            })) {
                // consume
            }
            expect(mockMessagesStream).toHaveBeenCalledWith(expect.objectContaining({ model: 'claude-3-5-sonnet-20241022' }));
        });
        it('throws wrapped error on streaming failure', async () => {
            // Generator throws before yielding; the yield is intentionally
            // unreachable and only marks the function as a generator.
            mockMessagesStream.mockImplementation(async function* () {
                throw new Error('Stream error');
                yield { type: 'message_stop' };
            });
            await expect(async () => {
                for await (const _chunk of provider.streamComplete(BASE_REQUEST)) {
                    // consume
                }
            }).rejects.toThrow('Anthropic streaming error: Stream error');
        });
    });
    describe('embed()', () => {
        it('throws not supported error', async () => {
            await expect(provider.embed({ input: 'test' })).rejects.toThrow('Anthropic does not support embeddings');
        });
    });
    describe('isAvailable()', () => {
        // NOTE(review): '' falls through to process.env.ANTHROPIC_API_KEY in
        // the constructor, so this test can pass a real key if the env var is
        // set; consider clearing it first.
        it('returns false when no API key', async () => {
            const p = new anthropic_provider_1.AnthropicProvider('');
            expect(await p.isAvailable()).toBe(false);
        });
        it('returns true when API responds successfully', async () => {
            mockMessagesCreate.mockResolvedValue(MOCK_RESPONSE);
            expect(await provider.isAvailable()).toBe(true);
        });
        it('returns false on API error', async () => {
            mockMessagesCreate.mockRejectedValue(new Error('Unauthorized'));
            expect(await provider.isAvailable()).toBe(false);
        });
    });
});
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import { IAIProvider, AIProvider, AICompletionRequest, AICompletionResponse, AIStreamChunk, AIEmbeddingRequest, AIEmbeddingResponse } from '../ai-enhanced.types';
|
|
2
|
+
/**
 * Cohere AI Provider
 *
 * Production-ready implementation using Cohere AI SDK.
 *
 * Setup:
 * 1. Install the SDK: `npm install cohere-ai`
 * 2. Set COHERE_API_KEY environment variable
 * 3. Use the provider in your application
 *
 * Supported models:
 * - command-r-plus: Most powerful model for complex tasks
 * - command-r: Balanced performance and cost
 * - command: Standard text generation
 * - command-light: Fast, cost-effective model
 * - embed-english-v3.0: English text embeddings
 * - embed-multilingual-v3.0: Multilingual embeddings
 * - rerank-english-v3.0: Document reranking
 *
 * NOTE(review): this is a tsc-generated declaration file; behavior lives in
 * the paired cohere.provider.js / src/providers/cohere.provider.ts.
 */
export declare class CohereProvider implements IAIProvider {
    /** Provider identifier used by the AI module to select this backend. */
    readonly name: AIProvider;
    /** Cohere API key — presumably falls back to COHERE_API_KEY when the constructor arg is omitted; confirm in implementation. */
    private apiKey;
    /** Underlying Cohere SDK client instance (type erased in this declaration). */
    private cohere;
    /** API endpoint — presumably an override for the default Cohere base URL; confirm in implementation. */
    private endpoint;
    constructor(apiKey?: string, endpoint?: string);
    /**
     * Generate completion
     * @param request messages plus optional model/sampling parameters
     * @returns the completed response from the Cohere chat API
     */
    complete(request: AICompletionRequest): Promise<AICompletionResponse>;
    /**
     * Generate streaming completion
     * @param request same shape as {@link complete}
     * @returns async generator yielding incremental chunks
     */
    streamComplete(request: AICompletionRequest): AsyncGenerator<AIStreamChunk>;
    /**
     * Generate embeddings
     * @param request input text(s) and optional embedding model
     */
    embed(request: AIEmbeddingRequest): Promise<AIEmbeddingResponse>;
    /**
     * Check if provider is available
     * @returns false when the key is missing or the API is unreachable — TODO confirm exact semantics against implementation
     */
    isAvailable(): Promise<boolean>;
    /**
     * Get supported models
     * @returns model identifiers this provider accepts (see class doc)
     */
    getSupportedModels(): string[];
    /**
     * Rerank documents (Cohere-specific feature)
     * Useful for RAG applications to improve retrieval quality
     * @param query the search query to rank against
     * @param documents candidate documents to reorder
     * @param topN optional cap on how many results are returned
     * @param model optional rerank model (e.g. rerank-english-v3.0)
     * @returns entries pairing each document with its original index and relevance score
     */
    rerank(query: string, documents: string[], topN?: number, model?: string): Promise<Array<{
        index: number;
        score: number;
        document: string;
    }>>;
}
|
|
57
|
+
//# sourceMappingURL=cohere.provider.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"cohere.provider.d.ts","sourceRoot":"","sources":["../../src/providers/cohere.provider.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,WAAW,EACX,UAAU,EACV,mBAAmB,EACnB,oBAAoB,EACpB,aAAa,EACb,kBAAkB,EAClB,mBAAmB,EACpB,MAAM,sBAAsB,CAAC;AAI9B;;;;;;;;;;;;;;;;;;GAkBG;AACH,qBAAa,cAAe,YAAW,WAAW;IAChD,QAAQ,CAAC,IAAI,EAAE,UAAU,CAAY;IACrC,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,MAAM,CAAe;IAC7B,OAAO,CAAC,QAAQ,CAAS;gBAEb,MAAM,CAAC,EAAE,MAAM,EAAE,QAAQ,CAAC,EAAE,MAAM;IAY9C;;OAEG;IACG,QAAQ,CAAC,OAAO,EAAE,mBAAmB,GAAG,OAAO,CAAC,oBAAoB,CAAC;IAuC3E;;OAEG;IACI,cAAc,CAAC,OAAO,EAAE,mBAAmB,GAAG,cAAc,CAAC,aAAa,CAAC;IAgElF;;OAEG;IACG,KAAK,CAAC,OAAO,EAAE,kBAAkB,GAAG,OAAO,CAAC,mBAAmB,CAAC;IAsCtE;;OAEG;IACG,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC;IAoBrC;;OAEG;IACH,kBAAkB,IAAI,MAAM,EAAE;IAgB9B;;;OAGG;IACG,MAAM,CACV,KAAK,EAAE,MAAM,EACb,SAAS,EAAE,MAAM,EAAE,EACnB,IAAI,CAAC,EAAE,MAAM,EACb,KAAK,CAAC,EAAE,MAAM,GACb,OAAO,CAAC,KAAK,CAAC;QAAE,KAAK,EAAE,MAAM,CAAC;QAAC,KAAK,EAAE,MAAM,CAAC;QAAC,QAAQ,EAAE,MAAM,CAAA;KAAE,CAAC,CAAC;CAwBtE"}
|