@compilr-dev/agents 0.3.2 → 0.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent.d.ts +28 -1
- package/dist/agent.js +12 -0
- package/dist/index.d.ts +10 -0
- package/dist/index.js +6 -0
- package/dist/providers/claude.d.ts +27 -0
- package/dist/providers/claude.js +53 -3
- package/dist/providers/fireworks.d.ts +86 -0
- package/dist/providers/fireworks.js +123 -0
- package/dist/providers/gemini-native.js +61 -26
- package/dist/providers/groq.d.ts +86 -0
- package/dist/providers/groq.js +123 -0
- package/dist/providers/index.d.ts +10 -0
- package/dist/providers/index.js +6 -0
- package/dist/providers/openai-compatible.js +12 -1
- package/dist/providers/openrouter.d.ts +95 -0
- package/dist/providers/openrouter.js +138 -0
- package/dist/providers/perplexity.d.ts +86 -0
- package/dist/providers/perplexity.js +123 -0
- package/dist/providers/together.d.ts +86 -0
- package/dist/providers/together.js +123 -0
- package/dist/providers/types.d.ts +20 -0
- package/dist/tools/builtin/ask-user-simple.js +1 -0
- package/dist/tools/builtin/ask-user.js +1 -0
- package/dist/tools/builtin/bash.js +123 -2
- package/dist/tools/builtin/shell-manager.d.ts +15 -0
- package/dist/tools/builtin/shell-manager.js +51 -0
- package/dist/tools/builtin/suggest.js +1 -0
- package/dist/tools/builtin/todo.js +2 -0
- package/dist/tools/define.d.ts +6 -0
- package/dist/tools/define.js +1 -0
- package/dist/tools/types.d.ts +19 -0
- package/package.json +6 -3
package/dist/providers/groq.d.ts
ADDED
@@ -0,0 +1,86 @@
+/**
+ * Groq LLM Provider
+ *
+ * Implements LLMProvider interface for Groq models.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createGroqProvider({
+ *   model: 'llama-3.2-90b-vision-preview',
+ *   apiKey: process.env.GROQ_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid Groq API key
+ * - Default model is llama-3.2-8b-preview
+ * - Known for extremely fast inference speeds
+ */
+import type { ChatOptions } from './types.js';
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+/**
+ * Configuration for GroqProvider
+ */
+export interface GroqProviderConfig {
+    /** Groq API key (falls back to GROQ_API_KEY env var) */
+    apiKey?: string;
+    /** Base URL for Groq API (default: https://api.groq.com/openai) */
+    baseUrl?: string;
+    /** Default model to use (default: llama-3.2-8b-preview) */
+    model?: string;
+    /** Default max tokens (default: 4096) */
+    maxTokens?: number;
+    /** Request timeout in milliseconds (default: 120000) */
+    timeout?: number;
+}
+/**
+ * Groq LLM Provider
+ *
+ * Provides streaming chat completion using Groq's ultra-fast inference.
+ * Supports Llama, Mixtral, and other models optimized for speed.
+ */
+export declare class GroqProvider extends OpenAICompatibleProvider {
+    readonly name = "groq";
+    private readonly apiKey;
+    constructor(config?: GroqProviderConfig);
+    /**
+     * Groq authentication with Bearer token
+     */
+    protected getAuthHeaders(): Record<string, string>;
+    /**
+     * Groq chat completions endpoint (OpenAI-compatible)
+     */
+    protected getEndpointPath(): string;
+    /**
+     * Groq uses standard OpenAI body format
+     */
+    protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+    /**
+     * Map HTTP errors with Groq-specific messages
+     */
+    protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+    /**
+     * Map connection errors with Groq-specific messages
+     */
+    protected mapConnectionError(_error: Error): ProviderError;
+}
+/**
+ * Create a Groq provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (GROQ_API_KEY)
+ * const provider = createGroqProvider();
+ *
+ * // With explicit API key
+ * const provider = createGroqProvider({ apiKey: 'gsk_...' });
+ *
+ * // With custom model
+ * const provider = createGroqProvider({
+ *   model: 'llama-3.2-90b-vision-preview'
+ * });
+ * ```
+ */
+export declare function createGroqProvider(config?: GroqProviderConfig): GroqProvider;
package/dist/providers/groq.js
ADDED
@@ -0,0 +1,123 @@
+/**
+ * Groq LLM Provider
+ *
+ * Implements LLMProvider interface for Groq models.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createGroqProvider({
+ *   model: 'llama-3.2-90b-vision-preview',
+ *   apiKey: process.env.GROQ_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid Groq API key
+ * - Default model is llama-3.2-8b-preview
+ * - Known for extremely fast inference speeds
+ */
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+// Default configuration
+const DEFAULT_MODEL = 'llama-3.2-8b-preview';
+const DEFAULT_BASE_URL = 'https://api.groq.com/openai';
+/**
+ * Groq LLM Provider
+ *
+ * Provides streaming chat completion using Groq's ultra-fast inference.
+ * Supports Llama, Mixtral, and other models optimized for speed.
+ */
+export class GroqProvider extends OpenAICompatibleProvider {
+    name = 'groq';
+    apiKey;
+    constructor(config = {}) {
+        const apiKey = config.apiKey ?? process.env.GROQ_API_KEY;
+        if (!apiKey) {
+            throw new ProviderError('Groq API key not found. Set GROQ_API_KEY environment variable or pass apiKey in config.', 'groq');
+        }
+        const baseConfig = {
+            baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+            model: config.model ?? DEFAULT_MODEL,
+            maxTokens: config.maxTokens,
+            timeout: config.timeout,
+        };
+        super(baseConfig);
+        this.apiKey = apiKey;
+    }
+    /**
+     * Groq authentication with Bearer token
+     */
+    getAuthHeaders() {
+        return {
+            Authorization: `Bearer ${this.apiKey}`,
+        };
+    }
+    /**
+     * Groq chat completions endpoint (OpenAI-compatible)
+     */
+    getEndpointPath() {
+        return '/v1/chat/completions';
+    }
+    /**
+     * Groq uses standard OpenAI body format
+     */
+    buildProviderSpecificBody(_options) {
+        return {};
+    }
+    /**
+     * Map HTTP errors with Groq-specific messages
+     */
+    mapHttpError(status, body, _model) {
+        let message = `Groq error (${String(status)})`;
+        try {
+            const parsed = JSON.parse(body);
+            if (parsed.error?.message) {
+                message = parsed.error.message;
+            }
+        }
+        catch {
+            message = body || message;
+        }
+        switch (status) {
+            case 401:
+                return new ProviderError('Invalid Groq API key. Check your GROQ_API_KEY.', 'groq', 401);
+            case 403:
+                return new ProviderError('Access denied. Check your Groq API key permissions.', 'groq', 403);
+            case 429:
+                return new ProviderError('Groq rate limit exceeded. Please wait and try again.', 'groq', 429);
+            case 500:
+            case 502:
+            case 503:
+                return new ProviderError('Groq service temporarily unavailable. Please try again later.', 'groq', status);
+            default:
+                return new ProviderError(message, 'groq', status);
+        }
+    }
+    /**
+     * Map connection errors with Groq-specific messages
+     */
+    mapConnectionError(_error) {
+        return new ProviderError('Failed to connect to Groq API. Check your internet connection.', 'groq');
+    }
+}
+/**
+ * Create a Groq provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (GROQ_API_KEY)
+ * const provider = createGroqProvider();
+ *
+ * // With explicit API key
+ * const provider = createGroqProvider({ apiKey: 'gsk_...' });
+ *
+ * // With custom model
+ * const provider = createGroqProvider({
+ *   model: 'llama-3.2-90b-vision-preview'
+ * });
+ * ```
+ */
+export function createGroqProvider(config = {}) {
+    return new GroqProvider(config);
+}
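For orientation, a minimal consumer sketch under stated assumptions: the import from the package root assumes the root re-exports the provider, and the `chat` async-generator method with `{ type: 'text' }` chunks is inferred from the "streaming chat completion" docs above, not a signature this diff confirms.

```typescript
import { createGroqProvider } from '@compilr-dev/agents';

// Reads GROQ_API_KEY from the environment; the constructor above throws
// a ProviderError when no key is found.
const provider = createGroqProvider({ model: 'llama-3.2-8b-preview' });

// Hypothetical streaming loop: `chat` and the chunk shape are assumptions.
for await (const chunk of provider.chat([{ role: 'user', content: 'Why is Groq fast?' }])) {
  if (chunk.type === 'text') process.stdout.write(chunk.text);
}
```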
package/dist/providers/index.d.ts
CHANGED
@@ -19,3 +19,13 @@ export type { GeminiNativeProviderConfig } from './gemini-native.js';
 export { GeminiNativeProvider as GeminiProvider } from './gemini-native.js';
 export { createGeminiNativeProvider as createGeminiProvider } from './gemini-native.js';
 export type { GeminiNativeProviderConfig as GeminiProviderConfig } from './gemini-native.js';
+export { TogetherProvider, createTogetherProvider } from './together.js';
+export type { TogetherProviderConfig } from './together.js';
+export { GroqProvider, createGroqProvider } from './groq.js';
+export type { GroqProviderConfig } from './groq.js';
+export { FireworksProvider, createFireworksProvider } from './fireworks.js';
+export type { FireworksProviderConfig } from './fireworks.js';
+export { PerplexityProvider, createPerplexityProvider } from './perplexity.js';
+export type { PerplexityProviderConfig } from './perplexity.js';
+export { OpenRouterProvider, createOpenRouterProvider } from './openrouter.js';
+export type { OpenRouterProviderConfig } from './openrouter.js';
package/dist/providers/index.js
CHANGED
@@ -18,3 +18,9 @@ export { GeminiNativeProvider, createGeminiNativeProvider } from './gemini-nativ
 // Re-export native as default Gemini provider
 export { GeminiNativeProvider as GeminiProvider } from './gemini-native.js';
 export { createGeminiNativeProvider as createGeminiProvider } from './gemini-native.js';
+// "Others" providers - OpenAI-compatible cloud APIs
+export { TogetherProvider, createTogetherProvider } from './together.js';
+export { GroqProvider, createGroqProvider } from './groq.js';
+export { FireworksProvider, createFireworksProvider } from './fireworks.js';
+export { PerplexityProvider, createPerplexityProvider } from './perplexity.js';
+export { OpenRouterProvider, createOpenRouterProvider } from './openrouter.js';
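These re-exports, together with the matching +10/+6 changes to dist/index.d.ts and dist/index.js in the file list, suggest all five new providers are importable from the package root; a sketch under that assumption:

```typescript
// Assumes the package root re-exports providers/index.js (implied by the
// +10/+6 changes to dist/index.* in the file list above).
import {
  createTogetherProvider,
  createGroqProvider,
  createFireworksProvider,
  createPerplexityProvider,
  createOpenRouterProvider,
} from '@compilr-dev/agents';
```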
package/dist/providers/openai-compatible.js
CHANGED
@@ -57,6 +57,15 @@ export class OpenAICompatibleProvider {
         const openaiMessages = this.convertMessages(messages);
         // Convert tools if provided
         const tools = options?.tools ? this.convertTools(options.tools) : undefined;
+        // Calculate payload sizes for debugging
+        // Note: OpenAI format has system message in messages array, not separate
+        const systemMsg = openaiMessages.find((m) => m.role === 'system');
+        const systemChars = systemMsg && typeof systemMsg.content === 'string' ? systemMsg.content.length : 0;
+        const debugPayload = {
+            systemChars,
+            contentsChars: JSON.stringify(openaiMessages).length,
+            toolsChars: tools ? JSON.stringify(tools).length : 0,
+        };
         // Build request body
         const body = {
             model,
@@ -143,7 +152,9 @@ export class OpenAICompatibleProvider {
             // Yield done chunk with usage
             yield {
                 type: 'done',
-                usage
+                usage: usage
+                    ? { ...usage, debugPayload }
+                    : { inputTokens: 0, outputTokens: 0, debugPayload },
             };
         }
         catch (error) {
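Net effect of these two hunks: every 'done' chunk now carries usage.debugPayload with character counts for the system message, the serialized message array, and the tool definitions, even when the upstream API returned no usage block. A hedged reading sketch (the `chat` entry point is an assumption; the chunk shape matches the hunk above):

```typescript
import { createGroqProvider } from '@compilr-dev/agents';

const provider = createGroqProvider();
// `chat` as the streaming method is assumed; `debugPayload` on the done
// chunk is what the hunk above adds in 0.3.5.
for await (const chunk of provider.chat([{ role: 'user', content: 'ping' }])) {
  if (chunk.type === 'done') {
    const { systemChars, contentsChars, toolsChars } = chunk.usage.debugPayload;
    console.log(`system=${systemChars} messages=${contentsChars} tools=${toolsChars} chars`);
  }
}
```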
package/dist/providers/openrouter.d.ts
ADDED
@@ -0,0 +1,95 @@
+/**
+ * OpenRouter LLM Provider
+ *
+ * Implements LLMProvider interface for OpenRouter's unified API.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createOpenRouterProvider({
+ *   model: 'anthropic/claude-3.5-sonnet',
+ *   apiKey: process.env.OPENROUTER_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid OpenRouter API key
+ * - Default model is meta-llama/llama-3.1-8b-instruct
+ * - Provides access to 100+ models from multiple providers
+ * - Includes custom headers for ranking and attribution
+ */
+import type { ChatOptions } from './types.js';
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+/**
+ * Configuration for OpenRouterProvider
+ */
+export interface OpenRouterProviderConfig {
+    /** OpenRouter API key (falls back to OPENROUTER_API_KEY env var) */
+    apiKey?: string;
+    /** Base URL for OpenRouter API (default: https://openrouter.ai/api) */
+    baseUrl?: string;
+    /** Default model to use (default: meta-llama/llama-3.1-8b-instruct) */
+    model?: string;
+    /** Default max tokens (default: 4096) */
+    maxTokens?: number;
+    /** Request timeout in milliseconds (default: 120000) */
+    timeout?: number;
+    /** Site URL for OpenRouter rankings (optional) */
+    siteUrl?: string;
+    /** Site name for OpenRouter rankings (optional) */
+    siteName?: string;
+}
+/**
+ * OpenRouter LLM Provider
+ *
+ * Provides streaming chat completion via OpenRouter's unified API.
+ * Access 100+ models from Claude, GPT, Llama, Mistral, and more.
+ */
+export declare class OpenRouterProvider extends OpenAICompatibleProvider {
+    readonly name = "openrouter";
+    private readonly apiKey;
+    private readonly siteUrl?;
+    private readonly siteName?;
+    constructor(config?: OpenRouterProviderConfig);
+    /**
+     * OpenRouter authentication with Bearer token and custom headers
+     */
+    protected getAuthHeaders(): Record<string, string>;
+    /**
+     * OpenRouter chat completions endpoint (OpenAI-compatible)
+     */
+    protected getEndpointPath(): string;
+    /**
+     * OpenRouter uses standard OpenAI body format
+     */
+    protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+    /**
+     * Map HTTP errors with OpenRouter-specific messages
+     */
+    protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+    /**
+     * Map connection errors with OpenRouter-specific messages
+     */
+    protected mapConnectionError(_error: Error): ProviderError;
+}
+/**
+ * Create an OpenRouter provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (OPENROUTER_API_KEY)
+ * const provider = createOpenRouterProvider();
+ *
+ * // With explicit API key
+ * const provider = createOpenRouterProvider({ apiKey: 'sk-or-...' });
+ *
+ * // With custom model and site attribution
+ * const provider = createOpenRouterProvider({
+ *   model: 'anthropic/claude-3.5-sonnet',
+ *   siteUrl: 'https://myapp.com',
+ *   siteName: 'My App'
+ * });
+ * ```
+ */
+export declare function createOpenRouterProvider(config?: OpenRouterProviderConfig): OpenRouterProvider;
package/dist/providers/openrouter.js
ADDED
@@ -0,0 +1,138 @@
+/**
+ * OpenRouter LLM Provider
+ *
+ * Implements LLMProvider interface for OpenRouter's unified API.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createOpenRouterProvider({
+ *   model: 'anthropic/claude-3.5-sonnet',
+ *   apiKey: process.env.OPENROUTER_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid OpenRouter API key
+ * - Default model is meta-llama/llama-3.1-8b-instruct
+ * - Provides access to 100+ models from multiple providers
+ * - Includes custom headers for ranking and attribution
+ */
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+// Default configuration
+const DEFAULT_MODEL = 'meta-llama/llama-3.1-8b-instruct';
+const DEFAULT_BASE_URL = 'https://openrouter.ai/api';
+/**
+ * OpenRouter LLM Provider
+ *
+ * Provides streaming chat completion via OpenRouter's unified API.
+ * Access 100+ models from Claude, GPT, Llama, Mistral, and more.
+ */
+export class OpenRouterProvider extends OpenAICompatibleProvider {
+    name = 'openrouter';
+    apiKey;
+    siteUrl;
+    siteName;
+    constructor(config = {}) {
+        const apiKey = config.apiKey ?? process.env.OPENROUTER_API_KEY;
+        if (!apiKey) {
+            throw new ProviderError('OpenRouter API key not found. Set OPENROUTER_API_KEY environment variable or pass apiKey in config.', 'openrouter');
+        }
+        const baseConfig = {
+            baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+            model: config.model ?? DEFAULT_MODEL,
+            maxTokens: config.maxTokens,
+            timeout: config.timeout,
+        };
+        super(baseConfig);
+        this.apiKey = apiKey;
+        this.siteUrl = config.siteUrl;
+        this.siteName = config.siteName;
+    }
+    /**
+     * OpenRouter authentication with Bearer token and custom headers
+     */
+    getAuthHeaders() {
+        const headers = {
+            Authorization: `Bearer ${this.apiKey}`,
+        };
+        // OpenRouter-specific headers for ranking and attribution
+        if (this.siteUrl) {
+            headers['HTTP-Referer'] = this.siteUrl;
+        }
+        if (this.siteName) {
+            headers['X-Title'] = this.siteName;
+        }
+        return headers;
+    }
+    /**
+     * OpenRouter chat completions endpoint (OpenAI-compatible)
+     */
+    getEndpointPath() {
+        return '/v1/chat/completions';
+    }
+    /**
+     * OpenRouter uses standard OpenAI body format
+     */
+    buildProviderSpecificBody(_options) {
+        return {};
+    }
+    /**
+     * Map HTTP errors with OpenRouter-specific messages
+     */
+    mapHttpError(status, body, _model) {
+        let message = `OpenRouter error (${String(status)})`;
+        try {
+            const parsed = JSON.parse(body);
+            if (parsed.error?.message) {
+                message = parsed.error.message;
+            }
+        }
+        catch {
+            message = body || message;
+        }
+        switch (status) {
+            case 401:
+                return new ProviderError('Invalid OpenRouter API key. Check your OPENROUTER_API_KEY.', 'openrouter', 401);
+            case 403:
+                return new ProviderError('Access denied. Check your OpenRouter API key permissions or credits.', 'openrouter', 403);
+            case 429:
+                return new ProviderError('OpenRouter rate limit exceeded. Please wait and try again.', 'openrouter', 429);
+            case 500:
+            case 502:
+            case 503:
+                return new ProviderError('OpenRouter service temporarily unavailable. Please try again later.', 'openrouter', status);
+            default:
+                return new ProviderError(message, 'openrouter', status);
+        }
+    }
+    /**
+     * Map connection errors with OpenRouter-specific messages
+     */
+    mapConnectionError(_error) {
+        return new ProviderError('Failed to connect to OpenRouter API. Check your internet connection.', 'openrouter');
+    }
+}
+/**
+ * Create an OpenRouter provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (OPENROUTER_API_KEY)
+ * const provider = createOpenRouterProvider();
+ *
+ * // With explicit API key
+ * const provider = createOpenRouterProvider({ apiKey: 'sk-or-...' });
+ *
+ * // With custom model and site attribution
+ * const provider = createOpenRouterProvider({
+ *   model: 'anthropic/claude-3.5-sonnet',
+ *   siteUrl: 'https://myapp.com',
+ *   siteName: 'My App'
+ * });
+ * ```
+ */
+export function createOpenRouterProvider(config = {}) {
+    return new OpenRouterProvider(config);
+}
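The site attribution flow is the one OpenRouter-specific wrinkle: when siteUrl or siteName is configured, getAuthHeaders() above forwards them as the HTTP-Referer and X-Title headers OpenRouter uses for rankings. Restating the file's own @example (the root import path is an assumption):

```typescript
import { createOpenRouterProvider } from '@compilr-dev/agents';

// siteUrl/siteName are optional; when set they become the HTTP-Referer
// and X-Title headers built in getAuthHeaders() above.
const provider = createOpenRouterProvider({
  model: 'anthropic/claude-3.5-sonnet',
  siteUrl: 'https://myapp.com',
  siteName: 'My App',
});
```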
package/dist/providers/perplexity.d.ts
ADDED
@@ -0,0 +1,86 @@
+/**
+ * Perplexity LLM Provider
+ *
+ * Implements LLMProvider interface for Perplexity models.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createPerplexityProvider({
+ *   model: 'llama-3.1-sonar-large-128k-online',
+ *   apiKey: process.env.PERPLEXITY_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid Perplexity API key
+ * - Default model is llama-3.1-sonar-small-128k-online
+ * - Sonar models have built-in web search capabilities
+ */
+import type { ChatOptions } from './types.js';
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+/**
+ * Configuration for PerplexityProvider
+ */
+export interface PerplexityProviderConfig {
+    /** Perplexity API key (falls back to PERPLEXITY_API_KEY env var) */
+    apiKey?: string;
+    /** Base URL for Perplexity API (default: https://api.perplexity.ai) */
+    baseUrl?: string;
+    /** Default model to use (default: llama-3.1-sonar-small-128k-online) */
+    model?: string;
+    /** Default max tokens (default: 4096) */
+    maxTokens?: number;
+    /** Request timeout in milliseconds (default: 120000) */
+    timeout?: number;
+}
+/**
+ * Perplexity LLM Provider
+ *
+ * Provides streaming chat completion using Perplexity's API.
+ * Sonar models include real-time web search capabilities.
+ */
+export declare class PerplexityProvider extends OpenAICompatibleProvider {
+    readonly name = "perplexity";
+    private readonly apiKey;
+    constructor(config?: PerplexityProviderConfig);
+    /**
+     * Perplexity authentication with Bearer token
+     */
+    protected getAuthHeaders(): Record<string, string>;
+    /**
+     * Perplexity chat completions endpoint (OpenAI-compatible)
+     */
+    protected getEndpointPath(): string;
+    /**
+     * Perplexity uses standard OpenAI body format
+     */
+    protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+    /**
+     * Map HTTP errors with Perplexity-specific messages
+     */
+    protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+    /**
+     * Map connection errors with Perplexity-specific messages
+     */
+    protected mapConnectionError(_error: Error): ProviderError;
+}
+/**
+ * Create a Perplexity provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (PERPLEXITY_API_KEY)
+ * const provider = createPerplexityProvider();
+ *
+ * // With explicit API key
+ * const provider = createPerplexityProvider({ apiKey: 'pplx-...' });
+ *
+ * // With custom model (sonar models have web search)
+ * const provider = createPerplexityProvider({
+ *   model: 'llama-3.1-sonar-large-128k-online'
+ * });
+ * ```
+ */
+export declare function createPerplexityProvider(config?: PerplexityProviderConfig): PerplexityProvider;
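A short usage sketch drawn from the @example above (the root import path is an assumption); per the @remarks, the -online sonar models run web search server-side, so nothing beyond the model name is needed to enable it:

```typescript
import { createPerplexityProvider } from '@compilr-dev/agents';

// Sonar -online models include built-in web search (see @remarks above).
const provider = createPerplexityProvider({
  model: 'llama-3.1-sonar-large-128k-online',
});
```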