@compilr-dev/agents 0.3.1 → 0.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent.d.ts +107 -2
- package/dist/agent.js +151 -22
- package/dist/context/manager.d.ts +8 -0
- package/dist/context/manager.js +25 -2
- package/dist/errors.d.ts +20 -1
- package/dist/errors.js +44 -2
- package/dist/index.d.ts +16 -1
- package/dist/index.js +13 -1
- package/dist/messages/index.d.ts +12 -5
- package/dist/messages/index.js +53 -15
- package/dist/providers/claude.js +7 -0
- package/dist/providers/fireworks.d.ts +86 -0
- package/dist/providers/fireworks.js +123 -0
- package/dist/providers/gemini-native.d.ts +86 -0
- package/dist/providers/gemini-native.js +374 -0
- package/dist/providers/groq.d.ts +86 -0
- package/dist/providers/groq.js +123 -0
- package/dist/providers/index.d.ts +17 -2
- package/dist/providers/index.js +13 -2
- package/dist/providers/openai-compatible.js +12 -1
- package/dist/providers/openrouter.d.ts +95 -0
- package/dist/providers/openrouter.js +138 -0
- package/dist/providers/perplexity.d.ts +86 -0
- package/dist/providers/perplexity.js +123 -0
- package/dist/providers/together.d.ts +86 -0
- package/dist/providers/together.js +123 -0
- package/dist/providers/types.d.ts +19 -0
- package/dist/state/agent-state.d.ts +1 -0
- package/dist/state/agent-state.js +2 -0
- package/dist/state/serializer.js +20 -2
- package/dist/state/types.d.ts +5 -0
- package/dist/tools/builtin/ask-user-simple.js +1 -0
- package/dist/tools/builtin/ask-user.js +1 -0
- package/dist/tools/builtin/bash.js +123 -2
- package/dist/tools/builtin/shell-manager.d.ts +15 -0
- package/dist/tools/builtin/shell-manager.js +51 -0
- package/dist/tools/builtin/suggest.js +1 -0
- package/dist/tools/builtin/todo.js +2 -0
- package/dist/tools/define.d.ts +6 -0
- package/dist/tools/define.js +1 -0
- package/dist/tools/types.d.ts +19 -0
- package/dist/utils/index.d.ts +119 -4
- package/dist/utils/index.js +164 -13
- package/package.json +7 -1
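The headline change in this range is a set of new OpenAI-compatible providers (OpenRouter, Perplexity, Together, plus Groq and Fireworks) and a native Gemini provider. Below is a minimal sketch of constructing the three providers whose diffs follow, using only the factory signatures visible in those diffs; it assumes the factories are re-exported from the package root (the `providers/index.*` changes suggest this, but the export list itself is not shown here).

```typescript
// Assumption: factories are re-exported from the package root.
import {
  createOpenRouterProvider,
  createPerplexityProvider,
  createTogetherProvider,
} from '@compilr-dev/agents';

// OpenRouter: optional site attribution headers (HTTP-Referer / X-Title)
const openrouter = createOpenRouterProvider({
  model: 'anthropic/claude-3.5-sonnet',
  siteUrl: 'https://myapp.com',
  siteName: 'My App',
});

// Perplexity: sonar models include built-in web search
const perplexity = createPerplexityProvider({
  model: 'llama-3.1-sonar-large-128k-online',
});

// Together AI: falls back to TOGETHER_API_KEY when apiKey is omitted
const together = createTogetherProvider({
  model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
});
```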
package/dist/providers/openrouter.d.ts
@@ -0,0 +1,95 @@
+/**
+ * OpenRouter LLM Provider
+ *
+ * Implements LLMProvider interface for OpenRouter's unified API.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createOpenRouterProvider({
+ *   model: 'anthropic/claude-3.5-sonnet',
+ *   apiKey: process.env.OPENROUTER_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid OpenRouter API key
+ * - Default model is meta-llama/llama-3.1-8b-instruct
+ * - Provides access to 100+ models from multiple providers
+ * - Includes custom headers for ranking and attribution
+ */
+import type { ChatOptions } from './types.js';
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+/**
+ * Configuration for OpenRouterProvider
+ */
+export interface OpenRouterProviderConfig {
+    /** OpenRouter API key (falls back to OPENROUTER_API_KEY env var) */
+    apiKey?: string;
+    /** Base URL for OpenRouter API (default: https://openrouter.ai/api) */
+    baseUrl?: string;
+    /** Default model to use (default: meta-llama/llama-3.1-8b-instruct) */
+    model?: string;
+    /** Default max tokens (default: 4096) */
+    maxTokens?: number;
+    /** Request timeout in milliseconds (default: 120000) */
+    timeout?: number;
+    /** Site URL for OpenRouter rankings (optional) */
+    siteUrl?: string;
+    /** Site name for OpenRouter rankings (optional) */
+    siteName?: string;
+}
+/**
+ * OpenRouter LLM Provider
+ *
+ * Provides streaming chat completion via OpenRouter's unified API.
+ * Access 100+ models from Claude, GPT, Llama, Mistral, and more.
+ */
+export declare class OpenRouterProvider extends OpenAICompatibleProvider {
+    readonly name = "openrouter";
+    private readonly apiKey;
+    private readonly siteUrl?;
+    private readonly siteName?;
+    constructor(config?: OpenRouterProviderConfig);
+    /**
+     * OpenRouter authentication with Bearer token and custom headers
+     */
+    protected getAuthHeaders(): Record<string, string>;
+    /**
+     * OpenRouter chat completions endpoint (OpenAI-compatible)
+     */
+    protected getEndpointPath(): string;
+    /**
+     * OpenRouter uses standard OpenAI body format
+     */
+    protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+    /**
+     * Map HTTP errors with OpenRouter-specific messages
+     */
+    protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+    /**
+     * Map connection errors with OpenRouter-specific messages
+     */
+    protected mapConnectionError(_error: Error): ProviderError;
+}
+/**
+ * Create an OpenRouter provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (OPENROUTER_API_KEY)
+ * const provider = createOpenRouterProvider();
+ *
+ * // With explicit API key
+ * const provider = createOpenRouterProvider({ apiKey: 'sk-or-...' });
+ *
+ * // With custom model and site attribution
+ * const provider = createOpenRouterProvider({
+ *   model: 'anthropic/claude-3.5-sonnet',
+ *   siteUrl: 'https://myapp.com',
+ *   siteName: 'My App'
+ * });
+ * ```
+ */
+export declare function createOpenRouterProvider(config?: OpenRouterProviderConfig): OpenRouterProvider;
package/dist/providers/openrouter.js
@@ -0,0 +1,138 @@
+/**
+ * OpenRouter LLM Provider
+ *
+ * Implements LLMProvider interface for OpenRouter's unified API.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createOpenRouterProvider({
+ *   model: 'anthropic/claude-3.5-sonnet',
+ *   apiKey: process.env.OPENROUTER_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid OpenRouter API key
+ * - Default model is meta-llama/llama-3.1-8b-instruct
+ * - Provides access to 100+ models from multiple providers
+ * - Includes custom headers for ranking and attribution
+ */
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+// Default configuration
+const DEFAULT_MODEL = 'meta-llama/llama-3.1-8b-instruct';
+const DEFAULT_BASE_URL = 'https://openrouter.ai/api';
+/**
+ * OpenRouter LLM Provider
+ *
+ * Provides streaming chat completion via OpenRouter's unified API.
+ * Access 100+ models from Claude, GPT, Llama, Mistral, and more.
+ */
+export class OpenRouterProvider extends OpenAICompatibleProvider {
+    name = 'openrouter';
+    apiKey;
+    siteUrl;
+    siteName;
+    constructor(config = {}) {
+        const apiKey = config.apiKey ?? process.env.OPENROUTER_API_KEY;
+        if (!apiKey) {
+            throw new ProviderError('OpenRouter API key not found. Set OPENROUTER_API_KEY environment variable or pass apiKey in config.', 'openrouter');
+        }
+        const baseConfig = {
+            baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+            model: config.model ?? DEFAULT_MODEL,
+            maxTokens: config.maxTokens,
+            timeout: config.timeout,
+        };
+        super(baseConfig);
+        this.apiKey = apiKey;
+        this.siteUrl = config.siteUrl;
+        this.siteName = config.siteName;
+    }
+    /**
+     * OpenRouter authentication with Bearer token and custom headers
+     */
+    getAuthHeaders() {
+        const headers = {
+            Authorization: `Bearer ${this.apiKey}`,
+        };
+        // OpenRouter-specific headers for ranking and attribution
+        if (this.siteUrl) {
+            headers['HTTP-Referer'] = this.siteUrl;
+        }
+        if (this.siteName) {
+            headers['X-Title'] = this.siteName;
+        }
+        return headers;
+    }
+    /**
+     * OpenRouter chat completions endpoint (OpenAI-compatible)
+     */
+    getEndpointPath() {
+        return '/v1/chat/completions';
+    }
+    /**
+     * OpenRouter uses standard OpenAI body format
+     */
+    buildProviderSpecificBody(_options) {
+        return {};
+    }
+    /**
+     * Map HTTP errors with OpenRouter-specific messages
+     */
+    mapHttpError(status, body, _model) {
+        let message = `OpenRouter error (${String(status)})`;
+        try {
+            const parsed = JSON.parse(body);
+            if (parsed.error?.message) {
+                message = parsed.error.message;
+            }
+        }
+        catch {
+            message = body || message;
+        }
+        switch (status) {
+            case 401:
+                return new ProviderError('Invalid OpenRouter API key. Check your OPENROUTER_API_KEY.', 'openrouter', 401);
+            case 403:
+                return new ProviderError('Access denied. Check your OpenRouter API key permissions or credits.', 'openrouter', 403);
+            case 429:
+                return new ProviderError('OpenRouter rate limit exceeded. Please wait and try again.', 'openrouter', 429);
+            case 500:
+            case 502:
+            case 503:
+                return new ProviderError('OpenRouter service temporarily unavailable. Please try again later.', 'openrouter', status);
+            default:
+                return new ProviderError(message, 'openrouter', status);
+        }
+    }
+    /**
+     * Map connection errors with OpenRouter-specific messages
+     */
+    mapConnectionError(_error) {
+        return new ProviderError('Failed to connect to OpenRouter API. Check your internet connection.', 'openrouter');
+    }
+}
+/**
+ * Create an OpenRouter provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (OPENROUTER_API_KEY)
+ * const provider = createOpenRouterProvider();
+ *
+ * // With explicit API key
+ * const provider = createOpenRouterProvider({ apiKey: 'sk-or-...' });
+ *
+ * // With custom model and site attribution
+ * const provider = createOpenRouterProvider({
+ *   model: 'anthropic/claude-3.5-sonnet',
+ *   siteUrl: 'https://myapp.com',
+ *   siteName: 'My App'
+ * });
+ * ```
+ */
+export function createOpenRouterProvider(config = {}) {
+    return new OpenRouterProvider(config);
+}
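The constructor above throws a ProviderError when no key can be resolved, so provider setup is a natural place to catch configuration problems. A small sketch, assuming only the constructor behavior shown above and a root re-export of the factory:

```typescript
import { createOpenRouterProvider } from '@compilr-dev/agents';

// Fails fast with "OpenRouter API key not found..." when neither the config
// value nor OPENROUTER_API_KEY is set.
try {
  const provider = createOpenRouterProvider(); // reads OPENROUTER_API_KEY
  console.log(`configured provider: ${provider.name}`); // "openrouter"
} catch (err) {
  console.error('Provider setup failed:', (err as Error).message);
}
```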
package/dist/providers/perplexity.d.ts
@@ -0,0 +1,86 @@
+/**
+ * Perplexity LLM Provider
+ *
+ * Implements LLMProvider interface for Perplexity models.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createPerplexityProvider({
+ *   model: 'llama-3.1-sonar-large-128k-online',
+ *   apiKey: process.env.PERPLEXITY_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid Perplexity API key
+ * - Default model is llama-3.1-sonar-small-128k-online
+ * - Sonar models have built-in web search capabilities
+ */
+import type { ChatOptions } from './types.js';
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+/**
+ * Configuration for PerplexityProvider
+ */
+export interface PerplexityProviderConfig {
+    /** Perplexity API key (falls back to PERPLEXITY_API_KEY env var) */
+    apiKey?: string;
+    /** Base URL for Perplexity API (default: https://api.perplexity.ai) */
+    baseUrl?: string;
+    /** Default model to use (default: llama-3.1-sonar-small-128k-online) */
+    model?: string;
+    /** Default max tokens (default: 4096) */
+    maxTokens?: number;
+    /** Request timeout in milliseconds (default: 120000) */
+    timeout?: number;
+}
+/**
+ * Perplexity LLM Provider
+ *
+ * Provides streaming chat completion using Perplexity's API.
+ * Sonar models include real-time web search capabilities.
+ */
+export declare class PerplexityProvider extends OpenAICompatibleProvider {
+    readonly name = "perplexity";
+    private readonly apiKey;
+    constructor(config?: PerplexityProviderConfig);
+    /**
+     * Perplexity authentication with Bearer token
+     */
+    protected getAuthHeaders(): Record<string, string>;
+    /**
+     * Perplexity chat completions endpoint (OpenAI-compatible)
+     */
+    protected getEndpointPath(): string;
+    /**
+     * Perplexity uses standard OpenAI body format
+     */
+    protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+    /**
+     * Map HTTP errors with Perplexity-specific messages
+     */
+    protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+    /**
+     * Map connection errors with Perplexity-specific messages
+     */
+    protected mapConnectionError(_error: Error): ProviderError;
+}
+/**
+ * Create a Perplexity provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (PERPLEXITY_API_KEY)
+ * const provider = createPerplexityProvider();
+ *
+ * // With explicit API key
+ * const provider = createPerplexityProvider({ apiKey: 'pplx-...' });
+ *
+ * // With custom model (sonar models have web search)
+ * const provider = createPerplexityProvider({
+ *   model: 'llama-3.1-sonar-large-128k-online'
+ * });
+ * ```
+ */
+export declare function createPerplexityProvider(config?: PerplexityProviderConfig): PerplexityProvider;
package/dist/providers/perplexity.js
@@ -0,0 +1,123 @@
+/**
+ * Perplexity LLM Provider
+ *
+ * Implements LLMProvider interface for Perplexity models.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createPerplexityProvider({
+ *   model: 'llama-3.1-sonar-large-128k-online',
+ *   apiKey: process.env.PERPLEXITY_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid Perplexity API key
+ * - Default model is llama-3.1-sonar-small-128k-online
+ * - Sonar models have built-in web search capabilities
+ */
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+// Default configuration
+const DEFAULT_MODEL = 'llama-3.1-sonar-small-128k-online';
+const DEFAULT_BASE_URL = 'https://api.perplexity.ai';
+/**
+ * Perplexity LLM Provider
+ *
+ * Provides streaming chat completion using Perplexity's API.
+ * Sonar models include real-time web search capabilities.
+ */
+export class PerplexityProvider extends OpenAICompatibleProvider {
+    name = 'perplexity';
+    apiKey;
+    constructor(config = {}) {
+        const apiKey = config.apiKey ?? process.env.PERPLEXITY_API_KEY;
+        if (!apiKey) {
+            throw new ProviderError('Perplexity API key not found. Set PERPLEXITY_API_KEY environment variable or pass apiKey in config.', 'perplexity');
+        }
+        const baseConfig = {
+            baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+            model: config.model ?? DEFAULT_MODEL,
+            maxTokens: config.maxTokens,
+            timeout: config.timeout,
+        };
+        super(baseConfig);
+        this.apiKey = apiKey;
+    }
+    /**
+     * Perplexity authentication with Bearer token
+     */
+    getAuthHeaders() {
+        return {
+            Authorization: `Bearer ${this.apiKey}`,
+        };
+    }
+    /**
+     * Perplexity chat completions endpoint (OpenAI-compatible)
+     */
+    getEndpointPath() {
+        return '/chat/completions';
+    }
+    /**
+     * Perplexity uses standard OpenAI body format
+     */
+    buildProviderSpecificBody(_options) {
+        return {};
+    }
+    /**
+     * Map HTTP errors with Perplexity-specific messages
+     */
+    mapHttpError(status, body, _model) {
+        let message = `Perplexity error (${String(status)})`;
+        try {
+            const parsed = JSON.parse(body);
+            if (parsed.error?.message) {
+                message = parsed.error.message;
+            }
+        }
+        catch {
+            message = body || message;
+        }
+        switch (status) {
+            case 401:
+                return new ProviderError('Invalid Perplexity API key. Check your PERPLEXITY_API_KEY.', 'perplexity', 401);
+            case 403:
+                return new ProviderError('Access denied. Check your Perplexity API key permissions.', 'perplexity', 403);
+            case 429:
+                return new ProviderError('Perplexity rate limit exceeded. Please wait and try again.', 'perplexity', 429);
+            case 500:
+            case 502:
+            case 503:
+                return new ProviderError('Perplexity service temporarily unavailable. Please try again later.', 'perplexity', status);
+            default:
+                return new ProviderError(message, 'perplexity', status);
+        }
+    }
+    /**
+     * Map connection errors with Perplexity-specific messages
+     */
+    mapConnectionError(_error) {
+        return new ProviderError('Failed to connect to Perplexity API. Check your internet connection.', 'perplexity');
+    }
+}
+/**
+ * Create a Perplexity provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (PERPLEXITY_API_KEY)
+ * const provider = createPerplexityProvider();
+ *
+ * // With explicit API key
+ * const provider = createPerplexityProvider({ apiKey: 'pplx-...' });
+ *
+ * // With custom model (sonar models have web search)
+ * const provider = createPerplexityProvider({
+ *   model: 'llama-3.1-sonar-large-128k-online'
+ * });
+ * ```
+ */
+export function createPerplexityProvider(config = {}) {
+    return new PerplexityProvider(config);
+}
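One subtle difference among these otherwise near-identical subclasses: Perplexity's getEndpointPath() returns '/chat/completions' with no '/v1' segment, while OpenRouter and Together return '/v1/chat/completions'. That matters when overriding baseUrl, e.g. to route through a proxy, since the provider appends its own path. A sketch with a hypothetical proxy URL:

```typescript
import { createPerplexityProvider } from '@compilr-dev/agents';

// The provider appends '/chat/completions' to this base URL, so the proxy
// must expose that same path shape. URL and timeout here are illustrative.
const provider = createPerplexityProvider({
  baseUrl: 'https://llm-proxy.internal.example', // hypothetical proxy
  timeout: 30_000, // default is 120000 ms per the config docs above
});
```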
package/dist/providers/together.d.ts
@@ -0,0 +1,86 @@
+/**
+ * Together AI LLM Provider
+ *
+ * Implements LLMProvider interface for Together AI models.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createTogetherProvider({
+ *   model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
+ *   apiKey: process.env.TOGETHER_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid Together AI API key
+ * - Default model is meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
+ * - Supports Llama, Mistral, Qwen, and other open models
+ */
+import type { ChatOptions } from './types.js';
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+/**
+ * Configuration for TogetherProvider
+ */
+export interface TogetherProviderConfig {
+    /** Together AI API key (falls back to TOGETHER_API_KEY env var) */
+    apiKey?: string;
+    /** Base URL for Together API (default: https://api.together.xyz) */
+    baseUrl?: string;
+    /** Default model to use (default: meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo) */
+    model?: string;
+    /** Default max tokens (default: 4096) */
+    maxTokens?: number;
+    /** Request timeout in milliseconds (default: 120000) */
+    timeout?: number;
+}
+/**
+ * Together AI LLM Provider
+ *
+ * Provides streaming chat completion using Together AI models.
+ * Supports Llama, Mistral, Qwen, and other open-source models.
+ */
+export declare class TogetherProvider extends OpenAICompatibleProvider {
+    readonly name = "together";
+    private readonly apiKey;
+    constructor(config?: TogetherProviderConfig);
+    /**
+     * Together AI authentication with Bearer token
+     */
+    protected getAuthHeaders(): Record<string, string>;
+    /**
+     * Together AI chat completions endpoint (OpenAI-compatible)
+     */
+    protected getEndpointPath(): string;
+    /**
+     * Together AI uses standard OpenAI body format
+     */
+    protected buildProviderSpecificBody(_options?: ChatOptions): Record<string, unknown>;
+    /**
+     * Map HTTP errors with Together AI-specific messages
+     */
+    protected mapHttpError(status: number, body: string, _model: string): ProviderError;
+    /**
+     * Map connection errors with Together AI-specific messages
+     */
+    protected mapConnectionError(_error: Error): ProviderError;
+}
+/**
+ * Create a Together AI provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (TOGETHER_API_KEY)
+ * const provider = createTogetherProvider();
+ *
+ * // With explicit API key
+ * const provider = createTogetherProvider({ apiKey: 'xxx-...' });
+ *
+ * // With custom model
+ * const provider = createTogetherProvider({
+ *   model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
+ * });
+ * ```
+ */
+export declare function createTogetherProvider(config?: TogetherProviderConfig): TogetherProvider;
package/dist/providers/together.js
@@ -0,0 +1,123 @@
+/**
+ * Together AI LLM Provider
+ *
+ * Implements LLMProvider interface for Together AI models.
+ * Extends OpenAICompatibleProvider for shared functionality.
+ *
+ * @example
+ * ```typescript
+ * const provider = createTogetherProvider({
+ *   model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
+ *   apiKey: process.env.TOGETHER_API_KEY
+ * });
+ * ```
+ *
+ * @remarks
+ * - Requires valid Together AI API key
+ * - Default model is meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
+ * - Supports Llama, Mistral, Qwen, and other open models
+ */
+import { ProviderError } from '../errors.js';
+import { OpenAICompatibleProvider } from './openai-compatible.js';
+// Default configuration
+const DEFAULT_MODEL = 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo';
+const DEFAULT_BASE_URL = 'https://api.together.xyz';
+/**
+ * Together AI LLM Provider
+ *
+ * Provides streaming chat completion using Together AI models.
+ * Supports Llama, Mistral, Qwen, and other open-source models.
+ */
+export class TogetherProvider extends OpenAICompatibleProvider {
+    name = 'together';
+    apiKey;
+    constructor(config = {}) {
+        const apiKey = config.apiKey ?? process.env.TOGETHER_API_KEY;
+        if (!apiKey) {
+            throw new ProviderError('Together AI API key not found. Set TOGETHER_API_KEY environment variable or pass apiKey in config.', 'together');
+        }
+        const baseConfig = {
+            baseUrl: config.baseUrl ?? DEFAULT_BASE_URL,
+            model: config.model ?? DEFAULT_MODEL,
+            maxTokens: config.maxTokens,
+            timeout: config.timeout,
+        };
+        super(baseConfig);
+        this.apiKey = apiKey;
+    }
+    /**
+     * Together AI authentication with Bearer token
+     */
+    getAuthHeaders() {
+        return {
+            Authorization: `Bearer ${this.apiKey}`,
+        };
+    }
+    /**
+     * Together AI chat completions endpoint (OpenAI-compatible)
+     */
+    getEndpointPath() {
+        return '/v1/chat/completions';
+    }
+    /**
+     * Together AI uses standard OpenAI body format
+     */
+    buildProviderSpecificBody(_options) {
+        return {};
+    }
+    /**
+     * Map HTTP errors with Together AI-specific messages
+     */
+    mapHttpError(status, body, _model) {
+        let message = `Together AI error (${String(status)})`;
+        try {
+            const parsed = JSON.parse(body);
+            if (parsed.error?.message) {
+                message = parsed.error.message;
+            }
+        }
+        catch {
+            message = body || message;
+        }
+        switch (status) {
+            case 401:
+                return new ProviderError('Invalid Together AI API key. Check your TOGETHER_API_KEY.', 'together', 401);
+            case 403:
+                return new ProviderError('Access denied. Check your Together AI API key permissions.', 'together', 403);
+            case 429:
+                return new ProviderError('Together AI rate limit exceeded. Please wait and try again.', 'together', 429);
+            case 500:
+            case 502:
+            case 503:
+                return new ProviderError('Together AI service temporarily unavailable. Please try again later.', 'together', status);
+            default:
+                return new ProviderError(message, 'together', status);
+        }
+    }
+    /**
+     * Map connection errors with Together AI-specific messages
+     */
+    mapConnectionError(_error) {
+        return new ProviderError('Failed to connect to Together AI API. Check your internet connection.', 'together');
+    }
+}
+/**
+ * Create a Together AI provider instance
+ *
+ * @example
+ * ```typescript
+ * // Using environment variable (TOGETHER_API_KEY)
+ * const provider = createTogetherProvider();
+ *
+ * // With explicit API key
+ * const provider = createTogetherProvider({ apiKey: 'xxx-...' });
+ *
+ * // With custom model
+ * const provider = createTogetherProvider({
+ *   model: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
+ * });
+ * ```
+ */
+export function createTogetherProvider(config = {}) {
+    return new TogetherProvider(config);
+}
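All three mapHttpError implementations surface 429 and 500/502/503 as "wait and try again" conditions, which suggests a simple retry predicate on the caller's side. A sketch that assumes ProviderError exposes the status it was constructed with as a `status` property; that property is not shown in this diff:

```typescript
// Hypothetical retry guard over the statuses these providers treat as transient.
function isRetryableProviderError(err: unknown): boolean {
  const status = (err as { status?: number }).status; // assumed field
  return status === 429 || status === 500 || status === 502 || status === 503;
}
```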
package/dist/providers/types.d.ts
@@ -24,6 +24,12 @@ export interface ToolUseBlock {
     id: string;
     name: string;
     input: Record<string, unknown>;
+    /**
+     * Thought signature for Gemini 3 function calls.
+     * Required for Gemini 3 to maintain reasoning context.
+     * @see https://ai.google.dev/gemini-api/docs/thought-signatures
+     */
+    signature?: string;
 }
 /**
  * Tool result content block (result of a tool call)
@@ -64,6 +70,14 @@ export interface LLMUsage {
     outputTokens: number;
     cacheReadTokens?: number;
     cacheCreationTokens?: number;
+    /** Thinking tokens (Gemini 2.5+ models with thinking) */
+    thinkingTokens?: number;
+    /** Debug payload info - estimated char counts before sending to provider */
+    debugPayload?: {
+        systemChars: number;
+        contentsChars: number;
+        toolsChars: number;
+    };
 }
 /**
  * Streaming chunk types
@@ -75,6 +89,11 @@ export interface StreamChunk {
         id: string;
         name: string;
         input?: Record<string, unknown>;
+        /**
+         * Thought signature for Gemini 3 function calls.
+         * Only present on first function call in each step.
+         */
+        signature?: string;
     };
     /**
      * Thinking block data (for thinking_start/thinking_end)
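The types.d.ts additions are consumer-facing fields. The sketch below mirrors just the new LLMUsage members as a local type to show their shape; the field names come from the hunk above, while everything else about the interface is elided in this diff:

```typescript
// Local mirror of the fields added to LLMUsage in this release (illustrative).
interface UsageAdditions {
  /** Thinking tokens reported by Gemini 2.5+ models with thinking enabled */
  thinkingTokens?: number;
  /** Estimated character counts of the payload sent to the provider */
  debugPayload?: {
    systemChars: number;
    contentsChars: number;
    toolsChars: number;
  };
}

function logUsage(usage: UsageAdditions): void {
  if (usage.thinkingTokens !== undefined) {
    console.log(`thinking tokens: ${usage.thinkingTokens}`);
  }
  if (usage.debugPayload) {
    const { systemChars, contentsChars, toolsChars } = usage.debugPayload;
    console.log(`payload chars: system=${systemChars}, contents=${contentsChars}, tools=${toolsChars}`);
  }
}
```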