@exagent/agent 0.3.6 → 0.3.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-7UGLJO6W.js +6392 -0
- package/dist/chunk-EHAOPCTJ.js +6406 -0
- package/dist/chunk-FGMXTW5I.js +6540 -0
- package/dist/chunk-GYYW4EKM.js +6756 -0
- package/dist/chunk-IVA2SCSN.js +6756 -0
- package/dist/chunk-JHXCSGPC.js +6352 -0
- package/dist/chunk-V6O4UXVN.js +6345 -0
- package/dist/chunk-WTECTX2Z.js +6345 -0
- package/dist/cli.js +2 -2
- package/dist/index.d.ts +24 -2
- package/dist/index.js +1 -1
- package/package.json +12 -9
- package/src/bridge/across.ts +0 -240
- package/src/bridge/bridge-manager.ts +0 -87
- package/src/bridge/index.ts +0 -9
- package/src/bridge/types.ts +0 -77
- package/src/chains.ts +0 -105
- package/src/cli.ts +0 -250
- package/src/config.ts +0 -502
- package/src/diagnostics.ts +0 -335
- package/src/index.ts +0 -98
- package/src/llm/anthropic.ts +0 -63
- package/src/llm/base.ts +0 -264
- package/src/llm/deepseek.ts +0 -48
- package/src/llm/google.ts +0 -63
- package/src/llm/groq.ts +0 -48
- package/src/llm/index.ts +0 -42
- package/src/llm/mistral.ts +0 -48
- package/src/llm/ollama.ts +0 -52
- package/src/llm/openai.ts +0 -94
- package/src/llm/together.ts +0 -48
- package/src/llm-providers.ts +0 -8
- package/src/logger.ts +0 -137
- package/src/paper/executor.ts +0 -201
- package/src/paper/index.ts +0 -1
- package/src/perp/client.ts +0 -200
- package/src/perp/index.ts +0 -12
- package/src/perp/msgpack.ts +0 -272
- package/src/perp/orders.ts +0 -234
- package/src/perp/positions.ts +0 -126
- package/src/perp/signer.ts +0 -277
- package/src/perp/types.ts +0 -192
- package/src/perp/websocket.ts +0 -274
- package/src/position-tracker.ts +0 -243
- package/src/prediction/client.ts +0 -288
- package/src/prediction/index.ts +0 -3
- package/src/prediction/order-manager.ts +0 -297
- package/src/prediction/types.ts +0 -151
- package/src/relay.ts +0 -254
- package/src/runtime.ts +0 -1755
- package/src/scrub-secrets.ts +0 -39
- package/src/setup.ts +0 -392
- package/src/signal.ts +0 -212
- package/src/spot/aerodrome.ts +0 -158
- package/src/spot/client.ts +0 -138
- package/src/spot/index.ts +0 -11
- package/src/spot/swap-manager.ts +0 -219
- package/src/spot/types.ts +0 -203
- package/src/spot/uniswap.ts +0 -150
- package/src/store.ts +0 -50
- package/src/strategy/index.ts +0 -2
- package/src/strategy/loader.ts +0 -265
- package/src/strategy/templates.ts +0 -74
- package/src/trading/index.ts +0 -2
- package/src/trading/market.ts +0 -120
- package/src/trading/risk.ts +0 -107
- package/src/ui.ts +0 -75
- package/test/strategy-loader.test.ts +0 -150
- package/tsconfig.json +0 -8
package/src/llm/base.ts
DELETED
@@ -1,264 +0,0 @@
-import type { LLMAdapter, LLMMessage, LLMResponse, LLMMetadata, LLMConfig } from '@exagent/sdk';
-import { getLogger } from '../logger.js';
-import type { LLMCallRecord } from '../diagnostics.js';
-
-/** Status codes that should be retried (transient errors) */
-const RETRYABLE_STATUS_CODES = new Set([429, 500, 502, 503, 504]);
-
-/** Default timeout for LLM API calls in ms */
-const DEFAULT_TIMEOUT_MS = 30_000;
-
-/** Max retries for transient errors */
-const MAX_RETRIES = 3;
-
-/** Backoff delays in ms: 1s, 2s, 4s */
-const BACKOFF_DELAYS = [1000, 2000, 4000];
-
-export interface LLMUsageStats {
-  totalInputTokens: number;
-  totalOutputTokens: number;
-  totalCalls: number;
-  dailyInputTokens: number;
-  dailyOutputTokens: number;
-  dailyCalls: number;
-  lastResetDate: string; // YYYY-MM-DD UTC
-}
-
-export abstract class BaseLLMAdapter implements LLMAdapter {
-  protected config: LLMConfig;
-  private usage: LLMUsageStats = {
-    totalInputTokens: 0,
-    totalOutputTokens: 0,
-    totalCalls: 0,
-    dailyInputTokens: 0,
-    dailyOutputTokens: 0,
-    dailyCalls: 0,
-    lastResetDate: this.todayUTC(),
-  };
-
-  /** Optional daily token budget — if exceeded, chat() throws */
-  private maxDailyTokens: number | undefined;
-
-  /** Callback to send LLM call records to DiagnosticsCollector */
-  private onCallRecorded: ((record: LLMCallRecord) => void) | null = null;
-
-  constructor(config: LLMConfig) {
-    this.config = config;
-  }
-
-  /** Register a callback to receive LLM call records (wired by runtime) */
-  setCallRecordCallback(cb: (record: LLMCallRecord) => void): void {
-    this.onCallRecorded = cb;
-  }
-
-  /** Set a daily token budget. Once exceeded, chat() will throw. */
-  setMaxDailyTokens(max: number | undefined): void {
-    this.maxDailyTokens = max;
-  }
-
-  /** Get current usage statistics */
-  getUsageStats(): LLMUsageStats {
-    this.resetDailyIfNeeded();
-    return { ...this.usage };
-  }
-
-  /** Record token usage from a response (called by subclasses or externally) */
-  recordUsage(tokens: { input: number; output: number }): void {
-    this.resetDailyIfNeeded();
-    this.usage.totalInputTokens += tokens.input;
-    this.usage.totalOutputTokens += tokens.output;
-    this.usage.totalCalls++;
-    this.usage.dailyInputTokens += tokens.input;
-    this.usage.dailyOutputTokens += tokens.output;
-    this.usage.dailyCalls++;
-  }
-
-  /** Check if the daily token budget has been exceeded */
-  isDailyBudgetExceeded(): boolean {
-    if (!this.maxDailyTokens) return false;
-    this.resetDailyIfNeeded();
-    return (this.usage.dailyInputTokens + this.usage.dailyOutputTokens) >= this.maxDailyTokens;
-  }
-
-  /**
-   * Chat with retry logic and timeout.
-   * Retries up to 3 times with exponential backoff on transient errors (429, 5xx, timeout).
-   * Does not retry on 400/401/403 (client errors).
-   */
-  async chat(messages: LLMMessage[]): Promise<LLMResponse> {
-    const log = getLogger();
-    const meta = this.getMetadata();
-
-    // Check daily budget before calling
-    if (this.isDailyBudgetExceeded()) {
-      const budgetErr = `LLM daily token budget exceeded (${this.usage.dailyInputTokens + this.usage.dailyOutputTokens} / ${this.maxDailyTokens})`;
-      log.warn('llm', budgetErr, { provider: meta.provider, model: meta.model });
-      throw new Error(budgetErr);
-    }
-
-    let lastError: Error | null = null;
-    let retries = 0;
-    const callStart = Date.now();
-
-    for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
-      try {
-        const response = await this.chatImpl(messages);
-
-        // Track token usage
-        if (response.tokens) {
-          this.recordUsage(response.tokens);
-        }
-
-        const latencyMs = Date.now() - callStart;
-        log.info('llm', 'Call completed', {
-          provider: meta.provider,
-          model: meta.model,
-          inputTokens: response.tokens?.input ?? 0,
-          outputTokens: response.tokens?.output ?? 0,
-          latencyMs,
-          retries,
-        });
-
-        this.emitCallRecord({
-          timestamp: callStart,
-          provider: meta.provider,
-          model: meta.model,
-          inputTokens: response.tokens?.input ?? 0,
-          outputTokens: response.tokens?.output ?? 0,
-          latencyMs,
-          success: true,
-          retries,
-        });
-
-        return response;
-      } catch (err) {
-        lastError = err as Error;
-        const isRetryable = this.isRetryableError(lastError);
-
-        if (!isRetryable || attempt >= MAX_RETRIES) {
-          const latencyMs = Date.now() - callStart;
-          log.error('llm', 'Call failed', {
-            provider: meta.provider,
-            model: meta.model,
-            error: lastError.message,
-            latencyMs,
-            retries,
-            retryable: isRetryable,
-          });
-
-          this.emitCallRecord({
-            timestamp: callStart,
-            provider: meta.provider,
-            model: meta.model,
-            inputTokens: 0,
-            outputTokens: 0,
-            latencyMs,
-            success: false,
-            error: lastError.message,
-            retries,
-          });
-
-          throw lastError;
-        }
-
-        retries++;
-        const delay = BACKOFF_DELAYS[attempt] ?? 4000;
-        log.warn('llm', `Retrying after ${delay}ms (attempt ${retries}/${MAX_RETRIES})`, {
-          provider: meta.provider,
-          model: meta.model,
-          error: lastError.message,
-        });
-        await new Promise(resolve => setTimeout(resolve, delay));
-      }
-    }
-
-    throw lastError ?? new Error('LLM call failed');
-  }
-
-  private emitCallRecord(record: LLMCallRecord): void {
-    this.onCallRecorded?.(record);
-  }
-
-  /**
-   * Actual LLM call implementation — subclasses override this instead of chat().
-   * Must be implemented by each provider adapter.
-   */
-  protected abstract chatImpl(messages: LLMMessage[]): Promise<LLMResponse>;
-
-  abstract getMetadata(): LLMMetadata;
-
-  protected getTemperature(): number {
-    return this.config.temperature ?? 0.7;
-  }
-
-  protected getMaxTokens(): number {
-    return this.config.maxTokens ?? 4096;
-  }
-
-  /** Get timeout in ms for fetch calls */
-  protected getTimeoutMs(): number {
-    return DEFAULT_TIMEOUT_MS;
-  }
-
-  /**
-   * Fetch with timeout using AbortController.
-   * Wraps the standard fetch with a configurable timeout.
-   */
-  protected async fetchWithTimeout(url: string, init: RequestInit): Promise<Response> {
-    const controller = new AbortController();
-    const timeoutId = setTimeout(() => controller.abort(), this.getTimeoutMs());
-
-    try {
-      const response = await fetch(url, {
-        ...init,
-        signal: controller.signal,
-      });
-      return response;
-    } catch (err) {
-      if ((err as Error).name === 'AbortError') {
-        throw new Error(`LLM API request timed out after ${this.getTimeoutMs()}ms`);
-      }
-      throw err;
-    } finally {
-      clearTimeout(timeoutId);
-    }
-  }
-
-  /** Check if an error is retryable (transient) */
-  private isRetryableError(err: Error): boolean {
-    const msg = err.message;
-
-    // Timeout errors are retryable
-    if (msg.includes('timed out') || msg.includes('AbortError') || msg.includes('ECONNRESET') || msg.includes('ECONNREFUSED') || msg.includes('ETIMEDOUT')) {
-      return true;
-    }
-
-    // Check for HTTP status codes in error message
-    for (const code of RETRYABLE_STATUS_CODES) {
-      if (msg.includes(`${code}`)) {
-        return true;
-      }
-    }
-
-    // Network errors
-    if (msg.includes('network') || msg.includes('fetch failed') || msg.includes('socket hang up')) {
-      return true;
-    }
-
-    return false;
-  }
-
-  private todayUTC(): string {
-    return new Date().toISOString().slice(0, 10);
-  }
-
-  private resetDailyIfNeeded(): void {
-    const today = this.todayUTC();
-    if (this.usage.lastResetDate !== today) {
-      this.usage.dailyInputTokens = 0;
-      this.usage.dailyOutputTokens = 0;
-      this.usage.dailyCalls = 0;
-      this.usage.lastResetDate = today;
-    }
-  }
-}
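
For context, a minimal sketch of the contract this base class imposed on the provider adapters below: subclasses supply chatImpl() and getMetadata(), and inherit retries, timeout, budget enforcement, and diagnostics from chat(). The EchoAdapter here is hypothetical (not part of the package), and it assumes the SDK's LLMMetadata type accepts this provider string.

// Hypothetical adapter illustrating the BaseLLMAdapter contract from the
// deleted base.ts. chatImpl() is called by the inherited chat() inside its
// retry loop, so no retry or timeout handling is needed here.
import type { LLMMessage, LLMResponse, LLMMetadata } from '@exagent/sdk';
import { BaseLLMAdapter } from './base.js';

class EchoAdapter extends BaseLLMAdapter {
  protected async chatImpl(messages: LLMMessage[]): Promise<LLMResponse> {
    const last = messages[messages.length - 1];
    // Returning tokens lets recordUsage() feed the daily-budget accounting.
    return { content: last?.content ?? '', tokens: { input: 1, output: 1 } };
  }

  getMetadata(): LLMMetadata {
    // 'echo' is a placeholder; real adapters return their provider name.
    return { provider: 'echo', model: this.config.model || 'echo-1' };
  }
}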
package/src/llm/deepseek.ts
DELETED
@@ -1,48 +0,0 @@
-import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
-import { BaseLLMAdapter } from './base.js';
-
-export class DeepSeekAdapter extends BaseLLMAdapter {
-  constructor(config: LLMConfig) {
-    super(config);
-  }
-
-  protected async chatImpl(messages: LLMMessage[]): Promise<LLMResponse> {
-    const res = await this.fetchWithTimeout('https://api.deepseek.com/chat/completions', {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: `Bearer ${this.config.apiKey}`,
-      },
-      body: JSON.stringify({
-        model: this.config.model || getDefaultModel('deepseek'),
-        messages: messages.map(m => ({ role: m.role, content: m.content })),
-        temperature: this.getTemperature(),
-        max_tokens: this.getMaxTokens(),
-      }),
-    });
-
-    if (!res.ok) {
-      const body = await res.text();
-      throw new Error(`DeepSeek API error ${res.status}: ${body}`);
-    }
-
-    const data = await res.json() as {
-      choices: { message: { content: string } }[];
-      usage?: { prompt_tokens: number; completion_tokens: number };
-    };
-
-    return {
-      content: data.choices[0]?.message?.content || '',
-      tokens: data.usage
-        ? { input: data.usage.prompt_tokens, output: data.usage.completion_tokens }
-        : undefined,
-    };
-  }
-
-  getMetadata(): LLMMetadata {
-    return {
-      provider: 'deepseek',
-      model: this.config.model || getDefaultModel('deepseek'),
-    };
-  }
-}
package/src/llm/google.ts
DELETED
@@ -1,63 +0,0 @@
-import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
-import { BaseLLMAdapter } from './base.js';
-
-export class GoogleAdapter extends BaseLLMAdapter {
-  constructor(config: LLMConfig) {
-    super(config);
-  }
-
-  protected async chatImpl(messages: LLMMessage[]): Promise<LLMResponse> {
-    const model = this.config.model || getDefaultModel('google');
-    const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${this.config.apiKey}`;
-
-    const systemMessage = messages.find(m => m.role === 'system');
-    const nonSystemMessages = messages.filter(m => m.role !== 'system');
-
-    const body: Record<string, unknown> = {
-      contents: nonSystemMessages.map(m => ({
-        role: m.role === 'assistant' ? 'model' : 'user',
-        parts: [{ text: m.content }],
-      })),
-      generationConfig: {
-        temperature: this.getTemperature(),
-        maxOutputTokens: this.getMaxTokens(),
-      },
-    };
-
-    if (systemMessage) {
-      body.systemInstruction = { parts: [{ text: systemMessage.content }] };
-    }
-
-    const res = await this.fetchWithTimeout(url, {
-      method: 'POST',
-      headers: { 'Content-Type': 'application/json' },
-      body: JSON.stringify(body),
-    });
-
-    if (!res.ok) {
-      const text = await res.text();
-      throw new Error(`Google AI error ${res.status}: ${text}`);
-    }
-
-    const data = await res.json() as {
-      candidates: { content: { parts: { text: string }[] } }[];
-      usageMetadata?: { promptTokenCount: number; candidatesTokenCount: number };
-    };
-
-    const text = data.candidates[0]?.content?.parts[0]?.text || '';
-
-    return {
-      content: text,
-      tokens: data.usageMetadata
-        ? { input: data.usageMetadata.promptTokenCount, output: data.usageMetadata.candidatesTokenCount }
-        : undefined,
-    };
-  }
-
-  getMetadata(): LLMMetadata {
-    return {
-      provider: 'google',
-      model: this.config.model || getDefaultModel('google'),
-    };
-  }
-}
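
The distinctive part of this adapter is the message remap into Gemini's request shape: 'system' becomes a top-level systemInstruction, 'assistant' becomes 'model', and everything else is sent as 'user'. A standalone sketch of just that transformation, with the message type inlined for illustration:

// Sketch of the role/shape remap performed in GoogleAdapter.chatImpl above.
type Msg = { role: 'system' | 'user' | 'assistant'; content: string };

function toGeminiBody(messages: Msg[]): Record<string, unknown> {
  const system = messages.find(m => m.role === 'system');
  const body: Record<string, unknown> = {
    contents: messages
      .filter(m => m.role !== 'system')
      .map(m => ({
        role: m.role === 'assistant' ? 'model' : 'user',
        parts: [{ text: m.content }],
      })),
  };
  // Gemini takes the system prompt out-of-band rather than in contents.
  if (system) body.systemInstruction = { parts: [{ text: system.content }] };
  return body;
}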
package/src/llm/groq.ts
DELETED
@@ -1,48 +0,0 @@
-import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
-import { BaseLLMAdapter } from './base.js';
-
-export class GroqAdapter extends BaseLLMAdapter {
-  constructor(config: LLMConfig) {
-    super(config);
-  }
-
-  protected async chatImpl(messages: LLMMessage[]): Promise<LLMResponse> {
-    const res = await this.fetchWithTimeout('https://api.groq.com/openai/v1/chat/completions', {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: `Bearer ${this.config.apiKey}`,
-      },
-      body: JSON.stringify({
-        model: this.config.model || getDefaultModel('groq'),
-        messages: messages.map(m => ({ role: m.role, content: m.content })),
-        temperature: this.getTemperature(),
-        max_tokens: this.getMaxTokens(),
-      }),
-    });
-
-    if (!res.ok) {
-      const body = await res.text();
-      throw new Error(`Groq API error ${res.status}: ${body}`);
-    }
-
-    const data = await res.json() as {
-      choices: { message: { content: string } }[];
-      usage?: { prompt_tokens: number; completion_tokens: number };
-    };
-
-    return {
-      content: data.choices[0]?.message?.content || '',
-      tokens: data.usage
-        ? { input: data.usage.prompt_tokens, output: data.usage.completion_tokens }
-        : undefined,
-    };
-  }
-
-  getMetadata(): LLMMetadata {
-    return {
-      provider: 'groq',
-      model: this.config.model || getDefaultModel('groq'),
-    };
-  }
-}
package/src/llm/index.ts
DELETED
@@ -1,42 +0,0 @@
-import type { LLMAdapter, LLMConfig } from '@exagent/sdk';
-import { OpenAIAdapter } from './openai.js';
-import { AnthropicAdapter } from './anthropic.js';
-import { GoogleAdapter } from './google.js';
-import { DeepSeekAdapter } from './deepseek.js';
-import { MistralAdapter } from './mistral.js';
-import { GroqAdapter } from './groq.js';
-import { TogetherAdapter } from './together.js';
-import { OllamaAdapter } from './ollama.js';
-
-export function createLLMAdapter(config: LLMConfig): LLMAdapter {
-  switch (config.provider) {
-    case 'openai':
-      return new OpenAIAdapter(config);
-    case 'anthropic':
-      return new AnthropicAdapter(config);
-    case 'google':
-      return new GoogleAdapter(config);
-    case 'deepseek':
-      return new DeepSeekAdapter(config);
-    case 'mistral':
-      return new MistralAdapter(config);
-    case 'groq':
-      return new GroqAdapter(config);
-    case 'together':
-      return new TogetherAdapter(config);
-    case 'ollama':
-      return new OllamaAdapter(config);
-    default:
-      throw new Error(`Unknown LLM provider: ${config.provider}`);
-  }
-}
-
-export { BaseLLMAdapter } from './base.js';
-export { OpenAIAdapter } from './openai.js';
-export { AnthropicAdapter } from './anthropic.js';
-export { GoogleAdapter } from './google.js';
-export { DeepSeekAdapter } from './deepseek.js';
-export { MistralAdapter } from './mistral.js';
-export { GroqAdapter } from './groq.js';
-export { TogetherAdapter } from './together.js';
-export { OllamaAdapter } from './ollama.js';
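
A hedged usage sketch of this factory. The config fields shown (provider, model, apiKey, temperature, maxTokens) all appear in these sources, but the exact LLMConfig requirements live in @exagent/sdk; the model name and key are placeholders.

// Sketch: constructing an adapter through the deleted factory and calling
// chat(), which goes through BaseLLMAdapter's retry/timeout/budget wrapper.
import { createLLMAdapter } from './llm/index.js';

const llm = createLLMAdapter({
  provider: 'deepseek',
  model: 'deepseek-chat',                       // optional; falls back to getDefaultModel()
  apiKey: process.env.DEEPSEEK_API_KEY ?? '',
  temperature: 0.2,                             // base.ts default is 0.7
  maxTokens: 1024,                              // base.ts default is 4096
});

const reply = await llm.chat([{ role: 'user', content: 'ping' }]);
console.log(reply.content);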
package/src/llm/mistral.ts
DELETED
@@ -1,48 +0,0 @@
-import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
-import { BaseLLMAdapter } from './base.js';
-
-export class MistralAdapter extends BaseLLMAdapter {
-  constructor(config: LLMConfig) {
-    super(config);
-  }
-
-  protected async chatImpl(messages: LLMMessage[]): Promise<LLMResponse> {
-    const res = await this.fetchWithTimeout('https://api.mistral.ai/v1/chat/completions', {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: `Bearer ${this.config.apiKey}`,
-      },
-      body: JSON.stringify({
-        model: this.config.model || getDefaultModel('mistral'),
-        messages: messages.map(m => ({ role: m.role, content: m.content })),
-        temperature: this.getTemperature(),
-        max_tokens: this.getMaxTokens(),
-      }),
-    });
-
-    if (!res.ok) {
-      const body = await res.text();
-      throw new Error(`Mistral API error ${res.status}: ${body}`);
-    }
-
-    const data = await res.json() as {
-      choices: { message: { content: string } }[];
-      usage?: { prompt_tokens: number; completion_tokens: number };
-    };
-
-    return {
-      content: data.choices[0]?.message?.content || '',
-      tokens: data.usage
-        ? { input: data.usage.prompt_tokens, output: data.usage.completion_tokens }
-        : undefined,
-    };
-  }
-
-  getMetadata(): LLMMetadata {
-    return {
-      provider: 'mistral',
-      model: this.config.model || getDefaultModel('mistral'),
-    };
-  }
-}
package/src/llm/ollama.ts
DELETED
@@ -1,52 +0,0 @@
-import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
-import { BaseLLMAdapter } from './base.js';
-
-export class OllamaAdapter extends BaseLLMAdapter {
-  private endpoint: string;
-
-  constructor(config: LLMConfig) {
-    super(config);
-    this.endpoint = config.endpoint || 'http://localhost:11434';
-  }
-
-  protected async chatImpl(messages: LLMMessage[]): Promise<LLMResponse> {
-    const res = await this.fetchWithTimeout(`${this.endpoint}/api/chat`, {
-      method: 'POST',
-      headers: { 'Content-Type': 'application/json' },
-      body: JSON.stringify({
-        model: this.config.model || getDefaultModel('ollama'),
-        messages: messages.map(m => ({ role: m.role, content: m.content })),
-        stream: false,
-        options: {
-          temperature: this.getTemperature(),
-          num_predict: this.getMaxTokens(),
-        },
-      }),
-    });
-
-    if (!res.ok) {
-      const body = await res.text();
-      throw new Error(`Ollama error ${res.status}: ${body}`);
-    }
-
-    const data = await res.json() as {
-      message: { content: string };
-      prompt_eval_count?: number;
-      eval_count?: number;
-    };
-
-    return {
-      content: data.message?.content || '',
-      tokens: data.prompt_eval_count
-        ? { input: data.prompt_eval_count, output: data.eval_count || 0 }
-        : undefined,
-    };
-  }
-
-  getMetadata(): LLMMetadata {
-    return {
-      provider: 'ollama',
-      model: this.config.model || getDefaultModel('ollama'),
-    };
-  }
-}
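
Ollama is the only provider here with no Authorization header and a configurable local endpoint. A hedged config sketch (the model name is a placeholder, and whether LLMConfig requires an apiKey for this provider depends on @exagent/sdk):

// Sketch: pointing the Ollama adapter at a non-default host. The endpoint
// field and its http://localhost:11434 default come from the constructor above.
import { OllamaAdapter } from './llm/ollama.js';

const local = new OllamaAdapter({
  provider: 'ollama',
  model: 'llama3.1',                     // placeholder model name
  endpoint: 'http://192.168.1.50:11434', // overrides the localhost default
});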
package/src/llm/openai.ts
DELETED
@@ -1,94 +0,0 @@
-import { getDefaultModel, shouldUseOpenAIResponses, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
-import { BaseLLMAdapter } from './base.js';
-
-export class OpenAIAdapter extends BaseLLMAdapter {
-  private endpoint: string;
-
-  constructor(config: LLMConfig) {
-    super(config);
-    this.endpoint = config.endpoint || 'https://api.openai.com/v1';
-  }
-
-  protected async chatImpl(messages: LLMMessage[]): Promise<LLMResponse> {
-    const model = this.config.model || getDefaultModel('openai');
-    if (shouldUseOpenAIResponses(model)) {
-      return this.chatResponses(model, messages);
-    }
-
-    const res = await this.fetchWithTimeout(`${this.endpoint}/chat/completions`, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: `Bearer ${this.config.apiKey}`,
-      },
-      body: JSON.stringify({
-        model,
-        messages: messages.map(m => ({ role: m.role, content: m.content })),
-        temperature: this.getTemperature(),
-        max_tokens: this.getMaxTokens(),
-      }),
-    });
-
-    if (!res.ok) {
-      const body = await res.text();
-      throw new Error(`OpenAI API error ${res.status}: ${body}`);
-    }
-
-    const data = await res.json() as {
-      choices: { message: { content: string } }[];
-      usage?: { prompt_tokens: number; completion_tokens: number };
-    };
-
-    return {
-      content: data.choices[0]?.message?.content || '',
-      tokens: data.usage
-        ? { input: data.usage.prompt_tokens, output: data.usage.completion_tokens }
-        : undefined,
-    };
-  }
-
-  private async chatResponses(model: string, messages: LLMMessage[]): Promise<LLMResponse> {
-    const res = await this.fetchWithTimeout(`${this.endpoint}/responses`, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: `Bearer ${this.config.apiKey}`,
-      },
-      body: JSON.stringify({
-        model,
-        input: messages.map(m => ({
-          role: m.role === 'system' ? 'developer' : m.role,
-          content: m.content,
-        })),
-        max_output_tokens: this.getMaxTokens(),
-      }),
-    });
-
-    if (!res.ok) {
-      const body = await res.text();
-      throw new Error(`OpenAI API error ${res.status}: ${body}`);
-    }
-
-    const data = await res.json() as {
-      output_text?: string;
-      output?: Array<{ content?: Array<{ text?: string }> }>;
-      usage?: { input_tokens?: number; output_tokens?: number };
-    };
-
-    return {
-      content: data.output_text
-        || data.output?.flatMap(item => item.content?.map(content => content.text || '') || []).join('')
-        || '',
-      tokens: data.usage
-        ? { input: data.usage.input_tokens || 0, output: data.usage.output_tokens || 0 }
-        : undefined,
-    };
-  }
-
-  getMetadata(): LLMMetadata {
-    return {
-      provider: 'openai',
-      model: this.config.model || getDefaultModel('openai'),
-    };
-  }
-}
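
The dual code path is the one nonobvious part of this adapter: models flagged by shouldUseOpenAIResponses() are routed to the /responses endpoint, where the system role is sent as 'developer' and the reply arrives either as output_text or as an output array of content parts. A standalone sketch of that extraction, with the response shape copied from the inline typing above:

// Sketch of the Responses-API text extraction used in chatResponses():
// prefer the convenience field output_text, otherwise flatten each output
// item's content parts and concatenate their text.
type ResponsesPayload = {
  output_text?: string;
  output?: Array<{ content?: Array<{ text?: string }> }>;
};

function extractText(data: ResponsesPayload): string {
  return (
    data.output_text ||
    data.output?.flatMap(item => item.content?.map(c => c.text || '') || []).join('') ||
    ''
  );
}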