@exagent/agent 0.3.5 → 0.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -9,7 +9,7 @@ import {
   updateSecureStore,
   writeConfigFile,
   writeSampleConfig
-} from "./chunk-WTECTX2Z.js";
+} from "./chunk-ZRAOPQQW.js";
 
 // src/cli.ts
 import { Command } from "commander";
@@ -80,85 +80,13 @@ function printError(message) {
 }
 
 // src/llm-providers.ts
-var LLM_PROVIDERS = [
-  {
-    id: "openai",
-    label: "OpenAI",
-    models: [
-      { id: "gpt-5.2", label: "GPT-5.2" },
-      { id: "gpt-5.2-pro", label: "GPT-5.2 Pro" },
-      { id: "gpt-5-mini", label: "GPT-5 Mini" },
-      { id: "gpt-5-nano", label: "GPT-5 Nano" },
-      { id: "gpt-4o", label: "GPT-4o" },
-      { id: "gpt-4o-mini", label: "GPT-4o Mini" }
-    ]
-  },
-  {
-    id: "anthropic",
-    label: "Anthropic",
-    models: [
-      { id: "claude-opus-4-6", label: "Claude Opus 4.6" },
-      { id: "claude-sonnet-4-6", label: "Claude Sonnet 4.6" },
-      { id: "claude-haiku-4-5", label: "Claude Haiku 4.5" }
-    ]
-  },
-  {
-    id: "google",
-    label: "Google",
-    models: [
-      { id: "gemini-3-pro", label: "Gemini 3 Pro" },
-      { id: "gemini-3-flash", label: "Gemini 3 Flash" },
-      { id: "gemini-2.5-pro", label: "Gemini 2.5 Pro" },
-      { id: "gemini-2.5-flash", label: "Gemini 2.5 Flash" },
-      { id: "gemini-2.5-flash-lite", label: "Gemini 2.5 Flash Lite" }
-    ]
-  },
-  {
-    id: "deepseek",
-    label: "DeepSeek",
-    models: [
-      { id: "deepseek-chat", label: "DeepSeek Chat" },
-      { id: "deepseek-reasoner", label: "DeepSeek Reasoner" }
-    ]
-  },
-  {
-    id: "mistral",
-    label: "Mistral",
-    models: [
-      { id: "mistral-large-latest", label: "Mistral Large" },
-      { id: "mistral-small-latest", label: "Mistral Small" }
-    ]
-  },
-  {
-    id: "groq",
-    label: "Groq",
-    models: [
-      { id: "llama-3.3-70b-versatile", label: "Llama 3.3 70B" },
-      { id: "llama-3.1-8b-instant", label: "Llama 3.1 8B" },
-      { id: "mixtral-8x7b-32768", label: "Mixtral 8x7B" }
-    ]
-  },
-  {
-    id: "together",
-    label: "Together",
-    models: [
-      { id: "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", label: "Llama 3.1 70B" },
-      { id: "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", label: "Llama 3.1 8B" }
-    ]
-  },
-  {
-    id: "ollama",
-    label: "Ollama (local)",
-    models: [
-      { id: "llama3.1", label: "Llama 3.1" },
-      { id: "mistral", label: "Mistral" },
-      { id: "custom", label: "Custom (type model name)" }
-    ]
-  }
-];
-function getProvider(id) {
-  return LLM_PROVIDERS.find((p) => p.id === id);
-}
+import {
+  LLM_PROVIDERS,
+  getDefaultModel,
+  getProvider,
+  getProviderIds,
+  providerRequiresApiKey
+} from "@exagent/sdk";
 
 // src/setup.ts
 function expandHomeDir(path) {
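Note: the provider catalog that used to be bundled inline is now imported from @exagent/sdk, whose source is not part of this diff. A rough sketch of the helper surface the CLI now depends on, inferred only from the call sites in this diff (the shapes and the Ollama special case are assumptions):

```ts
// Sketch only, not the SDK's actual code. Shapes inferred from how cli.js uses the imports.
interface ProviderModel { id: string; label: string }
interface ProviderInfo { id: string; label: string; models: ProviderModel[] }

declare const LLM_PROVIDERS: ProviderInfo[];

function getProvider(id: string): ProviderInfo | undefined {
  return LLM_PROVIDERS.find((p) => p.id === id);
}

function getProviderIds(): string[] {
  return LLM_PROVIDERS.map((p) => p.id);
}

// Assumption: the default is the first model in a provider's catalog.
function getDefaultModel(id: string): string {
  return getProvider(id)?.models[0]?.id ?? 'gpt-4o';
}

// Assumption: Ollama talks to a local server, so it is presumably the one
// provider for which this returns false (the setup flow below prints
// "Ollama uses your local server; no API key needed.").
function providerRequiresApiKey(id: string): boolean {
  return id !== 'ollama';
}
```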
@@ -261,7 +189,9 @@ async function setupLlm(config) {
   const apiKey2 = process.env.EXAGENT_LLM_KEY;
   if (!provider2) throw new Error("EXAGENT_LLM_PROVIDER required in non-interactive mode");
   if (!model2) throw new Error("EXAGENT_LLM_MODEL required in non-interactive mode");
-  if (!apiKey2) throw new Error("EXAGENT_LLM_KEY required in non-interactive mode");
+  if (providerRequiresApiKey(provider2) && !apiKey2) {
+    throw new Error("EXAGENT_LLM_KEY required in non-interactive mode");
+  }
   printDone("LLM configured");
   return { provider: provider2, model: model2, apiKey: apiKey2 };
 }
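With that guard, non-interactive setup only demands EXAGENT_LLM_KEY when the provider actually needs one. Illustrative environment shapes (variable names from the diff, values placeholders):

```ts
// Hosted provider: all three variables are still required.
process.env.EXAGENT_LLM_PROVIDER = 'openai';
process.env.EXAGENT_LLM_MODEL = 'gpt-4o';
process.env.EXAGENT_LLM_KEY = 'sk-...'; // placeholder, not a real key

// Local Ollama: the key can now be omitted.
process.env.EXAGENT_LLM_PROVIDER = 'ollama';
process.env.EXAGENT_LLM_MODEL = 'llama3.1';
// no EXAGENT_LLM_KEY: setupLlm resolves with { apiKey: undefined }
```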
@@ -276,7 +206,7 @@ async function setupLlm(config) {
   const provider = selected;
   const defaultModel = config.llm?.model;
   const providerInfo = getProvider(provider);
-  const modelOptions = providerInfo ? providerInfo.models.map((m) => ({ value: m.id, label: m.label })) : [{ value: defaultModel || "gpt-4o", label: defaultModel || "gpt-4o" }];
+  const modelOptions = providerInfo ? providerInfo.models.map((m) => ({ value: m.id, label: m.label })) : [{ value: defaultModel || getDefaultModel("openai"), label: defaultModel || getDefaultModel("openai") }];
   const selectedModel = await clack.select({
     message: "LLM model:",
     options: modelOptions,
@@ -284,11 +214,17 @@ async function setupLlm(config) {
   });
   if (clack.isCancel(selectedModel)) cancelled();
   const model = selectedModel;
-  const apiKey = await clack.password({
-    message: "LLM API key:",
-    validate: (val) => validateLlmKeyFormat(provider, val)
-  });
-  if (clack.isCancel(apiKey)) cancelled();
+  let apiKey;
+  if (providerRequiresApiKey(provider)) {
+    const enteredApiKey = await clack.password({
+      message: "LLM API key:",
+      validate: (val) => validateLlmKeyFormat(provider, val)
+    });
+    if (clack.isCancel(enteredApiKey)) cancelled();
+    apiKey = enteredApiKey;
+  } else {
+    printInfo("Ollama uses your local server; no API key needed.");
+  }
   printDone("LLM configured");
   return { provider, model, apiKey };
 }
@@ -558,16 +494,22 @@ program.command("config").description("Change LLM provider, model, or API key").
     }
     newModel = selectedModel;
   }
-  const newKey = await clack2.password({
-    message: "New LLM API key:",
-    validate: (val) => {
-      if (!val?.trim()) return "API key is required.";
-      if (val.length < 10) return "API key seems too short.";
+  let newKey;
+  if (providerRequiresApiKey(newProvider)) {
+    const enteredKey = await clack2.password({
+      message: "New LLM API key:",
+      validate: (val) => {
+        if (!val?.trim()) return "API key is required.";
+        if (val.length < 10) return "API key seems too short.";
+      }
+    });
+    if (clack2.isCancel(enteredKey)) {
+      clack2.cancel("Cancelled.");
+      process.exit(0);
     }
-  });
-  if (clack2.isCancel(newKey)) {
-    clack2.cancel("Cancelled.");
-    process.exit(0);
+    newKey = enteredKey;
+  } else {
+    printInfo("Ollama uses your local server; no API key needed.");
   }
   updateSecureStore(secureStorePath, password3, { llmApiKey: newKey });
   const updatedConfig = readConfigFile(opts.config);
@@ -581,7 +523,7 @@ program.command("config").description("Change LLM provider, model, or API key").
   printSuccess("Updated", [
     `${pc.dim("Provider:")} ${pc.cyan(newProvider)}`,
     `${pc.dim("Model:")} ${pc.cyan(newModel)}`,
-    `${pc.dim("API key:")} ${pc.dim(`${newKey.slice(0, 7)}...${newKey.slice(-4)}`)}`,
+    `${pc.dim("API key:")} ${newKey ? pc.dim(`${newKey.slice(0, 7)}...${newKey.slice(-4)}`) : pc.dim("not required")}`,
     "",
     `Run ${pc.cyan("npx exagent run")} to start with the new configuration.`
   ]);
package/dist/index.js CHANGED
@@ -43,7 +43,7 @@ import {
   loadConfig,
   loadStrategy,
   writeSampleConfig
-} from "./chunk-WTECTX2Z.js";
+} from "./chunk-ZRAOPQQW.js";
 export {
   AcrossAdapter,
   AerodromeAdapter,
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@exagent/agent",
-  "version": "0.3.5",
+  "version": "0.3.6",
   "type": "module",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -13,31 +13,31 @@
       "types": "./dist/index.d.ts"
     }
   },
-  "scripts": {
-    "build": "tsup src/index.ts src/cli.ts --format esm --dts",
-    "dev": "tsup src/index.ts src/cli.ts --format esm --dts --watch"
-  },
   "dependencies": {
     "@clack/prompts": "^1.1.0",
-    "@exagent/sdk": "^0.2.1",
-    "@polymarket/clob-client": "^4.0.0",
+    "@polymarket/clob-client": "^5.8.1",
     "boxen": "^8.0.1",
-    "commander": "^12.0.0",
-    "ethers": "^5.7.2",
+    "commander": "^12.1.0",
     "figlet": "^1.10.0",
     "gradient-string": "^3.0.0",
     "picocolors": "^1.1.1",
-    "viem": "^2.21.0",
-    "ws": "^8.16.0",
-    "zod": "^3.22.0"
+    "viem": "^2.48.11",
+    "ws": "^8.20.0",
+    "zod": "^3.25.76",
+    "@exagent/sdk": "0.2.2"
   },
   "devDependencies": {
     "@types/figlet": "^1.7.0",
     "@types/gradient-string": "^1.1.6",
-    "@types/node": "^20.0.0",
-    "@types/ws": "^8.5.0",
-    "tsup": "^8.0.0",
-    "tsx": "^4.0.0",
-    "typescript": "^5.6.0"
+    "@types/node": "^22.19.18",
+    "@types/ws": "^8.18.1",
+    "tsup": "^8.5.1",
+    "tsx": "^4.21.0",
+    "typescript": "^5.9.3"
+  },
+  "scripts": {
+    "build": "tsup src/index.ts src/cli.ts --format esm --dts",
+    "dev": "tsup src/index.ts src/cli.ts --format esm --dts --watch",
+    "test": "tsx --test test/**/*.test.ts"
   }
-}
+}
package/src/cli.ts CHANGED
@@ -120,7 +120,7 @@ program
   }
 });
 
-import { LLM_PROVIDERS, getProvider } from './llm-providers.js';
+import { LLM_PROVIDERS, getProvider, providerRequiresApiKey } from './llm-providers.js';
 
 program
   .command('config')
@@ -202,16 +202,22 @@ program
       newModel = selectedModel;
     }
 
-    const newKey = await clack.password({
-      message: 'New LLM API key:',
-      validate: (val) => {
-        if (!val?.trim()) return 'API key is required.';
-        if (val.length < 10) return 'API key seems too short.';
-      },
-    });
-    if (clack.isCancel(newKey)) {
-      clack.cancel('Cancelled.');
-      process.exit(0);
+    let newKey: string | undefined;
+    if (providerRequiresApiKey(newProvider)) {
+      const enteredKey = await clack.password({
+        message: 'New LLM API key:',
+        validate: (val) => {
+          if (!val?.trim()) return 'API key is required.';
+          if (val.length < 10) return 'API key seems too short.';
+        },
+      });
+      if (clack.isCancel(enteredKey)) {
+        clack.cancel('Cancelled.');
+        process.exit(0);
+      }
+      newKey = enteredKey;
+    } else {
+      printInfo('Ollama uses your local server; no API key needed.');
     }
 
     // Update secure store with new API key
@@ -231,7 +237,7 @@ program
     printSuccess('Updated', [
       `${pc.dim('Provider:')} ${pc.cyan(newProvider)}`,
      `${pc.dim('Model:')} ${pc.cyan(newModel)}`,
-      `${pc.dim('API key:')} ${pc.dim(`${newKey.slice(0, 7)}...${newKey.slice(-4)}`)}`,
+      `${pc.dim('API key:')} ${newKey ? pc.dim(`${newKey.slice(0, 7)}...${newKey.slice(-4)}`) : pc.dim('not required')}`,
       '',
      `Run ${pc.cyan('npx exagent run')} to start with the new configuration.`,
     ]);
package/src/config.ts CHANGED
@@ -3,6 +3,7 @@ import { chmodSync, existsSync, readFileSync, writeFileSync } from 'node:fs';
 import { homedir } from 'node:os';
 import { dirname, resolve } from 'node:path';
 import { z } from 'zod';
+import { LLM_PROVIDER_IDS, providerRequiresApiKey } from '@exagent/sdk';
 import type { LLMProvider } from '@exagent/sdk';
 
 export interface RuntimeConfig {
@@ -115,7 +116,7 @@ export interface LoadConfigOptions {
   getSecretPassword?: () => Promise<string>;
 }
 
-const providerEnum = z.enum(['openai', 'anthropic', 'google', 'deepseek', 'mistral', 'groq', 'together', 'ollama']);
+const providerEnum = z.enum(LLM_PROVIDER_IDS);
 
 const runtimeSchema = z.object({
   agentId: z.string(),
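`z.enum` only accepts a non-empty tuple of string literals, so `LLM_PROVIDER_IDS` is presumably exported from @exagent/sdk as a `const` tuple rather than a plain `string[]`. A sketch under that assumption, with the ids taken from the catalog deleted from dist/cli.js above:

```ts
import { z } from 'zod';

// Assumed SDK export; the `as const` is what lets z.enum accept it.
const LLM_PROVIDER_IDS = [
  'openai', 'anthropic', 'google', 'deepseek',
  'mistral', 'groq', 'together', 'ollama',
] as const;

const providerEnum = z.enum(LLM_PROVIDER_IDS);
type ProviderId = z.infer<typeof providerEnum>; // 'openai' | 'anthropic' | ... | 'ollama'
```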
@@ -379,7 +380,9 @@ export async function loadConfig(path: string = 'agent-config.json', options: Lo
     config.wallet = { privateKey: process.env.EXAGENT_WALLET_PRIVATE_KEY };
   }
 
-  if ((!config.apiToken || !llm.apiKey || !config.wallet) && parsed.secrets?.secureStorePath) {
+  const llmNeedsApiKey = providerRequiresApiKey(String(llm.provider || ''));
+
+  if ((!config.apiToken || (llmNeedsApiKey && !llm.apiKey) || !config.wallet) && parsed.secrets?.secureStorePath) {
     const password = process.env.EXAGENT_SECRET_PASSWORD || await options.getSecretPassword?.();
     if (!password) {
       throw new Error('Encrypted secret store found, but no password was provided.');
@@ -391,7 +394,7 @@ export async function loadConfig(path: string = 'agent-config.json', options: Lo
     if (!llm.apiKey && secrets.llmApiKey) llm.apiKey = secrets.llmApiKey;
   }
 
-  if ((!config.apiToken || !llm.apiKey || !config.wallet) && parsed.secrets?.bootstrapToken && !parsed.secrets?.secureStorePath) {
+  if ((!config.apiToken || (llmNeedsApiKey && !llm.apiKey) || !config.wallet) && parsed.secrets?.bootstrapToken && !parsed.secrets?.secureStorePath) {
     throw new Error(`Config ${path} still requires first-time secure setup. Run 'exagent setup --config ${path}' or start the agent interactively.`);
   }
 
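Net effect of the two gated checks: a missing `llm.apiKey` no longer triggers the secure-store unlock or the first-time-setup error on its own when the provider does not need a key. A minimal illustration:

```ts
import { providerRequiresApiKey } from '@exagent/sdk';

// Illustrative only. With provider 'ollama' and no stored key:
const llm = { provider: 'ollama', apiKey: undefined as string | undefined };
const llmNeedsApiKey = providerRequiresApiKey(String(llm.provider || '')); // presumably false
// (llmNeedsApiKey && !llm.apiKey) === false, so the missing key alone no
// longer forces a password prompt or a setup error; a missing apiToken
// or wallet still does.
```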
package/src/llm/anthropic.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LLMMessage, LLMResponse, LLMMetadata, LLMConfig } from '@exagent/sdk';
+import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
 import { BaseLLMAdapter } from './base.js';
 
 export class AnthropicAdapter extends BaseLLMAdapter {
@@ -14,7 +14,7 @@ export class AnthropicAdapter extends BaseLLMAdapter {
     const nonSystemMessages = messages.filter(m => m.role !== 'system');
 
     const body: Record<string, unknown> = {
-      model: this.config.model || 'claude-sonnet-4-20250514',
+      model: this.config.model || getDefaultModel('anthropic'),
       messages: nonSystemMessages.map(m => ({ role: m.role, content: m.content })),
       max_tokens: this.getMaxTokens(),
       temperature: this.getTemperature(),
@@ -57,7 +57,7 @@ export class AnthropicAdapter extends BaseLLMAdapter {
   getMetadata(): LLMMetadata {
     return {
       provider: 'anthropic',
-      model: this.config.model || 'claude-sonnet-4-20250514',
+      model: this.config.model || getDefaultModel('anthropic'),
     };
   }
 }
package/src/llm/deepseek.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LLMMessage, LLMResponse, LLMMetadata, LLMConfig } from '@exagent/sdk';
+import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
 import { BaseLLMAdapter } from './base.js';
 
 export class DeepSeekAdapter extends BaseLLMAdapter {
@@ -14,7 +14,7 @@ export class DeepSeekAdapter extends BaseLLMAdapter {
         Authorization: `Bearer ${this.config.apiKey}`,
       },
       body: JSON.stringify({
-        model: this.config.model || 'deepseek-chat',
+        model: this.config.model || getDefaultModel('deepseek'),
         messages: messages.map(m => ({ role: m.role, content: m.content })),
         temperature: this.getTemperature(),
         max_tokens: this.getMaxTokens(),
@@ -42,7 +42,7 @@ export class DeepSeekAdapter extends BaseLLMAdapter {
   getMetadata(): LLMMetadata {
     return {
       provider: 'deepseek',
-      model: this.config.model || 'deepseek-chat',
+      model: this.config.model || getDefaultModel('deepseek'),
     };
   }
 }
package/src/llm/google.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LLMMessage, LLMResponse, LLMMetadata, LLMConfig } from '@exagent/sdk';
+import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
 import { BaseLLMAdapter } from './base.js';
 
 export class GoogleAdapter extends BaseLLMAdapter {
@@ -7,7 +7,7 @@ export class GoogleAdapter extends BaseLLMAdapter {
   }
 
   protected async chatImpl(messages: LLMMessage[]): Promise<LLMResponse> {
-    const model = this.config.model || 'gemini-2.5-flash';
+    const model = this.config.model || getDefaultModel('google');
     const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${this.config.apiKey}`;
 
     const systemMessage = messages.find(m => m.role === 'system');
@@ -57,7 +57,7 @@ export class GoogleAdapter extends BaseLLMAdapter {
   getMetadata(): LLMMetadata {
     return {
       provider: 'google',
-      model: this.config.model || 'gemini-2.5-flash',
+      model: this.config.model || getDefaultModel('google'),
     };
   }
 }
package/src/llm/groq.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LLMMessage, LLMResponse, LLMMetadata, LLMConfig } from '@exagent/sdk';
+import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
 import { BaseLLMAdapter } from './base.js';
 
 export class GroqAdapter extends BaseLLMAdapter {
@@ -14,7 +14,7 @@ export class GroqAdapter extends BaseLLMAdapter {
         Authorization: `Bearer ${this.config.apiKey}`,
       },
       body: JSON.stringify({
-        model: this.config.model || 'llama-3.3-70b-versatile',
+        model: this.config.model || getDefaultModel('groq'),
         messages: messages.map(m => ({ role: m.role, content: m.content })),
         temperature: this.getTemperature(),
         max_tokens: this.getMaxTokens(),
@@ -42,7 +42,7 @@ export class GroqAdapter extends BaseLLMAdapter {
   getMetadata(): LLMMetadata {
     return {
       provider: 'groq',
-      model: this.config.model || 'llama-3.3-70b-versatile',
+      model: this.config.model || getDefaultModel('groq'),
     };
   }
 }
package/src/llm/mistral.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LLMMessage, LLMResponse, LLMMetadata, LLMConfig } from '@exagent/sdk';
+import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
 import { BaseLLMAdapter } from './base.js';
 
 export class MistralAdapter extends BaseLLMAdapter {
@@ -14,7 +14,7 @@ export class MistralAdapter extends BaseLLMAdapter {
         Authorization: `Bearer ${this.config.apiKey}`,
       },
       body: JSON.stringify({
-        model: this.config.model || 'mistral-large-latest',
+        model: this.config.model || getDefaultModel('mistral'),
         messages: messages.map(m => ({ role: m.role, content: m.content })),
         temperature: this.getTemperature(),
         max_tokens: this.getMaxTokens(),
@@ -42,7 +42,7 @@ export class MistralAdapter extends BaseLLMAdapter {
   getMetadata(): LLMMetadata {
     return {
       provider: 'mistral',
-      model: this.config.model || 'mistral-large-latest',
+      model: this.config.model || getDefaultModel('mistral'),
     };
   }
 }
package/src/llm/ollama.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LLMMessage, LLMResponse, LLMMetadata, LLMConfig } from '@exagent/sdk';
+import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
 import { BaseLLMAdapter } from './base.js';
 
 export class OllamaAdapter extends BaseLLMAdapter {
@@ -14,7 +14,7 @@ export class OllamaAdapter extends BaseLLMAdapter {
       method: 'POST',
       headers: { 'Content-Type': 'application/json' },
       body: JSON.stringify({
-        model: this.config.model || 'llama3.3',
+        model: this.config.model || getDefaultModel('ollama'),
         messages: messages.map(m => ({ role: m.role, content: m.content })),
         stream: false,
         options: {
@@ -46,7 +46,7 @@ export class OllamaAdapter extends BaseLLMAdapter {
   getMetadata(): LLMMetadata {
     return {
       provider: 'ollama',
-      model: this.config.model || 'llama3.3',
+      model: this.config.model || getDefaultModel('ollama'),
     };
   }
 }
package/src/llm/openai.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LLMMessage, LLMResponse, LLMMetadata, LLMConfig } from '@exagent/sdk';
+import { getDefaultModel, shouldUseOpenAIResponses, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
 import { BaseLLMAdapter } from './base.js';
 
 export class OpenAIAdapter extends BaseLLMAdapter {
@@ -10,6 +10,11 @@ export class OpenAIAdapter extends BaseLLMAdapter {
   }
 
   protected async chatImpl(messages: LLMMessage[]): Promise<LLMResponse> {
+    const model = this.config.model || getDefaultModel('openai');
+    if (shouldUseOpenAIResponses(model)) {
+      return this.chatResponses(model, messages);
+    }
+
     const res = await this.fetchWithTimeout(`${this.endpoint}/chat/completions`, {
       method: 'POST',
       headers: {
@@ -17,7 +22,7 @@ export class OpenAIAdapter extends BaseLLMAdapter {
         Authorization: `Bearer ${this.config.apiKey}`,
       },
       body: JSON.stringify({
-        model: this.config.model || 'gpt-4o',
+        model,
         messages: messages.map(m => ({ role: m.role, content: m.content })),
         temperature: this.getTemperature(),
         max_tokens: this.getMaxTokens(),
@@ -42,10 +47,48 @@ export class OpenAIAdapter extends BaseLLMAdapter {
     };
   }
 
+  private async chatResponses(model: string, messages: LLMMessage[]): Promise<LLMResponse> {
+    const res = await this.fetchWithTimeout(`${this.endpoint}/responses`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+        Authorization: `Bearer ${this.config.apiKey}`,
+      },
+      body: JSON.stringify({
+        model,
+        input: messages.map(m => ({
+          role: m.role === 'system' ? 'developer' : m.role,
+          content: m.content,
+        })),
+        max_output_tokens: this.getMaxTokens(),
+      }),
+    });
+
+    if (!res.ok) {
+      const body = await res.text();
+      throw new Error(`OpenAI API error ${res.status}: ${body}`);
+    }
+
+    const data = await res.json() as {
+      output_text?: string;
+      output?: Array<{ content?: Array<{ text?: string }> }>;
+      usage?: { input_tokens?: number; output_tokens?: number };
+    };
+
+    return {
+      content: data.output_text
+        || data.output?.flatMap(item => item.content?.map(content => content.text || '') || []).join('')
+        || '',
+      tokens: data.usage
+        ? { input: data.usage.input_tokens || 0, output: data.usage.output_tokens || 0 }
+        : undefined,
+    };
+  }
+
   getMetadata(): LLMMetadata {
     return {
       provider: 'openai',
-      model: this.config.model || 'gpt-4o',
+      model: this.config.model || getDefaultModel('openai'),
     };
   }
 }
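`shouldUseOpenAIResponses` also comes from @exagent/sdk and is not shown in this diff. Judging by the provider catalog above, it presumably routes the newer gpt-5-family models to the /responses endpoint while older chat models stay on /chat/completions; a guess at its shape:

```ts
// Assumption: the SDK's real predicate is not in this diff.
function shouldUseOpenAIResponses(model: string): boolean {
  return model.startsWith('gpt-5');
}
```

Visible in the diff itself: the Responses path maps the `system` role to `developer` and sends `max_output_tokens`, but unlike the chat-completions path it does not send `temperature`.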
package/src/llm/together.ts CHANGED
@@ -1,4 +1,4 @@
-import type { LLMMessage, LLMResponse, LLMMetadata, LLMConfig } from '@exagent/sdk';
+import { getDefaultModel, type LLMMessage, type LLMResponse, type LLMMetadata, type LLMConfig } from '@exagent/sdk';
 import { BaseLLMAdapter } from './base.js';
 
 export class TogetherAdapter extends BaseLLMAdapter {
@@ -14,7 +14,7 @@ export class TogetherAdapter extends BaseLLMAdapter {
         Authorization: `Bearer ${this.config.apiKey}`,
       },
       body: JSON.stringify({
-        model: this.config.model || 'meta-llama/Llama-3.3-70B-Instruct-Turbo',
+        model: this.config.model || getDefaultModel('together'),
         messages: messages.map(m => ({ role: m.role, content: m.content })),
         temperature: this.getTemperature(),
         max_tokens: this.getMaxTokens(),
@@ -42,7 +42,7 @@ export class TogetherAdapter extends BaseLLMAdapter {
   getMetadata(): LLMMetadata {
     return {
       provider: 'together',
-      model: this.config.model || 'meta-llama/Llama-3.3-70B-Instruct-Turbo',
+      model: this.config.model || getDefaultModel('together'),
     };
   }
 }