@link-assistant/agent 0.5.2 → 0.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@link-assistant/agent",
-   "version": "0.5.2",
+   "version": "0.5.3",
    "description": "A minimal, public domain AI CLI agent compatible with OpenCode's JSON interface. Bun-only runtime.",
    "main": "src/index.js",
    "type": "module",
@@ -82,6 +82,7 @@
    "hono-openapi": "^1.1.1",
    "ignore": "^7.0.5",
    "jsonc-parser": "^3.3.1",
+   "lino-objects-codec": "^0.1.1",
    "minimatch": "^10.1.1",
    "open": "^11.0.0",
    "partial-json": "^0.1.7",
@@ -0,0 +1,259 @@
+ /**
+  * Cache Provider - A synthetic provider for caching API responses
+  *
+  * This provider caches API responses to enable deterministic testing.
+  * When a response is not cached, it falls back to the echo provider behavior.
+  * Cached responses are stored using Links Notation format (.lino files).
+  *
+  * Usage:
+  *   agent --model link-assistant/cache/opencode -p "hello"  # Uses cached responses
+  *
+  * Cache location: ./data/api-cache/{provider}/{model}/
+  * Format: Links Notation files with .lino extension
+  *
+  * @see https://github.com/link-assistant/agent/issues/89
+  * @see https://github.com/link-foundation/lino-objects-codec
+  */
+
+ import type { LanguageModelV2, LanguageModelV2CallOptions } from 'ai';
+ import { Log } from '../util/log';
+ import { createEchoModel } from './echo';
+ import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
+ import { dirname, join } from 'path';
+ import { fileURLToPath } from 'url';
+ // @ts-ignore - lino-objects-codec is a JavaScript library
+ import { encode, decode } from 'lino-objects-codec';
+
+ const log = Log.create({ service: 'provider.cache' });
+
+ const __dirname = dirname(fileURLToPath(import.meta.url));
+ const CACHE_ROOT = join(__dirname, '../../data/api-cache');
+
+ /**
+  * Generate a cache key from the prompt
+  */
+ function generateCacheKey(
+   prompt: LanguageModelV2CallOptions['prompt']
+ ): string {
+   // Simple hash of the prompt content
+   const content = JSON.stringify(prompt);
+   let hash = 0;
+   for (let i = 0; i < content.length; i++) {
+     const char = content.charCodeAt(i);
+     hash = (hash << 5) - hash + char;
+     hash = hash & hash; // Convert to 32-bit integer
+   }
+   return Math.abs(hash).toString(36);
+ }
+
+ /**
+  * Get cache file path for a provider/model combination
+  * Uses .lino extension for Links Notation format
+  */
+ function getCachePath(provider: string, model: string, key: string): string {
+   return join(CACHE_ROOT, provider, model, `${key}.lino`);
+ }
+
+ /**
+  * Generate a unique ID for streaming parts
+  */
+ function generatePartId(): string {
+   return `cache_${Date.now()}_${Math.random().toString(36).substring(2, 8)}`;
+ }
+
+ /**
+  * Load cached response from file using Links Notation format
+  */
+ function loadCachedResponse(filePath: string): any | null {
+   try {
+     if (!existsSync(filePath)) {
+       return null;
+     }
+     const content = readFileSync(filePath, 'utf8');
+     // Decode from Links Notation format
+     return decode({ notation: content });
+   } catch (error: any) {
+     log.warn('Failed to load cached response', {
+       filePath,
+       error: error.message,
+     });
+     return null;
+   }
+ }
+
+ /**
+  * Save response to cache file using Links Notation format
+  */
+ function saveCachedResponse(filePath: string, response: any): void {
+   try {
+     const dir = dirname(filePath);
+     if (!existsSync(dir)) {
+       mkdirSync(dir, { recursive: true });
+     }
+     // Encode to Links Notation format
+     const encoded = encode({ obj: response });
+     writeFileSync(filePath, encoded, 'utf8');
+     log.info('Saved cached response', { filePath });
+   } catch (error: any) {
+     log.warn('Failed to save cached response', {
+       filePath,
+       error: error.message,
+     });
+   }
+ }
+
+ /**
+  * Creates a cache language model that stores/retrieves responses
+  * Implements LanguageModelV2 interface for AI SDK 6.x compatibility
+  */
+ export function createCacheModel(
+   providerId: string,
+   modelId: string
+ ): LanguageModelV2 {
+   const model: LanguageModelV2 = {
+     specificationVersion: 'v2',
+     provider: 'link-assistant',
+     modelId: `${providerId}/${modelId}`,
+
+     // No external URLs are supported by this synthetic provider
+     supportedUrls: {},
+
+     async doGenerate(options: LanguageModelV2CallOptions) {
+       const cacheKey = generateCacheKey(options.prompt);
+       const cachePath = getCachePath(providerId, modelId, cacheKey);
+
+       // Try to load from cache first
+       const cached = loadCachedResponse(cachePath);
+       if (cached) {
+         log.info('Using cached response', { providerId, modelId, cacheKey });
+         return cached;
+       }
+
+       // Fall back to echo behavior
+       log.info('No cached response, using echo fallback', {
+         providerId,
+         modelId,
+         cacheKey,
+       });
+       const echoModel = createEchoModel(`${providerId}/${modelId}`);
+       const response = await echoModel.doGenerate(options);
+
+       // Save to cache for future use
+       saveCachedResponse(cachePath, response);
+
+       return response;
+     },
+
+     async doStream(options: LanguageModelV2CallOptions) {
+       const cacheKey = generateCacheKey(options.prompt);
+       const cachePath = getCachePath(providerId, modelId, cacheKey);
+
+       // Try to load from cache first
+       const cached = loadCachedResponse(cachePath);
+       if (cached) {
+         log.info('Using cached streaming response', {
+           providerId,
+           modelId,
+           cacheKey,
+         });
+
+         // For cached responses, we need to simulate streaming
+         // Extract the text from the cached response
+         const echoText =
+           cached.content?.[0]?.text || cached.text || 'Cached response';
+         const textPartId = generatePartId();
+
+         // Create a ReadableStream with LanguageModelV2StreamPart format
+         const stream = new ReadableStream({
+           async start(controller) {
+             // Emit text-start
+             controller.enqueue({
+               type: 'text-start',
+               id: textPartId,
+               providerMetadata: undefined,
+             });
+
+             // Emit the text in chunks for realistic streaming behavior
+             const chunkSize = 10;
+             for (let i = 0; i < echoText.length; i += chunkSize) {
+               const chunk = echoText.slice(i, i + chunkSize);
+               controller.enqueue({
+                 type: 'text-delta',
+                 id: textPartId,
+                 delta: chunk,
+                 providerMetadata: undefined,
+               });
+             }
+
+             // Emit text-end
+             controller.enqueue({
+               type: 'text-end',
+               id: textPartId,
+               providerMetadata: undefined,
+             });
+
+             // Emit finish event
+             controller.enqueue({
+               type: 'finish',
+               finishReason: 'stop',
+               usage: cached.usage || {
+                 promptTokens: Math.ceil(echoText.length / 4),
+                 completionTokens: Math.ceil(echoText.length / 4),
+               },
+               providerMetadata: undefined,
+             });
+
+             controller.close();
+           },
+         });
+
+         return {
+           stream,
+           request: undefined,
+           response: undefined,
+           warnings: [],
+         };
+       }
+
+       // Fall back to echo streaming behavior
+       log.info('No cached streaming response, using echo fallback', {
+         providerId,
+         modelId,
+         cacheKey,
+       });
+       const echoModel = createEchoModel(`${providerId}/${modelId}`);
+       const response = await echoModel.doStream(options);
+
+       // Note: We don't cache streaming responses as they're consumed immediately
+       return response;
+     },
+   };
+
+   return model;
+ }
+
+ /**
+  * Cache provider factory function
+  */
+ export function createCacheProvider(options?: { name?: string }) {
+   return {
+     languageModel(modelId: string): LanguageModelV2 {
+       // Parse provider/model from modelId like "opencode/grok-code"
+       const parts = modelId.split('/');
+       if (parts.length < 2) {
+         throw new Error(
+           `Invalid cache model ID: ${modelId}. Expected format: provider/model`
+         );
+       }
+       const [providerId, ...modelParts] = parts;
+       const actualModelId = modelParts.join('/');
+
+       return createCacheModel(providerId, actualModelId);
+     },
+     textEmbeddingModel() {
+       throw new Error('Cache provider does not support text embeddings');
+     },
+   };
+ }
+
+ export const cacheProvider = createCacheProvider();
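
A short sketch of how the factory exported above is meant to be driven; the 'opencode/grok-code' ID is an example of the required "provider/model" format, and the cast on the call options is illustrative only:

// Sketch only: exercises cacheProvider as defined above; the prompt is cast because the
// full LanguageModelV2CallOptions shape is omitted here.
import { cacheProvider } from './cache';

const model = cacheProvider.languageModel('opencode/grok-code');

// A cache miss falls back to echo behaviour and writes data/api-cache/opencode/grok-code/<hash>.lino;
// repeating the same prompt is then served from that file.
const result = await model.doGenerate({
  prompt: [{ role: 'user', content: [{ type: 'text', text: 'hello' }] }],
} as any);
console.log((result as any).content?.[0]?.text); // 'hello' on the echo fallback path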
@@ -0,0 +1,174 @@
+ /**
+  * Echo Provider - A synthetic provider for testing dry-run mode
+  *
+  * This provider echoes back the user's input message without making actual API calls.
+  * It's designed to enable robust testing of round-trips and multi-turn conversations
+  * without incurring API costs.
+  *
+  * Usage:
+  *   agent --dry-run -p "hello"                    # Uses echo provider automatically
+  *   agent --model link-assistant/echo -p "hello"  # Explicit usage
+  *
+  * The echo behavior follows the issue #89 specification:
+  *   Input: "hi" -> Output: "hi"
+  *   Input: "How are you?" -> Output: "How are you?"
+  */
+
+ import type { LanguageModelV2, LanguageModelV2CallOptions } from 'ai';
+ import { Log } from '../util/log';
+
+ const log = Log.create({ service: 'provider.echo' });
+
+ /**
+  * Extract text content from the prompt messages
+  */
+ function extractTextFromPrompt(
+   prompt: LanguageModelV2CallOptions['prompt']
+ ): string {
+   const textParts: string[] = [];
+
+   for (const message of prompt) {
+     if (message.role === 'user') {
+       for (const part of message.content) {
+         if (part.type === 'text') {
+           textParts.push(part.text);
+         }
+       }
+     }
+   }
+
+   // Return the last user message or a default response
+   return textParts.length > 0
+     ? textParts[textParts.length - 1]
+     : 'Echo: No user message found';
+ }
+
+ /**
+  * Generate a unique ID for streaming parts
+  */
+ function generatePartId(): string {
+   return `echo_${Date.now()}_${Math.random().toString(36).substring(2, 8)}`;
+ }
+
+ /**
+  * Creates an echo language model that echoes back the user's input
+  * Implements LanguageModelV2 interface for AI SDK 6.x compatibility
+  */
+ export function createEchoModel(modelId: string = 'echo'): LanguageModelV2 {
+   const model: LanguageModelV2 = {
+     specificationVersion: 'v2',
+     provider: 'link-assistant',
+     modelId,
+
+     // No external URLs are supported by this synthetic provider
+     supportedUrls: {},
+
+     async doGenerate(options: LanguageModelV2CallOptions) {
+       const echoText = extractTextFromPrompt(options.prompt);
+       log.info('echo generate', { modelId, echoText });
+
+       // Simulate token usage
+       const promptTokens = Math.ceil(echoText.length / 4);
+       const completionTokens = Math.ceil(echoText.length / 4);
+
+       return {
+         content: [
+           {
+             type: 'text' as const,
+             text: echoText,
+           },
+         ],
+         finishReason: 'stop' as const,
+         usage: {
+           promptTokens,
+           completionTokens,
+         },
+         warnings: [],
+         providerMetadata: undefined,
+         request: undefined,
+         response: undefined,
+       };
+     },
+
+     async doStream(options: LanguageModelV2CallOptions) {
+       const echoText = extractTextFromPrompt(options.prompt);
+       log.info('echo stream', { modelId, echoText });
+
+       // Simulate token usage
+       const promptTokens = Math.ceil(echoText.length / 4);
+       const completionTokens = Math.ceil(echoText.length / 4);
+
+       const textPartId = generatePartId();
+
+       // Create a ReadableStream with LanguageModelV2StreamPart format
+       // V2 format uses: text-start -> text-delta (with delta) -> text-end -> finish
+       const stream = new ReadableStream({
+         async start(controller) {
+           // Emit text-start
+           controller.enqueue({
+             type: 'text-start',
+             id: textPartId,
+             providerMetadata: undefined,
+           });
+
+           // Emit the text in chunks for realistic streaming behavior
+           const chunkSize = 10;
+           for (let i = 0; i < echoText.length; i += chunkSize) {
+             const chunk = echoText.slice(i, i + chunkSize);
+             controller.enqueue({
+               type: 'text-delta',
+               id: textPartId,
+               delta: chunk,
+               providerMetadata: undefined,
+             });
+           }
+
+           // Emit text-end
+           controller.enqueue({
+             type: 'text-end',
+             id: textPartId,
+             providerMetadata: undefined,
+           });
+
+           // Emit finish event with usage information
+           controller.enqueue({
+             type: 'finish',
+             finishReason: 'stop',
+             usage: {
+               promptTokens,
+               completionTokens,
+             },
+             providerMetadata: undefined,
+           });
+
+           controller.close();
+         },
+       });
+
+       return {
+         stream,
+         request: undefined,
+         response: undefined,
+         warnings: [],
+       };
+     },
+   };
+
+   return model;
+ }
+
+ /**
+  * Echo provider factory function - follows AI SDK provider pattern
+  */
+ export function createEchoProvider(options?: { name?: string }) {
+   return {
+     languageModel(modelId: string): LanguageModelV2 {
+       return createEchoModel(modelId);
+     },
+     textEmbeddingModel() {
+       throw new Error('Echo provider does not support text embeddings');
+     },
+   };
+ }
+
+ export const echoProvider = createEchoProvider();
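
A minimal round-trip check against the echo model above, sketched as a Bun test (bun:test is assumed from the package's Bun-only runtime; no such test file is part of this diff):

// Sketch only: not part of the package; assumes bun:test is available.
import { test, expect } from 'bun:test';
import { createEchoModel } from './echo';

test('echo model returns the last user message', async () => {
  const model = createEchoModel('echo');
  const result = await model.doGenerate({
    prompt: [{ role: 'user', content: [{ type: 'text', text: 'How are you?' }] }],
  } as any);
  // Per the issue #89 spec quoted above: input "How are you?" -> output "How are you?"
  expect((result as any).content[0].text).toBe('How are you?');
});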
@@ -14,6 +14,8 @@ import { Instance } from '../project/instance';
  import { Global } from '../global';
  import { Flag } from '../flag/flag';
  import { iife } from '../util/iife';
+ import { createEchoModel } from './echo';
+ import { createCacheModel } from './cache';

  export namespace Provider {
    const log = Log.create({ service: 'provider' });
@@ -452,6 +454,56 @@ export namespace Provider {
        },
      };
    },
+   /**
+    * Echo provider - synthetic provider for dry-run testing
+    * Echoes back the user's input without making actual API calls.
+    *
+    * This provider is automatically enabled when --dry-run mode is active.
+    * It can also be used explicitly with: --model link-assistant/echo
+    *
+    * @see https://github.com/link-assistant/agent/issues/89
+    */
+   'link-assistant': async () => {
+     // Echo provider is always available - no external dependencies needed
+     return {
+       autoload: Flag.OPENCODE_DRY_RUN, // Auto-load only in dry-run mode
+       async getModel(_sdk: any, modelID: string) {
+         // Return our custom echo model that implements LanguageModelV2
+         return createEchoModel(modelID);
+       },
+       options: {},
+     };
+   },
+   /**
+    * Cache provider - synthetic provider for caching API responses
+    * Caches responses using links notation for deterministic testing.
+    *
+    * This provider caches API responses and falls back to echo behavior.
+    * It can be used explicitly with: --model link-assistant/cache/opencode/grok-code
+    *
+    * @see https://github.com/link-assistant/agent/issues/89
+    */
+   'link-assistant/cache': async () => {
+     // Cache provider is always available - no external dependencies needed
+     return {
+       autoload: false, // Not auto-loaded
+       async getModel(_sdk: any, modelID: string) {
+         // modelID should be in format "provider/model" like "opencode/grok-code"
+         const parts = modelID.split('/');
+         if (parts.length < 2) {
+           throw new Error(
+             `Invalid cache model ID: ${modelID}. Expected format: provider/model`
+           );
+         }
+         const [providerId, ...modelParts] = parts;
+         const actualModelId = modelParts.join('/');
+
+         // Return our custom cache model that implements LanguageModelV2
+         return createCacheModel(providerId, actualModelId);
+       },
+       options: {},
+     };
+   },
  };

  const state = Instance.state(async () => {
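
To make the routing concrete: with --model link-assistant/cache/opencode/grok-code, the provider ID is link-assistant/cache and the loader above receives 'opencode/grok-code' as its model ID. A sketch of the split it performs (mirroring the code above, not calling into the real CLI):

// Sketch only: traces the parsing done by the 'link-assistant/cache' loader above.
const modelID = 'opencode/grok-code';               // what the cache loader receives
const [providerId, ...modelParts] = modelID.split('/');
const actualModelId = modelParts.join('/');         // 'grok-code'
// createCacheModel(providerId, actualModelId) then stores responses under
// data/api-cache/opencode/grok-code/<hash>.lino (see the cache provider above).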
@@ -555,6 +607,51 @@ export namespace Provider {
      realIdByKey.set('google/gemini-3-pro', 'gemini-3-pro-preview');
    }

+   // Add link-assistant echo provider for dry-run testing
+   // This synthetic provider echoes back user input without API calls
+   // @see https://github.com/link-assistant/agent/issues/89
+   database['link-assistant'] = {
+     id: 'link-assistant',
+     name: 'Link Assistant (Echo)',
+     env: [], // No environment variables needed - synthetic provider
+     models: {
+       echo: {
+         id: 'echo',
+         name: 'Echo Model',
+         release_date: '2024-01-01',
+         attachment: false,
+         reasoning: false,
+         temperature: false,
+         tool_call: true,
+         cost: {
+           input: 0,
+           output: 0,
+           cache_read: 0,
+           cache_write: 0,
+         },
+         limit: {
+           context: 1000000, // Virtually unlimited
+           output: 100000,
+         },
+         modalities: {
+           input: ['text'],
+           output: ['text'],
+         },
+         options: {},
+       },
+     },
+   };
+
+   // Add link-assistant/cache provider for caching API responses
+   // This synthetic provider caches responses and falls back to echo
+   // @see https://github.com/link-assistant/agent/issues/89
+   database['link-assistant/cache'] = {
+     id: 'link-assistant/cache',
+     name: 'Link Assistant (Cache)',
+     env: [], // No environment variables needed - synthetic provider
+     models: {}, // Models are dynamically created based on the provider/model syntax
+   };
+
    for (const [providerID, provider] of configProviders) {
      const existing = database[providerID];
      const parsed: ModelsDev.Provider = {
@@ -806,23 +903,41 @@ export namespace Provider {

    const provider = s.providers[providerID];
    if (!provider) throw new ModelNotFoundError({ providerID, modelID });
-   const info = provider.info.models[modelID];
-   if (!info) throw new ModelNotFoundError({ providerID, modelID });
-   const sdk = await getSDK(provider.info, info);
+
+   // For synthetic providers (like link-assistant/echo and link-assistant/cache), skip SDK loading
+   // These providers have a custom getModel function that creates the model directly
+   const isSyntheticProvider =
+     providerID === 'link-assistant' || providerID === 'link-assistant/cache';
+
+   // For synthetic providers, we don't need model info from the database
+   const info = isSyntheticProvider ? null : provider.info.models[modelID];
+   if (!isSyntheticProvider && !info)
+     throw new ModelNotFoundError({ providerID, modelID });

    try {
      const keyReal = `${providerID}/${modelID}`;
-     const realID = s.realIdByKey.get(keyReal) ?? info.id;
-     const language = provider.getModel
-       ? await provider.getModel(sdk, realID, provider.options)
-       : sdk.languageModel(realID);
+     const realID = s.realIdByKey.get(keyReal) ?? (info ? info.id : modelID);
+
+     let language: LanguageModel;
+     if (isSyntheticProvider && provider.getModel) {
+       // For synthetic providers, call getModel directly without SDK
+       language = await provider.getModel(null, realID, provider.options);
+     } else {
+       // For regular providers, load the SDK first
+       const sdk = await getSDK(provider.info, info!);
+       language = provider.getModel
+         ? await provider.getModel(sdk, realID, provider.options)
+         : sdk.languageModel(realID);
+     }
      log.info('found', { providerID, modelID });
      s.models.set(key, {
        providerID,
        modelID,
        info,
        language,
-       npm: info.provider?.npm ?? provider.info.npm,
+       npm: isSyntheticProvider
+         ? provider.info.npm
+         : (info.provider?.npm ?? provider.info.npm),
      });
      return {
        modelID,
@@ -897,6 +1012,18 @@ export namespace Provider {

  export async function defaultModel() {
    const cfg = await Config.get();
+
+   // In dry-run mode, use the echo provider by default
+   // This allows testing round-trips and multi-turn conversations without API costs
+   // @see https://github.com/link-assistant/agent/issues/89
+   if (Flag.OPENCODE_DRY_RUN) {
+     log.info('dry-run mode enabled, using echo provider as default');
+     return {
+       providerID: 'link-assistant',
+       modelID: 'echo',
+     };
+   }
+
    if (cfg.model) return parseModel(cfg.model);

    // Prefer opencode provider if available
@@ -447,7 +447,7 @@ export namespace SessionPrompt {
          lastFinished.summary !== true &&
          SessionCompaction.isOverflow({
            tokens: lastFinished.tokens,
-           model: model.info,
+           model: model.info ?? { id: model.modelID },
          })
        ) {
          await SessionCompaction.create({
@@ -488,13 +488,13 @@
          sessionID,
        })) as MessageV2.Assistant,
      sessionID: sessionID,
-     model: model.info,
+     model: model.info ?? { id: model.modelID },
      providerID: model.providerID,
      abort,
    });
    const system = await resolveSystemPrompt({
      providerID: model.providerID,
-     modelID: model.info.id,
+     modelID: model.info?.id ?? model.modelID,
      agent,
      system: lastUser.system,
      appendSystem: lastUser.appendSystem,
@@ -507,10 +507,11 @@
      processor,
    });
    const params = {
-     temperature: model.info.temperature
-       ? (agent.temperature ??
-         ProviderTransform.temperature(model.providerID, model.modelID))
-       : undefined,
+     temperature:
+       (model.info?.temperature ?? false)
+         ? (agent.temperature ??
+           ProviderTransform.temperature(model.providerID, model.modelID))
+         : undefined,
      topP:
        agent.topP ?? ProviderTransform.topP(model.providerID, model.modelID),
      options: {
@@ -520,7 +521,7 @@
          model.npm ?? '',
          sessionID
        ),
-       ...model.info.options,
+       ...(model.info?.options ?? {}),
        ...agent.options,
      },
    };
@@ -575,9 +576,11 @@
      log.info(`User message tokens (estimated): ${userTokens}`);
      log.info(`Total estimated tokens: ${totalEstimatedTokens}`);
      log.info(
-       `Model context limit: ${model.info.limit.context || 'unknown'}`
+       `Model context limit: ${model.info?.limit?.context || 'unknown'}`
+     );
+     log.info(
+       `Model output limit: ${model.info?.limit?.output || 'unknown'}`
      );
-     log.info(`Model output limit: ${model.info.limit.output || 'unknown'}`);
      log.info('=== END VERBOSE ===');
    }

@@ -616,7 +619,7 @@
          'x-opencode-request': lastUser.id,
        }
      : undefined),
-     ...model.info.headers,
+     ...(model.info?.headers ?? {}),
    },
    // set to 0, we handle loop
    maxRetries: 0,
@@ -624,7 +627,7 @@
    maxOutputTokens: ProviderTransform.maxOutputTokens(
      model.providerID,
      params.options,
-     model.info.limit.output,
+     model.info?.limit?.output ?? 100000,
      OUTPUT_TOKEN_MAX
    ),
    abortSignal: abort,
@@ -662,7 +665,7 @@
        })
      ),
    ],
-   tools: model.info.tool_call === false ? undefined : tools,
+   tools: model.info?.tool_call === false ? undefined : tools,
    model: wrapLanguageModel({
      model: model.language,
      middleware: [
@@ -1494,7 +1497,7 @@
        small.npm ?? '',
        input.session.id
      ),
-     ...small.info.options,
+     ...(small.info?.options ?? {}),
    };
    if (small.providerID === 'openai' || small.modelID.includes('gpt-5')) {
      if (small.modelID.includes('5.1')) {
@@ -1509,7 +1512,7 @@
      };
    }
    await generateText({
-     maxOutputTokens: small.info.reasoning ? 1500 : 20,
+     maxOutputTokens: small.info?.reasoning ? 1500 : 20,
      providerOptions: ProviderTransform.providerOptions(
        small.npm,
        small.providerID,
@@ -1550,7 +1553,7 @@
        },
      ]),
    ],
-   headers: small.info.headers,
+   headers: small.info?.headers ?? {},
    model: small.language,
  })
    .then((result) => {
@@ -1569,7 +1572,10 @@
      });
    })
    .catch((error) => {
-     log.error('failed to generate title', { error, model: small.info.id });
+     log.error('failed to generate title', {
+       error,
+       model: small.info?.id ?? small.modelID,
+     });
    });
  }
}