goatchain 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of goatchain might be problematic.

package/README.md ADDED
@@ -0,0 +1,529 @@
# GoatChain 🐐

> A lightweight, extensible TypeScript SDK for building AI agents with streaming support, tool calling, and a middleware pattern.

[![npm version](https://badge.fury.io/js/agent-loop.svg)](https://badge.fury.io/js/agent-loop)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

## ✨ Features

- **🔄 Agentic Loop** - Automatic tool-calling loop with configurable max iterations
- **📡 Streaming First** - Real-time streaming responses with detailed events
- **🧅 Middleware Pattern** - Koa-style onion model for extensible hooks
- **🔧 Tool System** - Easy-to-use tool registration and execution
- **💾 State Management** - Two-level state store (Agent + Session level)
- **📸 Snapshot/Restore** - Full persistence support for agents and sessions
- **🎯 TypeScript Native** - Full type safety with comprehensive type exports

## 📦 Installation

```bash
pnpm add agent-loop
# or
npm install agent-loop
# or
yarn add agent-loop
```

## 🧰 CLI

After installing this package globally (or using `pnpm -s cli` in this repo), run:

```bash
goatchain
```

Common options:

- `-k, --api-key <key>` (or set `OPENAI_API_KEY`)
- `-m, --model <id>`
- `--base-url <url>`
- `--max-tokens <n>`
- `--temperature <n>`

Commands:

- `/help` show help
- `/model <id>` switch model id (OpenAI)
- `/set <k> <v>` set request params (e.g. `temperature`, `maxTokens`, `topP`)
- `/unset <k>` clear a request param
- `/params` show current request params
- `/base-url <url>` set base URL
- `/api-key <key>` set API key (not printed)
- `/tools` list enabled tools (Read/Write/Edit/Glob/Grep)
- `/sessions` list and pick a saved session
- `/use <sessionId>` restore a saved session (prints recent history)
- `/save` persist current config/session
- `/status` show current model/session info
- `/new` start a new conversation (clears history)

Requires `OPENAI_API_KEY` in the environment.

Local persistence (workspace-scoped):

- Config and sessions are saved under `./.goatchain/` (auto-created).
- `.goatchain/` is gitignored to avoid accidentally committing secrets.

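For example, a typical one-shot invocation combining the options above could look like the following; the model id and prompt text are illustrative, while the flags match the CLI definitions in `cli/args.mjs`:

```bash
export OPENAI_API_KEY=sk-...

# One-shot prompt; omit the prompt to start interactive mode
goatchain -m gpt-4o-mini --temperature 0.2 --max-tokens 512 "Summarize this repository"

# Resume a previously saved session
goatchain --session <sessionId>
```
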
## 🏗️ Architecture

```mermaid
classDiagram
  direction TB

  class Agent {
    +id: string
    +name: string
    +systemPrompt: string
    +model: BaseModel
    +tools: ToolRegistry
    +checkpointStore: CheckpointStore
    +sessionManager: BaseSessionManager
    +stats: AgentStats
    +use(middleware): this
    +stream(input): AsyncIterable~AgentEvent~
    +setModel(model): void
    +toSnapshot(): AgentSnapshot
    +restoreFromSnapshot(snapshot): void
  }

  class BaseModel {
    <<abstract>>
    +modelId: string
    +invoke(messages, options): Promise~ChatResponse~
    +stream(messages, options): AsyncIterable~StreamEvent~
  }

  class CheckpointStore {
    <<interface>>
    +savePoint: string
    +deleteOnComplete: boolean
    +saveLoopCheckpoint(checkpoint): Promise~void~
    +loadLoopCheckpoint(sessionId): Promise~AgentLoopCheckpoint~
    +deleteLoopCheckpoint(sessionId): Promise~void~
    +listLoopCheckpoints(agentId): Promise~AgentLoopCheckpoint[]~
  }

  class BaseTool {
    <<abstract>>
    +name: string
    +description: string
    +parameters: JSONSchema
    +execute(args, ctx?): Promise~unknown~
  }

  class ToolRegistry {
    +register(tool): void
    +unregister(name): boolean
    +get(name): BaseTool
    +list(): BaseTool[]
    +toOpenAIFormat(): OpenAITool[]
  }

  class BaseSession {
    <<abstract>>
    +id: string
    +agentId: string
    +status: SessionStatus
    +messages: Message[]
    +usage: Usage
    +configOverride: SessionConfigOverride
    +addMessage(message): void
    +toSnapshot(): SessionSnapshot
    +restoreFromSnapshot(snapshot): void
  }

  class BaseSessionManager {
    <<abstract>>
    +create(agentId, metadata): Promise~BaseSession~
    +get(sessionId): Promise~BaseSession~
    +list(agentId): Promise~BaseSession[]~
    +destroy(sessionId): Promise~void~
  }

  class Middleware {
    <<function>>
    (ctx: AgentLoopState, next: NextFunction) => Promise~void~
  }

  class AgentLoopState {
    +sessionId: string
    +agentId: string
    +messages: Message[]
    +iteration: number
    +pendingToolCalls: ToolCallWithResult[]
    +currentResponse: string
    +shouldContinue: boolean
    +usage: Usage
  }

  Agent --> BaseModel : uses
  Agent --> ToolRegistry : uses
  Agent --> CheckpointStore : uses
  Agent --> BaseSessionManager : uses
  Agent ..> Middleware : applies
  Agent ..> AgentLoopState : manages
  ToolRegistry --> BaseTool : contains
  BaseSessionManager --> BaseSession : manages
```

## 🚀 Quick Start

```typescript
import { Agent, BaseModel, BaseTool, ToolRegistry, createModel, createOpenAIAdapter } from 'agent-loop'

// 1. Create model with built-in adapters (recommended)
const model = createModel({
  adapters: [
    createOpenAIAdapter({
      defaultModelId: 'gpt-4o', // Set default model
      apiKeySecretName: 'OPENAI_API_KEY',
    }),
  ],
})

// 2. Create tools
class WeatherTool extends BaseTool {
  name = 'get_weather'
  description = 'Get current weather for a location'
  parameters = {
    type: 'object',
    properties: {
      location: { type: 'string', description: 'City name' },
    },
    required: ['location'],
  }

  async execute(args) {
    return { temperature: 22, condition: 'sunny' }
  }
}

// 3. Setup tool registry
const tools = new ToolRegistry()
tools.register(new WeatherTool())

// 4. Create agent
const agent = new Agent({
  name: 'Weather Assistant',
  systemPrompt: 'You are a helpful weather assistant.',
  model,
  tools,
})

// 5. Add middleware (optional)
agent.use(async (state, next) => {
  console.log(`Iteration ${state.iteration} starting...`)
  await next()
  console.log(`Iteration ${state.iteration} complete`)
})

// 6. Stream responses
for await (const event of agent.stream({
  sessionId: 'session-1',
  input: 'What is the weather in Tokyo?',
})) {
  switch (event.type) {
    case 'text_delta':
      process.stdout.write(event.delta)
      break
    case 'tool_call_end':
      console.log('Calling tool:', event.toolCall.function.name)
      break
    case 'tool_result':
      console.log('Tool result:', event.result)
      break
    case 'done':
      console.log('\nDone:', event.stopReason)
      break
  }
}
```

## 🧅 Middleware Pattern

GoatChain uses a Koa-style onion model for middleware. Each middleware wraps around the core execution:

```
outer:before → inner:before → exec (model.stream) → inner:after → outer:after
```

```typescript
// Logging middleware
agent.use(async (state, next) => {
  const start = Date.now()
  console.log(`[${state.iteration}] Before model call`)

  await next() // Execute model stream

  console.log(`[${state.iteration}] After model call (${Date.now() - start}ms)`)
})

// Error handling middleware
agent.use(async (state, next) => {
  try {
    await next()
  } catch (error) {
    state.shouldContinue = false
    state.stopReason = 'error'
    state.error = error
  }
})

// Rate limiting middleware
agent.use(async (state, next) => {
  await rateLimiter.acquire()
  await next()
})
```

## 📡 Event Types

The agent stream emits the following events:

| Event             | Description                           |
| ----------------- | ------------------------------------- |
| `iteration_start` | Beginning of a loop iteration         |
| `text_delta`      | Partial text response from LLM        |
| `thinking_start`  | Thinking phase begins (if supported)  |
| `thinking_delta`  | Thinking content delta (if supported) |
| `thinking_end`    | Thinking phase ends (if supported)    |
| `tool_call_start` | Tool call begins                      |
| `tool_call_delta` | Tool call arguments delta             |
| `tool_call_end`   | Tool call is complete                 |
| `tool_result`     | Tool execution completed              |
| `usage`           | Token consumption stats               |
| `iteration_end`   | End of a loop iteration               |
| `done`            | Stream completed                      |
| `error`           | Error occurred                        |

```typescript
interface AgentEvent {
  type:
    | 'text_delta'
    | 'tool_call_start'
    | 'tool_call_delta'
    | 'tool_call_end'
    | 'tool_result'
    | 'thinking_start'
    | 'thinking_delta'
    | 'thinking_end'
    | 'usage'
    | 'error'
    | 'done'
    | 'iteration_start'
    | 'iteration_end'
  // ... event-specific fields
}
```

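As a small sketch, `usage` events can be tallied while streaming. The exact payload field is an assumption here, mirroring the `Usage` shape (`promptTokens`/`completionTokens`/`totalTokens`) used by the session API below:

```typescript
// Minimal sketch: accumulate token usage across a run.
// Assumption: a 'usage' event exposes a `usage` object with totalTokens.
let totalTokens = 0

for await (const event of agent.stream({ sessionId: 'session-1', input: 'Hi' })) {
  if (event.type === 'usage')
    totalTokens += event.usage.totalTokens
  else if (event.type === 'text_delta')
    process.stdout.write(event.delta)
}

console.log(`\nTotal tokens: ${totalTokens}`)
```
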
## 💾 Checkpoint & Resume

Built-in checkpoint support for resuming interrupted agent executions:

```typescript
import { Agent, FileCheckpointStore } from 'agent-loop'

// Create checkpoint store with configuration
const checkpointStore = new FileCheckpointStore({
  dir: './checkpoints',
  savePoint: 'before', // Save before each iteration
  deleteOnComplete: true, // Clean up after successful completion
})

// Agent automatically saves checkpoints when checkpointStore is provided
const agent = new Agent({
  name: 'MyAgent',
  systemPrompt: 'You are helpful.',
  model,
  checkpointStore,
})

// Run agent - checkpoints are saved automatically
for await (const event of agent.stream({
  sessionId: 'session-1',
  input: 'Hello',
})) {
  console.log(event)
}

// If interrupted, resume from checkpoint
const checkpoint = await checkpointStore.loadLoopCheckpoint('session-1')
if (checkpoint) {
  for await (const event of agent.streamFromCheckpoint({ checkpoint })) {
    console.log(event)
  }
}
```

Available checkpoint stores:
- `FileCheckpointStore` - File-based persistence
- `InMemoryCheckpointStore` - In-memory (for testing)

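For tests, the in-memory store can be swapped in. The sketch below assumes `InMemoryCheckpointStore` accepts the same `savePoint`/`deleteOnComplete` options as `FileCheckpointStore` (minus `dir`); that constructor shape is not confirmed by this README:

```typescript
import { Agent, InMemoryCheckpointStore } from 'agent-loop'

// Assumed constructor options; adjust to the store's actual signature.
const checkpointStore = new InMemoryCheckpointStore({
  savePoint: 'before',
  deleteOnComplete: false, // keep checkpoints so tests can inspect them
})

const agent = new Agent({
  name: 'TestAgent',
  systemPrompt: 'You are helpful.',
  model, // any configured model, as in the examples above
  checkpointStore,
})

// The CheckpointStore interface also allows listing checkpoints per agent:
const pending = await checkpointStore.listLoopCheckpoints(agent.id)
```
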
## 🔧 Session Management

Sessions represent individual conversations with per-session configuration overrides:

```typescript
// Create session
const session = await sessionManager.create(agent.id, {
  customField: 'value',
})

// Session-level overrides
session.setModelOverride({ modelId: 'gpt-4o-mini' })
session.setSystemPromptOverride('You are a concise assistant.')
session.disableTools(['dangerous_tool'])

// Track session activity
session.addMessage({ role: 'user', content: 'Hello!' })
session.addUsage({ promptTokens: 10, completionTokens: 5, totalTokens: 15 })
session.recordResponse(1500) // ms

// Get session snapshot for persistence
const snapshot = session.toSnapshot()
```

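To round-trip a session, the snapshot from `toSnapshot()` can be persisted and later fed back through `restoreFromSnapshot()` (shown in the class diagram). This is a minimal sketch, assuming the snapshot is plain JSON-serializable data:

```typescript
import { promises as fs } from 'node:fs'

// Persist the snapshot (assumed JSON-serializable)
await fs.writeFile('./session-1.json', JSON.stringify(session.toSnapshot(), null, 2))

// Later: restore the conversation state into a session object
const saved = JSON.parse(await fs.readFile('./session-1.json', 'utf8'))
session.restoreFromSnapshot(saved)
```
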
## 📁 Project Structure

```
src/
├── index.ts                        # Public exports
├── types/
│   ├── index.ts                    # Re-export all types
│   ├── message.ts                  # Message types (User, Assistant, Tool, System)
│   ├── event.ts                    # Stream event types
│   ├── common.ts                   # Shared types (ToolCall, Usage, JSONSchema)
│   └── snapshot.ts                 # Snapshot types (Agent, Session)
├── model/
│   ├── index.ts
│   ├── base.ts                     # BaseModel abstract class
│   └── types.ts                    # Model-specific types
├── state/
│   ├── index.ts
│   ├── checkpointStore.ts          # CheckpointStore interface
│   ├── FileCheckpointStore.ts      # File-based checkpoint storage
│   └── InMemoryCheckpointStore.ts  # In-memory checkpoint storage
├── tool/
│   ├── index.ts
│   ├── base.ts                     # BaseTool abstract class
│   └── registry.ts                 # ToolRegistry class
├── session/
│   ├── index.ts
│   ├── base.ts                     # BaseSession abstract class
│   └── manager.ts                  # BaseSessionManager abstract class
└── agent/
    ├── index.ts
    ├── agent.ts                    # Agent class
    ├── types.ts                    # Agent types (AgentLoopState, AgentInput, etc.)
    ├── middleware.ts               # Middleware compose function
    └── errors.ts                   # Agent-specific errors
```

## 🛡️ Error Handling

```typescript
import { AgentAbortError, AgentMaxIterationsError } from 'agent-loop'

// Cancellation support
const controller = new AbortController()

try {
  for await (const event of agent.stream({
    sessionId: 'session-1',
    input: 'Hello',
    signal: controller.signal,
    maxIterations: 5,
  })) {
    // Handle events...
  }
} catch (error) {
  if (error instanceof AgentAbortError) {
    console.log('Agent was cancelled')
  } else if (error instanceof AgentMaxIterationsError) {
    console.log('Max iterations reached')
  }
}

// Cancel from another context
controller.abort()
```

## 📖 API Reference

### Agent

| Method                          | Description             |
| ------------------------------- | ----------------------- |
| `constructor(options)`          | Create a new agent      |
| `use(middleware)`               | Add middleware          |
| `stream(input)`                 | Stream agent execution  |
| `setModel(model)`               | Switch model at runtime |
| `toSnapshot()`                  | Create agent snapshot   |
| `restoreFromSnapshot(snapshot)` | Restore from snapshot   |

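`toSnapshot()` and `restoreFromSnapshot()` are not demonstrated elsewhere in this README; here is a minimal sketch, assuming `AgentSnapshot` is JSON-serializable and using an illustrative file path:

```typescript
import { promises as fs } from 'node:fs'

// Capture and persist the agent's state
await fs.writeFile('./agent.json', JSON.stringify(agent.toSnapshot()))

// Later (e.g. after a restart): rebuild the agent, then restore its state
const revived = new Agent({ name: 'Weather Assistant', systemPrompt: '...', model, tools })
revived.restoreFromSnapshot(JSON.parse(await fs.readFile('./agent.json', 'utf8')))
```
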
### AgentInput

| Property         | Type          | Description                               |
| ---------------- | ------------- | ----------------------------------------- |
| `sessionId`      | `string`      | Session identifier                        |
| `input`          | `string`      | User input message                        |
| `messages?`      | `Message[]`   | Optional conversation history             |
| `model?`         | `ModelRef`    | Optional model override for this request  |
| `signal?`        | `AbortSignal` | Cancellation support                      |
| `maxIterations?` | `number`      | Max loop iterations (default: 10)         |

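A short sketch tying the optional fields together; the `Message` and `ModelRef` shapes follow the examples elsewhere in this README:

```typescript
const controller = new AbortController()

for await (const event of agent.stream({
  sessionId: 'session-2',
  input: 'And what about Osaka?',
  // Optional prior conversation history for this request
  messages: [
    { role: 'user', content: 'What is the weather in Tokyo?' },
    { role: 'assistant', content: 'It is 22°C and sunny in Tokyo.' },
  ],
  model: { provider: 'openai', modelId: 'gpt-4o-mini' }, // per-request override
  signal: controller.signal,
  maxIterations: 3,
})) {
  if (event.type === 'text_delta')
    process.stdout.write(event.delta)
}
```
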
## 🔄 Model Switching

### Per-Request Model Override

Temporarily use a different model for a single request:

```typescript
for await (const event of agent.stream({
  sessionId: 'session-1',
  input: 'Explain quantum physics',
  model: { provider: 'openai', modelId: 'gpt-4' }, // Use GPT-4 for this request only
})) {
  // ...
}
```

### Persistent Model Switch

Change the default model for all subsequent requests:

```typescript
// Switch model at runtime
model.setModelId('gpt-4')

// All subsequent requests will use the new model
for await (const event of agent.stream({
  sessionId: 'session-1',
  input: 'Hello',
})) {
  // Uses gpt-4
}
```

### Multi-Provider Fallback

Configure multiple models with automatic fallback:

```typescript
const model = createModel({
  adapters: [
    createOpenAIAdapter({ defaultModelId: 'gpt-4' }),
    createAnthropicAdapter({ defaultModelId: 'claude-3' }),
  ],
  routing: {
    fallbackOrder: [
      { provider: 'openai', modelId: 'gpt-4' }, // Try first
      { provider: 'anthropic', modelId: 'claude-3' }, // Fallback if first fails
    ],
  },
})
```

## 📄 License

MIT © [Simon He](https://github.com/Simon-He95)
package/cli/args.mjs ADDED
@@ -0,0 +1,113 @@
import { Command, InvalidArgumentError } from 'commander'

export function getReplHelpText() {
  return [
    '',
    'Interactive slash commands:',
    '  /help               Show help',
    '  /model [id]         Show/set model id',
    '  /workspace          Switch workspace root (interactive)',
    '  /files              Attach file(s) (interactive)',
    '  /images             Attach image(s) (interactive)',
    '  /settings           Base URL / API key / tokens',
    '  /output             Output & UX options',
    '  /diag               Show diagnostics for last turn',
    '  /edit               Compose a message in $EDITOR',
    '  /multiline          Start multiline input (.send/.cancel)',
    '  /set <k> <v>        Set request param (maxTokens/temperature/topP/...)',
    '  /unset <k>          Clear request param',
    '  /params             Show current request params',
    '  /base-url <url>     Set base URL (recreates client if needed)',
    '  /api-key <key>      Set API key for this process (not printed)',
    '  /tools              List enabled tools',
    '  /sessions           List/pick/delete sessions',
    '  /use <sessionId>    Switch session (restore history)',
    '  /rm <sessionId...>  Delete session(s)',
    '  /save               Save current session/config',
    '  /status             Show current session/model info',
    '  /new                Start a new conversation (clears history)',
    '  /exit               Exit',
    '',
    'Mentions:',
    '  @                   Open quick actions (clack if installed)',
    '  @clear              Clear pending attachments',
    '  @file               Attach file(s) to the next prompt',
    '  @image/@img         Attach image(s) to the next prompt',
    '  @workspace          Switch workspace root (interactive)',
    '',
    'Keybindings:',
    '  Ctrl+K              Open command palette',
    '  Ctrl+O              Manage attachments',
    '  Ctrl+G              Clear attachments',
    '',
  ].join('\n')
}

function parseFiniteNumber(name) {
  return (v) => {
    const n = Number(v)
    if (!Number.isFinite(n))
      throw new InvalidArgumentError(`Invalid ${name}: ${v}`)
    return n
  }
}

function parseFiniteInt(name) {
  return (v) => {
    const n = Number.parseInt(v, 10)
    if (!Number.isFinite(n))
      throw new InvalidArgumentError(`Invalid ${name}: ${v}`)
    return n
  }
}

export function buildProgram(version) {
  return new Command()
    .name('goatchain')
    .description('A tiny interactive chat CLI built on agent-loop')
    .argument('[prompt...]', 'one-shot prompt (omit to start interactive mode)')
    .option('-k, --api-key <key>', 'OpenAI API key (or set OPENAI_API_KEY)')
    .option('-m, --model <id>', 'Default model id (OpenAI)')
    .option('--system <prompt>', 'System prompt')
    .option('--base-url <url>', 'Override OpenAI base URL (proxy)')
    .option('--max-tokens <n>', 'Max completion tokens', parseFiniteInt('max-tokens'))
    .option('--temperature <n>', 'Temperature (0-2)', parseFiniteNumber('temperature'))
    .option('--top-p <n>', 'Top-p (0-1)', parseFiniteNumber('top-p'))
    .option('--presence-penalty <n>', 'Presence penalty (-2..2)', parseFiniteNumber('presence-penalty'))
    .option('--frequency-penalty <n>', 'Frequency penalty (-2..2)', parseFiniteNumber('frequency-penalty'))
    .option('--seed <n>', 'Random seed', parseFiniteInt('seed'))
    .option('--timeout-ms <n>', 'Request timeout in ms', parseFiniteInt('timeout-ms'))
    .option('--session <id>', 'Use a saved session id (optional)')
    .version(version, '-v, --version', 'Show version')
    .helpOption('-h, --help', 'Show help')
    .showHelpAfterError()
    .showSuggestionAfterError()
    .addHelpText('after', getReplHelpText())
}

export function parseCliArgs(argv, version) {
  const program = buildProgram(version)
  program.parse(argv, { from: 'user' })

  const opts = program.opts()
  const prompt = Array.isArray(program.args) ? program.args.join(' ').trim() : ''

  const normalizeFiniteNumber = (n) => (typeof n === 'number' && Number.isFinite(n) ? n : undefined)
  const normalizeFiniteInt = (n) => (typeof n === 'number' && Number.isFinite(n) ? Math.trunc(n) : undefined)

  return {
    apiKey: typeof opts.apiKey === 'string' && opts.apiKey.trim() ? opts.apiKey : undefined,
    modelId: typeof opts.model === 'string' && opts.model.trim() ? opts.model : undefined,
    system: typeof opts.system === 'string' && opts.system.trim() ? opts.system : undefined,
    baseUrl: typeof opts.baseUrl === 'string' && opts.baseUrl.trim() ? opts.baseUrl : undefined,
    maxTokens: normalizeFiniteInt(opts.maxTokens),
    temperature: normalizeFiniteNumber(opts.temperature),
    topP: normalizeFiniteNumber(opts.topP),
    presencePenalty: normalizeFiniteNumber(opts.presencePenalty),
    frequencyPenalty: normalizeFiniteNumber(opts.frequencyPenalty),
    seed: normalizeFiniteInt(opts.seed),
    timeoutMs: normalizeFiniteInt(opts.timeoutMs),
    sessionId: typeof opts.session === 'string' && opts.session.trim() ? opts.session : undefined,
    prompt,
  }
}
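
A hypothetical caller (not part of this package's published files) might wire `parseCliArgs` up as sketched below; because `parse` is called with `{ from: 'user' }`, only the user-supplied arguments are passed in:

```js
import { parseCliArgs } from './args.mjs'

// Pass argv without the node/script prefix, matching { from: 'user' } above
const cli = parseCliArgs(process.argv.slice(2), '0.0.1')

if (cli.prompt)
  console.log('One-shot prompt:', cli.prompt)
else
  console.log('Interactive mode, model:', cli.modelId ?? '(default)')
```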