llmist 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +394 -0
- package/dist/chunk-DCW33WV7.js +901 -0
- package/dist/chunk-DCW33WV7.js.map +1 -0
- package/dist/chunk-JEBGLCDW.js +22 -0
- package/dist/chunk-JEBGLCDW.js.map +1 -0
- package/dist/chunk-TP7HE3MN.js +4450 -0
- package/dist/chunk-TP7HE3MN.js.map +1 -0
- package/dist/cli.cjs +5333 -0
- package/dist/cli.cjs.map +1 -0
- package/dist/cli.d.cts +1 -0
- package/dist/cli.d.ts +1 -0
- package/dist/cli.js +987 -0
- package/dist/cli.js.map +1 -0
- package/dist/index.cjs +5511 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +1101 -0
- package/dist/index.d.ts +1101 -0
- package/dist/index.js +421 -0
- package/dist/index.js.map +1 -0
- package/dist/mock-stream-D4erlo7B.d.cts +2602 -0
- package/dist/mock-stream-D4erlo7B.d.ts +2602 -0
- package/dist/testing/index.cjs +5260 -0
- package/dist/testing/index.cjs.map +1 -0
- package/dist/testing/index.d.cts +274 -0
- package/dist/testing/index.d.ts +274 -0
- package/dist/testing/index.js +34 -0
- package/dist/testing/index.js.map +1 -0
- package/package.json +102 -0
package/dist/index.d.cts
ADDED
@@ -0,0 +1,1101 @@
import { ZodType, ZodTypeAny } from 'zod';
export { z } from 'zod';
import { A as AgentHooks, L as LLMMessage, P as ParameterFormat, S as StreamEvent, G as GadgetRegistry, o as LLMStreamChunk, B as BaseGadget, p as ParsedGadgetCall, q as GadgetExecutionResult, r as ProviderAdapter, s as ModelDescriptor, t as ModelSpec, u as LLMGenerationOptions, v as LLMStream } from './mock-stream-D4erlo7B.cjs';
export { C as AfterGadgetExecutionAction, D as AfterGadgetExecutionControllerContext, F as AfterLLMCallAction, I as AfterLLMCallControllerContext, J as AfterLLMErrorAction, w as AgentBuilder, K as AgentOptions, N as BeforeGadgetExecutionAction, O as BeforeLLMCallAction, Q as ChunkInterceptorContext, R as Controllers, a9 as CostEstimate, ak as DEFAULT_PROMPTS, E as EventHandlers, ar as GadgetClass, T as GadgetExecutionControllerContext, as as GadgetOrClass, U as GadgetParameterInterceptorContext, V as GadgetResultInterceptorContext, H as HistoryMessage, W as Interceptors, X as LLMCallControllerContext, Y as LLMErrorControllerContext, a8 as LLMMessageBuilder, a7 as LLMRole, a6 as LLMist, a5 as LLMistOptions, Z as MessageInterceptorContext, a as MockBuilder, d as MockManager, h as MockMatcher, i as MockMatcherContext, j as MockOptions, M as MockProviderAdapter, k as MockRegistration, l as MockResponse, n as MockStats, aa as ModelFeatures, ag as ModelIdentifierParser, ab as ModelLimits, ac as ModelPricing, ad as ModelRegistry, _ as ObserveChunkContext, $ as ObserveGadgetCompleteContext, a0 as ObserveGadgetStartContext, a1 as ObserveLLMCallContext, a2 as ObserveLLMCompleteContext, a3 as ObserveLLMErrorContext, a4 as Observers, ah as PromptConfig, ai as PromptContext, aj as PromptTemplate, ae as ProviderIdentifier, an as QuickOptions, aq as StreamParser, at as TextOnlyAction, au as TextOnlyContext, av as TextOnlyCustomHandler, aw as TextOnlyGadgetConfig, ax as TextOnlyHandler, ay as TextOnlyStrategy, af as TokenUsage, x as collectEvents, y as collectText, ao as complete, c as createMockAdapter, b as createMockClient, e as createMockStream, f as createTextMockStream, g as getMockManager, m as mockLLM, al as resolvePromptTemplate, am as resolveRulesTemplate, z as runWithHandlers, ap as stream } from './mock-stream-D4erlo7B.cjs';
import { Logger, ILogObj } from 'tslog';
import { MessageCreateParamsStreaming, MessageStreamEvent } from '@anthropic-ai/sdk/resources/messages';
import OpenAI from 'openai';
import { ChatCompletionChunk } from 'openai/resources/chat/completions';

/**
 * Common hook presets for logging, timing, and monitoring.
 *
 * @example
 * ```typescript
 * import { HookPresets } from 'llmist/hooks';
 *
 * const agent = LLMist.createAgent()
 *   .withHooks(HookPresets.logging())
 *   .ask("...");
 *
 * // Or combine multiple presets
 * const agent = LLMist.createAgent()
 *   .withHooks(HookPresets.merge(
 *     HookPresets.logging({ verbose: true }),
 *     HookPresets.timing(),
 *     HookPresets.tokenTracking()
 *   ))
 *   .ask("...");
 * ```
 */

/**
 * Options for logging preset.
 */
interface LoggingOptions {
    /** Include verbose details like parameters and results */
    verbose?: boolean;
}
/**
 * Common hook presets.
 */
declare class HookPresets {
    /**
     * Preset: Basic logging of all events.
     *
     * Logs LLM calls and gadget executions to console.
     *
     * @param options - Logging options
     * @returns Hook configuration
     *
     * @example
     * ```typescript
     * .withHooks(HookPresets.logging())
     * .withHooks(HookPresets.logging({ verbose: true }))
     * ```
     */
    static logging(options?: LoggingOptions): AgentHooks;
    /**
     * Preset: Performance timing for all operations.
     *
     * Measures and logs execution time for LLM calls and gadgets.
     *
     * @returns Hook configuration
     *
     * @example
     * ```typescript
     * .withHooks(HookPresets.timing())
     * ```
     */
    static timing(): AgentHooks;
    /**
     * Preset: Token usage tracking.
     *
     * Tracks and logs cumulative token usage across all LLM calls.
     *
     * @returns Hook configuration
     *
     * @example
     * ```typescript
     * .withHooks(HookPresets.tokenTracking())
     * ```
     */
    static tokenTracking(): AgentHooks;
    /**
     * Preset: Error logging.
     *
     * Logs detailed error information for debugging.
     *
     * @returns Hook configuration
     *
     * @example
     * ```typescript
     * .withHooks(HookPresets.errorLogging())
     * ```
     */
    static errorLogging(): AgentHooks;
    /**
     * Preset: Silent (no output).
     *
     * Useful for testing or when you want complete control.
     *
     * @returns Empty hook configuration
     *
     * @example
     * ```typescript
     * .withHooks(HookPresets.silent())
     * ```
     */
    static silent(): AgentHooks;
    /**
     * Merge multiple hook configurations.
     *
     * Combines hook presets or custom configurations into a single object.
     * When multiple hooks target the same lifecycle event, they are composed
     * to run sequentially (all handlers will execute).
     *
     * @param hookSets - Array of hook configurations to merge
     * @returns Merged hook configuration with composed handlers
     *
     * @example
     * ```typescript
     * .withHooks(HookPresets.merge(
     *   HookPresets.logging({ verbose: true }),
     *   HookPresets.timing(),
     *   HookPresets.tokenTracking(),
     *   {
     *     // Custom hook
     *     observers: {
     *       onLLMCallComplete: async (ctx) => {
     *         saveToDatabase(ctx);
     *       }
     *     }
     *   }
     * ))
     * // All onLLMCallComplete handlers from logging, timing, tokenTracking,
     * // and the custom hook will execute in order
     * ```
     */
    static merge(...hookSets: AgentHooks[]): AgentHooks;
    /**
     * Preset: Complete monitoring suite.
     *
     * Combines logging, timing, and token tracking.
     *
     * @param options - Options for monitoring
     * @returns Merged hook configuration
     *
     * @example
     * ```typescript
     * .withHooks(HookPresets.monitoring())
     * .withHooks(HookPresets.monitoring({ verbose: true }))
     * ```
     */
    static monitoring(options?: LoggingOptions): AgentHooks;
}

/**
 * Core interfaces for the Agent architecture.
 * These interfaces define the contracts for the composable services that make up the agent system.
 */

/**
 * Manages the conversation history and message building.
 * This interface abstracts conversation state management from the orchestration logic.
 */
interface IConversationManager {
    /**
     * Adds a user message to the conversation.
     */
    addUserMessage(content: string): void;
    /**
     * Adds an assistant message to the conversation.
     */
    addAssistantMessage(content: string): void;
    /**
     * Adds a gadget call and its result to the conversation.
     */
    addGadgetCall(gadgetName: string, parameters: Record<string, unknown>, result: string): void;
    /**
     * Gets the complete conversation history including base messages (system prompts, gadget instructions).
     */
    getMessages(): LLMMessage[];
}

/**
 * ConversationManager handles conversation state and message building.
 * Extracted from AgentLoop to follow Single Responsibility Principle.
 */

/**
 * Default implementation of IConversationManager.
 * Manages conversation history by building on top of base messages (system prompt, gadget instructions).
 */
declare class ConversationManager implements IConversationManager {
    private readonly baseMessages;
    private readonly initialMessages;
    private readonly historyBuilder;
    private readonly parameterFormat;
    constructor(baseMessages: LLMMessage[], initialMessages: LLMMessage[], parameterFormat?: ParameterFormat);
    addUserMessage(content: string): void;
    addAssistantMessage(content: string): void;
    addGadgetCall(gadgetName: string, parameters: Record<string, unknown>, result: string): void;
    getMessages(): LLMMessage[];
}
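For orientation, a minimal sketch of driving `ConversationManager` directly; it assumes `LLMMessage` values are plain `{ role, content }` objects, which this file imports from the shared chunk rather than spelling out:

```typescript
import { ConversationManager } from 'llmist';

// Base messages (system prompt, gadget instructions) plus an empty prior history.
const conversation = new ConversationManager(
  [{ role: 'system', content: 'You are a helpful assistant.' }],
  [],
);

conversation.addUserMessage('What is 2 + 2?');
conversation.addAssistantMessage('Let me use the calculator gadget.');
conversation.addGadgetCall('calculator', { operation: 'add', a: 2, b: 2 }, '4');

// Base messages followed by the turns added above.
const history = conversation.getMessages();
```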
/**
 * StreamProcessor: The heart of the new hooks architecture.
 *
 * Replaces the complex wiring between Agent, ResponseProcessor, and GadgetRuntime.
 * Owns ALL stream processing and hook coordination with a clean, predictable flow.
 */

/**
 * Configuration for the StreamProcessor.
 */
interface StreamProcessorOptions {
    /** Current iteration number */
    iteration: number;
    /** Gadget registry for execution */
    registry: GadgetRegistry;
    /** Parameter format for parsing */
    parameterFormat: ParameterFormat;
    /** Custom gadget start prefix */
    gadgetStartPrefix?: string;
    /** Custom gadget end prefix */
    gadgetEndPrefix?: string;
    /** Hooks for lifecycle events */
    hooks?: AgentHooks;
    /** Logger instance */
    logger?: Logger<ILogObj>;
    /** Callback for human input */
    onHumanInputRequired?: (question: string) => Promise<string>;
    /** Whether to stop on gadget errors */
    stopOnGadgetError?: boolean;
    /** Custom error continuation logic */
    shouldContinueAfterError?: (context: {
        error: string;
        gadgetName: string;
        errorType: "parse" | "validation" | "execution";
        parameters?: Record<string, unknown>;
    }) => boolean | Promise<boolean>;
    /** Default gadget timeout */
    defaultGadgetTimeoutMs?: number;
}
/**
 * Result of stream processing.
 */
interface StreamProcessingResult {
    /** All emitted events */
    outputs: StreamEvent[];
    /** Whether the loop should break */
    shouldBreakLoop: boolean;
    /** Whether any gadgets were executed */
    didExecuteGadgets: boolean;
    /** LLM finish reason */
    finishReason: string | null;
    /** Token usage */
    usage?: {
        inputTokens: number;
        outputTokens: number;
        totalTokens: number;
    };
    /** The raw accumulated response text */
    rawResponse: string;
    /** The final message (after interceptors) */
    finalMessage: string;
}
/**
 * StreamProcessor: Coordinates all stream processing and hook execution.
 *
 * Execution order:
 * 1. Raw chunk arrives from LLM
 * 2. Interceptor: interceptRawChunk (transform raw text)
 * 3. Observer: onStreamChunk (logging)
 * 4. Parse for gadgets
 * 5. If gadget found:
 *    a. Interceptor: interceptGadgetParameters (transform params)
 *    b. Controller: beforeGadgetExecution (can skip)
 *    c. Observer: onGadgetExecutionStart
 *    d. Execute gadget
 *    e. Interceptor: interceptGadgetResult (transform result)
 *    f. Controller: afterGadgetExecution (can provide fallback)
 *    g. Observer: onGadgetExecutionComplete
 * 6. If text chunk:
 *    a. Interceptor: interceptTextChunk (transform display text)
 *    b. Yield to user
 * 7. Stream complete
 * 8. Interceptor: interceptAssistantMessage (transform final message)
 */
declare class StreamProcessor {
    private readonly iteration;
    private readonly registry;
    private readonly hooks;
    private readonly logger;
    private readonly parser;
    private readonly executor;
    private readonly stopOnGadgetError;
    private readonly shouldContinueAfterError?;
    private accumulatedText;
    private shouldStopExecution;
    private observerFailureCount;
    constructor(options: StreamProcessorOptions);
    /**
     * Process an LLM stream and return structured results.
     */
    process(stream: AsyncIterable<LLMStreamChunk>): Promise<StreamProcessingResult>;
    /**
     * Process a single parsed event (text or gadget call).
     */
    private processEvent;
    /**
     * Process a text event through interceptors.
     */
    private processTextEvent;
    /**
     * Process a gadget call through the full lifecycle.
     */
    private processGadgetCall;
    /**
     * Safely execute an observer, catching and logging any errors.
     * Observers are non-critical, so errors are logged but don't crash the system.
     */
    private safeObserve;
    /**
     * Execute multiple observers in parallel.
     * All observers run concurrently and failures are tracked but don't crash.
     */
    private runObserversInParallel;
    /**
     * Check if execution should continue after an error.
     *
     * Returns true if we should continue processing subsequent gadgets, false if we should stop.
     *
     * Logic:
     * - If custom shouldContinueAfterError is provided, use it
     * - Otherwise, use stopOnGadgetError config:
     *   - stopOnGadgetError=true → return false (stop execution)
     *   - stopOnGadgetError=false → return true (continue execution)
     */
    private checkContinueAfterError;
    /**
     * Determine the type of error from a gadget execution.
     */
    private determineErrorType;
}
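A rough sketch of constructing a `StreamProcessor` by hand from the options above. Two details are assumptions not confirmed by this file: that `GadgetRegistry` has a no-argument constructor and that `"json"` is a valid `ParameterFormat` value; in normal use the agent builder assembles this wiring for you:

```typescript
import { GadgetRegistry, StreamProcessor } from 'llmist';

const processor = new StreamProcessor({
  iteration: 1,
  registry: new GadgetRegistry(),   // assumed no-arg constructor
  parameterFormat: 'json',          // assumed ParameterFormat value
  stopOnGadgetError: false,
  // Continue after parse/validation problems, stop on execution failures.
  shouldContinueAfterError: ({ errorType }) => errorType !== 'execution',
  onHumanInputRequired: async (question) => {
    console.log(`Agent asks: ${question}`);
    return 'yes';
  },
});

// `stream` is an AsyncIterable<LLMStreamChunk>, e.g. a wrapped provider stream:
// const result = await processor.process(stream);
// console.log(result.finalMessage, result.usage?.totalTokens);
```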
/**
 * Model shortcuts and aliases for more expressive DX.
 *
 * This module provides convenient aliases for common model names,
 * allowing developers to use short, memorable names instead of
 * verbose provider:model-id formats.
 *
 * @example
 * ```typescript
 * // Instead of:
 * model: "openai:gpt-5-nano"
 *
 * // You can use:
 * model: "gpt5-nano"
 * // or even:
 * model: "gpt-5-nano" // Auto-detects provider
 * ```
 */
/**
 * Map of common model aliases to their full provider:model-id format.
 */
declare const MODEL_ALIASES: Record<string, string>;
/**
 * Options for resolveModel function.
 */
interface ResolveModelOptions {
    /**
     * If true, throw an error for unknown model names instead of falling back to OpenAI.
     * This helps catch typos like "gp4" instead of "gpt4".
     * Default: false
     */
    strict?: boolean;
    /**
     * If true, suppress warnings for unknown model names.
     * Default: false
     */
    silent?: boolean;
}
/**
 * Resolves a model name to its full provider:model format.
 *
 * Supports:
 * - Direct aliases: 'gpt5', 'sonnet', 'flash'
 * - Auto-detection: 'gpt-5-nano' → 'openai:gpt-5-nano'
 * - Pass-through: 'openai:gpt-5' → 'openai:gpt-5'
 *
 * Warnings:
 * - Logs a warning when an unknown model name falls back to OpenAI
 * - Use { strict: true } to throw an error instead
 * - Use { silent: true } to suppress warnings
 *
 * @param model - Model name or alias
 * @param options - Resolution options
 * @returns Full provider:model-id string
 *
 * @example
 * ```typescript
 * resolveModel('gpt5') // → 'openai:gpt-5'
 * resolveModel('sonnet') // → 'anthropic:claude-3-5-sonnet-latest'
 * resolveModel('gpt-5-nano') // → 'openai:gpt-5-nano'
 * resolveModel('openai:gpt-5') // → 'openai:gpt-5' (passthrough)
 * resolveModel('claude-3-5-sonnet') // → 'anthropic:claude-3-5-sonnet'
 *
 * // Typo detection
 * resolveModel('gp5') // ⚠️ Warning: Unknown model 'gp5', falling back to 'openai:gp5'
 *
 * // Strict mode (throws on typos)
 * resolveModel('gp5', { strict: true }) // ❌ Error: Unknown model 'gp5'
 * ```
 */
declare function resolveModel(model: string, options?: ResolveModelOptions): string;
/**
 * Check if a model string is already in provider:model format.
 *
 * @param model - Model string to check
 * @returns True if the model has a provider prefix
 *
 * @example
 * ```typescript
 * hasProviderPrefix('openai:gpt-4o') // → true
 * hasProviderPrefix('gpt4') // → false
 * hasProviderPrefix('claude-3-5-sonnet') // → false
 * ```
 */
declare function hasProviderPrefix(model: string): boolean;
/**
 * Extract the provider from a full model string.
 *
 * @param model - Full model string (provider:model-id)
 * @returns Provider name, or undefined if no prefix
 *
 * @example
 * ```typescript
 * getProvider('openai:gpt-4o') // → 'openai'
 * getProvider('anthropic:claude') // → 'anthropic'
 * getProvider('gpt4') // → undefined
 * ```
 */
declare function getProvider(model: string): string | undefined;
/**
 * Extract the model ID from a full model string.
 *
 * @param model - Full model string (provider:model-id)
 * @returns Model ID, or the original string if no prefix
 *
 * @example
 * ```typescript
 * getModelId('openai:gpt-4o') // → 'gpt-4o'
 * getModelId('anthropic:claude') // → 'claude'
 * getModelId('gpt4') // → 'gpt4'
 * ```
 */
declare function getModelId(model: string): string;
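The resolution helpers compose; a small sketch that normalizes any user-supplied model string into its parts, using only the documented behaviour above:

```typescript
import { getModelId, getProvider, hasProviderPrefix, resolveModel } from 'llmist';

function describeModel(input: string): { provider: string; modelId: string } {
  // Aliases and bare ids go through resolveModel; prefixed ids pass straight through.
  const full = hasProviderPrefix(input) ? input : resolveModel(input, { silent: true });
  return {
    provider: getProvider(full) ?? 'openai', // resolveModel falls back to OpenAI for unknown names
    modelId: getModelId(full),
  };
}

describeModel('sonnet');        // → { provider: 'anthropic', modelId: 'claude-3-5-sonnet-latest' }
describeModel('openai:gpt-4o'); // → { provider: 'openai', modelId: 'gpt-4o' }
```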
/**
 * Function-based gadget creation helper.
 *
 * For simple gadgets, use createGadget() instead of defining a class.
 * Parameters are automatically typed from the Zod schema.
 *
 * @example
 * ```typescript
 * const calculator = createGadget({
 *   description: "Performs arithmetic operations",
 *   schema: z.object({
 *     operation: z.enum(["add", "subtract"]),
 *     a: z.number(),
 *     b: z.number(),
 *   }),
 *   execute: ({ operation, a, b }) => {
 *     // Automatically typed!
 *     return operation === "add" ? String(a + b) : String(a - b);
 *   },
 * });
 * ```
 */

/**
 * Infer the TypeScript type from a Zod schema.
 */
type InferSchema$1<T> = T extends ZodType<infer U> ? U : never;
/**
 * Configuration for creating a function-based gadget.
 */
interface CreateGadgetConfig<TSchema extends ZodType> {
    /** Optional custom name (defaults to "FunctionGadget") */
    name?: string;
    /** Human-readable description of what the gadget does */
    description: string;
    /** Zod schema for parameter validation */
    schema: TSchema;
    /** Execution function with typed parameters */
    execute: (params: InferSchema$1<TSchema>) => string | Promise<string>;
    /** Optional timeout in milliseconds */
    timeoutMs?: number;
}
/**
 * Creates a gadget from a function (simpler than class-based approach).
 *
 * This is perfect for simple gadgets where you don't need the full
 * power of a class. Parameters are automatically typed from the schema.
 *
 * @param config - Configuration with execute function and schema
 * @returns Gadget instance ready to be registered
 *
 * @example
 * ```typescript
 * import { z } from 'zod';
 * import { createGadget } from 'llmist';
 *
 * // Simple calculator gadget
 * const calculator = createGadget({
 *   description: "Performs arithmetic operations",
 *   schema: z.object({
 *     operation: z.enum(["add", "subtract", "multiply", "divide"]),
 *     a: z.number().describe("First number"),
 *     b: z.number().describe("Second number"),
 *   }),
 *   execute: ({ operation, a, b }) => {
 *     // Parameters are automatically typed!
 *     switch (operation) {
 *       case "add": return String(a + b);
 *       case "subtract": return String(a - b);
 *       case "multiply": return String(a * b);
 *       case "divide": return String(a / b);
 *     }
 *   },
 * });
 * ```
 *
 * @example
 * ```typescript
 * // Async gadget with custom name and timeout
 * const weather = createGadget({
 *   name: "weather",
 *   description: "Fetches current weather for a city",
 *   schema: z.object({
 *     city: z.string().min(1).describe("City name"),
 *   }),
 *   timeoutMs: 10000,
 *   execute: async ({ city }) => {
 *     const response = await fetch(`https://api.weather.com/${city}`);
 *     const data = await response.json();
 *     return `Weather in ${city}: ${data.description}, ${data.temp}°C`;
 *   },
 * });
 * ```
 *
 * @example
 * ```typescript
 * // Use with agent
 * const agent = LLMist.createAgent()
 *   .withGadgets(calculator, weather)
 *   .ask("What's the weather in Paris and what's 10 + 5?");
 * ```
 */
declare function createGadget<TSchema extends ZodType>(config: CreateGadgetConfig<TSchema>): BaseGadget;

/**
 * Exception that gadgets can throw to signal the agent loop should terminate.
 *
 * When a gadget throws this exception, the agent loop will:
 * 1. Complete the current iteration
 * 2. Return the exception message as the gadget's result
 * 3. Exit the loop instead of continuing to the next iteration
 *
 * @example
 * ```typescript
 * import { z } from 'zod';
 *
 * class FinishGadget extends Gadget({
 *   name: 'Finish',
 *   description: 'Signals task completion',
 *   schema: z.object({
 *     message: z.string().optional(),
 *   }),
 * }) {
 *   execute(params: this['params']): string {
 *     const message = params.message || 'Task completed';
 *     throw new BreakLoopException(message);
 *   }
 * }
 * ```
 */
declare class BreakLoopException extends Error {
    constructor(message?: string);
}
/**
 * Exception that gadgets can throw to request human input during execution.
 *
 * When a gadget throws this exception, the agent loop will:
 * 1. Pause execution and wait for human input
 * 2. If `onHumanInputRequired` callback is provided, call it and await the answer
 * 3. Return the user's answer as the gadget's result
 * 4. Continue the loop with the answer added to conversation history
 *
 * If no callback is provided, the loop will yield a `human_input_required` event
 * and the caller must handle it externally.
 *
 * @example
 * ```typescript
 * import { z } from 'zod';
 *
 * class AskUserGadget extends Gadget({
 *   name: 'AskUser',
 *   description: 'Ask the user a question and get their answer',
 *   schema: z.object({
 *     question: z.string().min(1, 'Question is required'),
 *   }),
 * }) {
 *   execute(params: this['params']): string {
 *     throw new HumanInputException(params.question);
 *   }
 * }
 * ```
 */
declare class HumanInputException extends Error {
    readonly question: string;
    constructor(question: string);
}

declare class GadgetExecutor {
    private readonly registry;
    private readonly onHumanInputRequired?;
    private readonly defaultGadgetTimeoutMs?;
    private readonly logger;
    constructor(registry: GadgetRegistry, onHumanInputRequired?: ((question: string) => Promise<string>) | undefined, logger?: Logger<ILogObj>, defaultGadgetTimeoutMs?: number | undefined);
    /**
     * Creates a promise that rejects with a TimeoutException after the specified timeout.
     */
    private createTimeoutPromise;
    execute(call: ParsedGadgetCall): Promise<GadgetExecutionResult>;
    executeAll(calls: ParsedGadgetCall[]): Promise<GadgetExecutionResult[]>;
}
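`GadgetExecutor` can also be driven outside the agent loop, for example in tests. A sketch, assuming `GadgetRegistry` has a no-argument constructor and some way to register gadgets (not shown in this file), and that `ParsedGadgetCall` values come from the stream parser:

```typescript
import { GadgetExecutor, GadgetRegistry } from 'llmist';

const registry = new GadgetRegistry();          // assumed no-arg constructor
// ...register gadgets on the registry here...

const executor = new GadgetExecutor(
  registry,
  async (question) => `answer to: ${question}`, // resolves HumanInputException questions
  undefined,                                    // logger: fall back to the library default
  5_000,                                        // default per-gadget timeout in ms
);

// `calls` would be ParsedGadgetCall[] produced by the parser:
// const results = await executor.executeAll(calls);
```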
/**
 * Infer the TypeScript type from a Zod schema.
 */
type InferSchema<T> = T extends ZodType<infer U> ? U : never;
/**
 * Configuration for creating a typed gadget.
 */
interface GadgetConfig<TSchema extends ZodType> {
    /** Human-readable description of what the gadget does */
    description: string;
    /** Zod schema for parameter validation */
    schema: TSchema;
    /** Optional custom name (defaults to class name) */
    name?: string;
    /** Optional timeout in milliseconds */
    timeoutMs?: number;
}
/**
 * Factory function to create a typed gadget base class.
 *
 * The returned class automatically infers parameter types from the Zod schema,
 * eliminating the need for manual type assertions in the execute method.
 *
 * @param config - Configuration with description and schema
 * @returns Base class to extend with typed execute method
 *
 * @example
 * ```typescript
 * import { z } from 'zod';
 * import { Gadget } from 'llmist';
 *
 * class Calculator extends Gadget({
 *   description: "Performs arithmetic operations",
 *   schema: z.object({
 *     operation: z.enum(["add", "subtract", "multiply", "divide"]),
 *     a: z.number().describe("First number"),
 *     b: z.number().describe("Second number"),
 *   }),
 * }) {
 *   execute(params: this['params']): string {
 *     // params is automatically typed as:
 *     // { operation: "add" | "subtract" | "multiply" | "divide"; a: number; b: number }
 *     const { operation, a, b } = params;
 *
 *     switch (operation) {
 *       case "add": return String(a + b);
 *       case "subtract": return String(a - b);
 *       case "multiply": return String(a * b);
 *       case "divide": return String(a / b);
 *     }
 *   }
 * }
 * ```
 *
 * @example
 * ```typescript
 * // With async execution
 * class WeatherGadget extends Gadget({
 *   description: "Fetches weather for a city",
 *   schema: z.object({
 *     city: z.string().min(1).describe("City name"),
 *   }),
 *   timeoutMs: 10000,
 * }) {
 *   async execute(params: this['params']): Promise<string> {
 *     const { city } = params; // Automatically typed as { city: string }
 *     const weather = await fetchWeather(city);
 *     return `Weather in ${city}: ${weather}`;
 *   }
 * }
 * ```
 */
declare function Gadget<TSchema extends ZodType>(config: GadgetConfig<TSchema>): {
    new (): {
        description: string;
        parameterSchema: TSchema;
        name: string | undefined;
        timeoutMs: number | undefined;
        /**
         * Type helper property for accessing inferred parameter type.
         * This is used in the execute method signature: `execute(params: this['params'])`
         *
         * Note: This is just for type inference - the actual params in execute()
         * will be Record<string, unknown> which you can safely cast to this['params']
         */
        readonly params: InferSchema<TSchema>;
        /**
         * Execute the gadget. Subclasses should cast params to this['params'].
         *
         * @param params - Validated parameters from the LLM
         * @returns Result as a string (or Promise<string> for async gadgets)
         *
         * @example
         * ```typescript
         * execute(params: Record<string, unknown>): string {
         *   const typed = params as this['params'];
         *   // Now 'typed' is fully typed!
         *   return String(typed.a + typed.b);
         * }
         * ```
         */
        execute(params: Record<string, unknown>): string | Promise<string>;
        get instruction(): string;
        getInstruction(format?: ParameterFormat): string;
    } & {
        params: InferSchema<TSchema>;
    };
};

/**
 * Validation utilities for gadget parameters.
 *
 * Provides standalone validation with Zod schema support, including
 * default application and formatted error output.
 *
 * @module gadgets/validation
 */

/**
 * Individual validation issue with path and message.
 */
interface ValidationIssue {
    /** Dot-separated path to the invalid field (e.g., "user.email") */
    path: string;
    /** Human-readable error message */
    message: string;
}
/**
 * Result of parameter validation.
 * Discriminated union based on `success` field.
 */
type ValidationResult<T = Record<string, unknown>> = {
    success: true;
    /** Validated and transformed data with defaults applied */
    data: T;
} | {
    success: false;
    /** Formatted error message */
    error: string;
    /** Individual validation issues */
    issues: ValidationIssue[];
};
/**
 * Validate parameters against a Zod schema and apply defaults/transformations.
 *
 * This replicates the validation behavior from GadgetExecutor, making it
 * available for direct use in tests and other contexts.
 *
 * @param schema - Zod schema to validate against
 * @param params - Raw parameters to validate
 * @returns ValidationResult with either validated data or error details
 *
 * @example
 * ```typescript
 * import { validateAndApplyDefaults } from 'llmist';
 * import { z } from 'zod';
 *
 * const schema = z.object({
 *   delay: z.number().default(100),
 *   retries: z.number().int().min(0).default(3),
 * });
 *
 * const result = validateAndApplyDefaults(schema, { delay: 50 });
 * if (result.success) {
 *   console.log(result.data); // { delay: 50, retries: 3 }
 * }
 * ```
 */
declare function validateAndApplyDefaults<T = Record<string, unknown>>(schema: ZodTypeAny, params: Record<string, unknown>): ValidationResult<T>;
/**
 * Validate gadget parameters using the gadget's schema.
 *
 * Convenience wrapper that extracts the schema from a gadget instance.
 * If the gadget has no schema, validation always succeeds with the
 * original parameters.
 *
 * @param gadget - Gadget instance with optional parameterSchema
 * @param params - Raw parameters to validate
 * @returns ValidationResult with either validated data or error details
 *
 * @example
 * ```typescript
 * import { validateGadgetParams, createGadget } from 'llmist';
 * import { z } from 'zod';
 *
 * const calculator = createGadget({
 *   description: 'Add numbers',
 *   schema: z.object({
 *     a: z.number(),
 *     b: z.number().default(0),
 *   }),
 *   execute: ({ a, b }) => String(a + b),
 * });
 *
 * const result = validateGadgetParams(calculator, { a: 5 });
 * if (result.success) {
 *   console.log(result.data); // { a: 5, b: 0 }
 * }
 * ```
 */
declare function validateGadgetParams(gadget: BaseGadget, params: Record<string, unknown>): ValidationResult;
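The examples above show the success branch; the failure branch of `ValidationResult` carries a formatted `error` plus per-field `issues`. A short sketch of reporting them:

```typescript
import { createGadget, validateGadgetParams, z } from 'llmist';

const adder = createGadget({
  description: 'Add numbers',
  schema: z.object({ a: z.number(), b: z.number().default(0) }),
  execute: ({ a, b }) => String(a + b),
});

const result = validateGadgetParams(adder, { a: 'not a number' });
if (!result.success) {
  console.error(result.error);          // formatted summary
  for (const issue of result.issues) {
    console.error(`${issue.path}: ${issue.message}`); // e.g. "a: Expected number, received string"
  }
}
```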
/**
 * Logger configuration options for the library.
 */
interface LoggerOptions {
    /**
     * Log level: 0=silly, 1=trace, 2=debug, 3=info, 4=warn, 5=error, 6=fatal
     * @default 4 (warn)
     */
    minLevel?: number;
    /**
     * Output type: 'pretty' for development, 'json' for production
     * @default 'pretty'
     */
    type?: "pretty" | "json" | "hidden";
    /**
     * Logger name (appears in logs)
     */
    name?: string;
}
/**
 * Create a new logger instance for the library.
 *
 * @param options - Logger configuration options
 * @returns Configured Logger instance
 *
 * @example
 * ```typescript
 * // Development logger with pretty output
 * const logger = createLogger({ type: 'pretty', minLevel: 2 });
 *
 * // Production logger with JSON output
 * const logger = createLogger({ type: 'json', minLevel: 3 });
 *
 * // Silent logger for tests
 * const logger = createLogger({ type: 'hidden' });
 * ```
 */
declare function createLogger(options?: LoggerOptions): Logger<ILogObj>;
/**
 * Default logger instance for the library.
 * Users can replace this with their own configured logger.
 */
declare const defaultLogger: Logger<ILogObj>;

/**
 * Base Provider Adapter
 *
 * Abstract base class for provider adapters that implements the Template Method pattern.
 * This class defines the skeleton of the streaming algorithm, leaving provider-specific
 * details to be implemented by concrete subclasses.
 *
 * The streaming workflow consists of four main steps:
 * 1. Prepare messages (optional transformation for provider-specific requirements)
 * 2. Build the request payload (provider-specific formatting)
 * 3. Execute the stream request (call the provider's SDK)
 * 4. Wrap the stream (transform provider-specific chunks into universal format)
 */

declare abstract class BaseProviderAdapter implements ProviderAdapter {
    protected readonly client: unknown;
    abstract readonly providerId: string;
    constructor(client: unknown);
    abstract supports(descriptor: ModelDescriptor): boolean;
    /**
     * Optionally provide model specifications for this provider.
     * This allows the model registry to discover available models and their capabilities.
     */
    getModelSpecs?(): ModelSpec[];
    /**
     * Template method that defines the skeleton of the streaming algorithm.
     * This orchestrates the four-step process without dictating provider-specific details.
     */
    stream(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec?: ModelSpec): LLMStream;
    /**
     * Prepare messages for the request.
     * Default implementation returns messages unchanged.
     * Override this to implement provider-specific message transformations
     * (e.g., Gemini's consecutive message merging, Anthropic's system message extraction).
     *
     * @param messages - The input messages
     * @returns Prepared messages
     */
    protected prepareMessages(messages: LLMMessage[]): LLMMessage[];
    /**
     * Build the provider-specific request payload.
     * This method must be implemented by each concrete provider.
     *
     * @param options - The generation options
     * @param descriptor - The model descriptor
     * @param spec - Optional model specification with metadata
     * @param messages - The prepared messages
     * @returns Provider-specific payload ready for the API call
     */
    protected abstract buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): unknown;
    /**
     * Execute the stream request using the provider's SDK.
     * This method must be implemented by each concrete provider.
     *
     * @param payload - The provider-specific payload
     * @returns An async iterable of provider-specific chunks
     */
    protected abstract executeStreamRequest(payload: unknown): Promise<AsyncIterable<unknown>>;
    /**
     * Wrap the provider-specific stream into the universal LLMStream format.
     * This method must be implemented by each concrete provider.
     *
     * @param rawStream - The provider-specific stream
     * @returns Universal LLMStream
     */
    protected abstract wrapStream(rawStream: AsyncIterable<unknown>): LLMStream;
}
declare class AnthropicMessagesProvider extends BaseProviderAdapter {
    readonly providerId: "anthropic";
    supports(descriptor: ModelDescriptor): boolean;
    getModelSpecs(): ModelSpec[];
    protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): MessageCreateParamsStreaming;
    protected executeStreamRequest(payload: MessageCreateParamsStreaming): Promise<AsyncIterable<MessageStreamEvent>>;
    protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
    /**
     * Count tokens in messages using Anthropic's native token counting API.
     *
     * This method provides accurate token estimation for Anthropic models by:
     * - Using the native messages.countTokens() API
     * - Properly handling system messages and conversation structure
     * - Transforming messages to Anthropic's expected format
     *
     * @param messages - The messages to count tokens for
     * @param descriptor - Model descriptor containing the model name
     * @param _spec - Optional model specification (currently unused)
     * @returns Promise resolving to the estimated input token count
     *
     * @throws Never throws - falls back to character-based estimation (4 chars/token) on error
     *
     * @example
     * ```typescript
     * const count = await provider.countTokens(
     *   [{ role: "user", content: "Hello!" }],
     *   { provider: "anthropic", name: "claude-3-5-sonnet-20241022" }
     * );
     * ```
     */
    countTokens(messages: LLMMessage[], descriptor: ModelDescriptor, _spec?: ModelSpec): Promise<number>;
}
declare function createAnthropicProviderFromEnv(): AnthropicMessagesProvider | null;

declare function discoverProviderAdapters(): ProviderAdapter[];

type GeminiChunk = {
    text?: () => string;
    candidates?: Array<{
        content?: {
            parts?: Array<{
                text?: string;
            }>;
        };
        finishReason?: string;
    }>;
    usageMetadata?: {
        promptTokenCount?: number;
        candidatesTokenCount?: number;
        totalTokenCount?: number;
    };
};
declare class GeminiGenerativeProvider extends BaseProviderAdapter {
    readonly providerId: "gemini";
    supports(descriptor: ModelDescriptor): boolean;
    getModelSpecs(): ModelSpec[];
    protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, _spec: ModelSpec | undefined, messages: LLMMessage[]): {
        model: string;
        contents: Array<{
            role: string;
            parts: Array<{
                text: string;
            }>;
        }>;
        config: Record<string, unknown>;
    };
    protected executeStreamRequest(payload: {
        model: string;
        contents: Array<{
            role: string;
            parts: Array<{
                text: string;
            }>;
        }>;
        config: Record<string, unknown>;
    }): Promise<AsyncIterable<GeminiChunk>>;
    private extractSystemAndContents;
    private mergeConsecutiveMessages;
    private convertContentsForNewSDK;
    private buildGenerationConfig;
    protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
    private extractText;
    private extractFinishReason;
    private extractUsage;
    /**
     * Count tokens in messages using Gemini's native token counting API.
     *
     * This method provides accurate token estimation for Gemini models by:
     * - Using the SDK's countTokens() method
     * - Properly extracting and handling system instructions
     * - Transforming messages to Gemini's expected format
     *
     * @param messages - The messages to count tokens for
     * @param descriptor - Model descriptor containing the model name
     * @param _spec - Optional model specification (currently unused)
     * @returns Promise resolving to the estimated input token count
     *
     * @throws Never throws - falls back to character-based estimation (4 chars/token) on error
     *
     * @example
     * ```typescript
     * const count = await provider.countTokens(
     *   [{ role: "user", content: "Hello!" }],
     *   { provider: "gemini", name: "gemini-1.5-pro" }
     * );
     * ```
     */
    countTokens(messages: LLMMessage[], descriptor: ModelDescriptor, _spec?: ModelSpec): Promise<number>;
}
declare function createGeminiProviderFromEnv(): GeminiGenerativeProvider | null;

declare class OpenAIChatProvider extends BaseProviderAdapter {
    readonly providerId: "openai";
    supports(descriptor: ModelDescriptor): boolean;
    getModelSpecs(): ModelSpec[];
    protected buildRequestPayload(options: LLMGenerationOptions, descriptor: ModelDescriptor, spec: ModelSpec | undefined, messages: LLMMessage[]): Parameters<OpenAI["chat"]["completions"]["create"]>[0];
    protected executeStreamRequest(payload: Parameters<OpenAI["chat"]["completions"]["create"]>[0]): Promise<AsyncIterable<ChatCompletionChunk>>;
    protected wrapStream(iterable: AsyncIterable<unknown>): LLMStream;
    /**
     * Count tokens in messages using OpenAI's tiktoken library.
     *
     * This method provides accurate token estimation for OpenAI models by:
     * - Using the model-specific tokenizer encoding
     * - Accounting for message formatting overhead
     * - Falling back to gpt-4o encoding for unknown models
     *
     * @param messages - The messages to count tokens for
     * @param descriptor - Model descriptor containing the model name
     * @param _spec - Optional model specification (currently unused)
     * @returns Promise resolving to the estimated input token count
     *
     * @throws Never throws - falls back to character-based estimation (4 chars/token) on error
     *
     * @example
     * ```typescript
     * const count = await provider.countTokens(
     *   [{ role: "user", content: "Hello!" }],
     *   { provider: "openai", name: "gpt-4" }
     * );
     * ```
     */
    countTokens(messages: LLMMessage[], descriptor: ModelDescriptor, _spec?: ModelSpec): Promise<number>;
}
declare function createOpenAIProviderFromEnv(): OpenAIChatProvider | null;
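The `create*ProviderFromEnv` factories return `null` when the provider cannot be configured from the environment, and `discoverProviderAdapters()` gathers whichever adapters are available; a short sketch:

```typescript
import {
  createAnthropicProviderFromEnv,
  createOpenAIProviderFromEnv,
  discoverProviderAdapters,
} from 'llmist';

// Every provider that can be configured from the current environment.
const adapters = discoverProviderAdapters();
console.log(`${adapters.length} provider adapter(s) available`);

// Or construct a specific provider and handle the "not configured" case explicitly.
const openai = createOpenAIProviderFromEnv();
if (!openai) {
  console.warn('OpenAI provider not configured in this environment.');
}

const anthropic = createAnthropicProviderFromEnv();
```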
export { AgentHooks, AnthropicMessagesProvider, BaseGadget, BreakLoopException, ConversationManager, type CreateGadgetConfig, Gadget, type GadgetConfig, GadgetExecutionResult, GadgetExecutor, GadgetRegistry, GeminiGenerativeProvider, HookPresets, HumanInputException, type IConversationManager, LLMGenerationOptions, LLMMessage, LLMStream, LLMStreamChunk, type LoggerOptions, type LoggingOptions, MODEL_ALIASES, ModelDescriptor, ModelSpec, OpenAIChatProvider, ParsedGadgetCall, ProviderAdapter, StreamEvent, type StreamProcessingResult, StreamProcessor, type StreamProcessorOptions, type ValidationIssue, type ValidationResult, createAnthropicProviderFromEnv, createGadget, createGeminiProviderFromEnv, createLogger, createOpenAIProviderFromEnv, defaultLogger, discoverProviderAdapters, getModelId, getProvider, hasProviderPrefix, resolveModel, validateAndApplyDefaults, validateGadgetParams };