@samrahimi/smol-js 0.1.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,793 @@
1
+ import OpenAI from 'openai';
2
+
3
+ /**
4
+ * Core types for smol-js
5
+ */
6
+ type MessageRole = 'system' | 'user' | 'assistant' | 'tool';
7
+ interface TokenUsage {
8
+ inputTokens: number;
9
+ outputTokens: number;
10
+ totalTokens: number;
11
+ }
12
+ interface Timing {
13
+ startTime: number;
14
+ endTime?: number;
15
+ duration?: number;
16
+ }
17
+ interface ChatMessage {
18
+ role: MessageRole;
19
+ content: string | null;
20
+ name?: string;
21
+ toolCalls?: ToolCall[];
22
+ toolCallId?: string;
23
+ tokenUsage?: TokenUsage;
24
+ }
25
+ interface ToolCall {
26
+ id: string;
27
+ type: 'function';
28
+ function: {
29
+ name: string;
30
+ arguments: string | Record<string, unknown>;
31
+ };
32
+ }
33
+ type ToolInputType = 'string' | 'number' | 'boolean' | 'array' | 'object' | 'any';
34
+ interface ToolInput {
35
+ type: ToolInputType;
36
+ description: string;
37
+ required?: boolean;
38
+ default?: unknown;
39
+ }
40
+ interface ToolInputs {
41
+ [key: string]: ToolInput;
42
+ }
43
+ interface CodeExecutionOutput {
44
+ output: unknown;
45
+ logs: string;
46
+ isFinalAnswer: boolean;
47
+ error?: Error;
48
+ }
49
+ interface ActionOutput {
50
+ output: unknown;
51
+ isFinalAnswer: boolean;
52
+ }
53
+ interface AgentConfig$1 {
54
+ model: Model$1;
55
+ tools?: Tool$1[];
56
+ maxSteps?: number;
57
+ codeExecutionDelay?: number;
58
+ systemPrompt?: string;
59
+ additionalAuthorizedImports?: string[];
60
+ streamOutputs?: boolean;
61
+ verboseLevel?: LogLevel;
62
+ }
63
+ interface ModelConfig {
64
+ modelId?: string;
65
+ apiKey?: string;
66
+ baseUrl?: string;
67
+ maxTokens?: number;
68
+ temperature?: number;
69
+ timeout?: number;
70
+ }
71
+ declare enum LogLevel {
72
+ OFF = -1,
73
+ ERROR = 0,
74
+ INFO = 1,
75
+ DEBUG = 2
76
+ }
77
+ type StepType = 'system' | 'task' | 'action' | 'planning' | 'final';
78
+ interface MemoryStep {
79
+ type: StepType;
80
+ timestamp: number;
81
+ }
82
+ interface SystemPromptStep extends MemoryStep {
83
+ type: 'system';
84
+ content: string;
85
+ }
86
+ interface TaskStep extends MemoryStep {
87
+ type: 'task';
88
+ task: string;
89
+ }
90
+ interface ActionStep extends MemoryStep {
91
+ type: 'action';
92
+ stepNumber: number;
93
+ timing: Timing;
94
+ modelInputMessages: ChatMessage[];
95
+ modelOutputMessage?: ChatMessage;
96
+ codeAction?: string;
97
+ observation?: string;
98
+ actionOutput?: ActionOutput;
99
+ tokenUsage?: TokenUsage;
100
+ error?: Error;
101
+ isFinalAnswer?: boolean;
102
+ }
103
+ interface FinalAnswerStep extends MemoryStep {
104
+ type: 'final';
105
+ answer: unknown;
106
+ }
107
+ interface StreamEvent {
108
+ type: 'delta' | 'toolCall' | 'observation' | 'step' | 'final' | 'error';
109
+ data: unknown;
110
+ }
111
+ interface RunResult {
112
+ output: unknown;
113
+ steps: MemoryStep[];
114
+ tokenUsage: TokenUsage;
115
+ duration: number;
116
+ }
117
+ interface Tool$1 {
118
+ name: string;
119
+ description: string;
120
+ inputs: ToolInputs;
121
+ outputType: string;
122
+ execute: (args: Record<string, unknown>) => Promise<unknown>;
123
+ toCodePrompt: () => string;
124
+ }
125
+ interface Model$1 {
126
+ modelId: string;
127
+ generate: (messages: ChatMessage[], options?: GenerateOptions) => Promise<ChatMessage>;
128
+ generateStream?: (messages: ChatMessage[], options?: GenerateOptions) => AsyncGenerator<string, ChatMessage, undefined>;
129
+ }
130
+ interface GenerateOptions {
131
+ stopSequences?: string[];
132
+ maxTokens?: number;
133
+ temperature?: number;
134
+ tools?: Tool$1[];
135
+ }
136
+
137
+ /**
138
+ * Tool base class for smol-js
139
+ *
140
+ * Tools are the primary way for agents to interact with the outside world.
141
+ * Extend this class and implement the execute() method to create custom tools.
142
+ */
143
+
144
+ declare abstract class Tool {
145
+ /**
146
+ * Unique identifier for the tool
147
+ */
148
+ abstract readonly name: string;
149
+ /**
150
+ * Human-readable description of what the tool does
151
+ */
152
+ abstract readonly description: string;
153
+ /**
154
+ * Input parameter schema
155
+ */
156
+ abstract readonly inputs: ToolInputs;
157
+ /**
158
+ * Output type description
159
+ */
160
+ abstract readonly outputType: string;
161
+ /**
162
+ * Whether the tool has been set up
163
+ */
164
+ protected isSetup: boolean;
165
+ /**
166
+ * Optional setup method called before first use.
167
+ * Override this for expensive initialization (loading models, etc.)
168
+ */
169
+ setup(): Promise<void>;
170
+ /**
171
+ * Execute the tool with the given arguments.
172
+ * Must be implemented by subclasses.
173
+ */
174
+ abstract execute(args: Record<string, unknown>): Promise<unknown>;
175
+ /**
176
+ * Call the tool, ensuring setup is complete and validating arguments.
177
+ */
178
+ call(args: Record<string, unknown>): Promise<unknown>;
179
+ /**
180
+ * Validate that provided arguments match the input schema.
181
+ */
182
+ protected validateArguments(args: Record<string, unknown>): void;
183
+ /**
184
+ * Check if a value matches the expected type.
185
+ */
186
+ protected checkType(value: unknown, expectedType: ToolInputType): boolean;
187
+ /**
188
+ * Generate a code-friendly prompt representation of this tool.
189
+ * Used in the CodeAgent system prompt.
190
+ */
191
+ toCodePrompt(): string;
192
+ /**
193
+ * Convert tool input type to JS/TS type string.
194
+ */
195
+ protected typeToJsType(type: ToolInputType | string): string;
196
+ /**
197
+ * Serialize the tool to a JSON-compatible object.
198
+ */
199
+ toJSON(): Record<string, unknown>;
200
+ }
201
+ /**
202
+ * Helper function to create a tool from a function.
203
+ * This is an alternative to extending the Tool class.
204
+ */
205
+ declare function createTool(config: {
206
+ name: string;
207
+ description: string;
208
+ inputs: ToolInputs;
209
+ outputType: string;
210
+ execute: (args: Record<string, unknown>) => Promise<unknown>;
211
+ }): Tool;
212
+
213
+ /**
214
+ * Model base class for smol-js
215
+ *
216
+ * Models are responsible for generating text responses from LLMs.
217
+ * Extend this class to support different LLM providers.
218
+ */
219
+
220
+ declare abstract class Model {
221
+ /**
222
+ * Model identifier (e.g., "gpt-4", "claude-3-sonnet")
223
+ */
224
+ abstract readonly modelId: string;
225
+ /**
226
+ * Generate a response from the model.
227
+ */
228
+ abstract generate(messages: ChatMessage[], options?: GenerateOptions): Promise<ChatMessage>;
229
+ /**
230
+ * Optional streaming generation.
231
+ * Yields content chunks and returns the final message.
232
+ */
233
+ generateStream?(messages: ChatMessage[], options?: GenerateOptions): AsyncGenerator<string, ChatMessage, undefined>;
234
+ /**
235
+ * Check if the model supports streaming.
236
+ */
237
+ supportsStreaming(): boolean;
238
+ /**
239
+ * Extract token usage from a response message.
240
+ */
241
+ protected extractTokenUsage(_response: unknown): TokenUsage | undefined;
242
+ /**
243
+ * Convert messages to the format expected by the model's API.
244
+ */
245
+ protected formatMessages(messages: ChatMessage[]): unknown[];
246
+ }
247
+
248
+ /**
249
+ * AgentMemory - Tracks agent execution history
250
+ *
251
+ * Stores all steps taken by the agent and converts them to messages
252
+ * that can be sent to the LLM for context.
253
+ */
254
+
255
+ declare class AgentMemory {
256
+ /**
257
+ * System prompt step (always first)
258
+ */
259
+ systemPrompt: SystemPromptStep;
260
+ /**
261
+ * All execution steps
262
+ */
263
+ steps: (TaskStep | ActionStep | FinalAnswerStep)[];
264
+ constructor(systemPrompt: string);
265
+ /**
266
+ * Reset memory, keeping only the system prompt.
267
+ */
268
+ reset(): void;
269
+ /**
270
+ * Add a task step.
271
+ */
272
+ addTask(task: string): TaskStep;
273
+ /**
274
+ * Create a new action step.
275
+ */
276
+ createActionStep(stepNumber: number): ActionStep;
277
+ /**
278
+ * Add a final answer step.
279
+ */
280
+ addFinalAnswer(answer: unknown): FinalAnswerStep;
281
+ /**
282
+ * Get the last step.
283
+ */
284
+ getLastStep(): MemoryStep | undefined;
285
+ /**
286
+ * Get all action steps.
287
+ */
288
+ getActionSteps(): ActionStep[];
289
+ /**
290
+ * Convert memory to messages for LLM context.
291
+ */
292
+ toMessages(): ChatMessage[];
293
+ /**
294
+ * Get total token usage across all steps.
295
+ */
296
+ getTotalTokenUsage(): TokenUsage;
297
+ /**
298
+ * Get a summary of the memory for logging.
299
+ */
300
+ getSummary(): string;
301
+ /**
302
+ * Serialize memory to JSON.
303
+ */
304
+ toJSON(): Record<string, unknown>;
305
+ }
306
+
307
+ /**
308
+ * AgentLogger - Color-coded console logging for agent execution
309
+ *
310
+ * Provides formatted output with different colors for:
311
+ * - Headers (cyan)
312
+ * - Reasoning/Thoughts (yellow)
313
+ * - Code blocks (green)
314
+ * - Output/Results (blue)
315
+ * - Errors (red)
316
+ */
317
+
318
+ declare class AgentLogger {
319
+ private level;
320
+ private logFile?;
321
+ private sessionId;
322
+ constructor(level?: LogLevel);
323
+ /**
324
+ * Generate a unique session ID.
325
+ */
326
+ private generateSessionId;
327
+ /**
328
+ * Initialize the log file.
329
+ */
330
+ private initLogFile;
331
+ /**
332
+ * Write to the log file.
333
+ */
334
+ private writeToFile;
335
+ /**
336
+ * Set the log level.
337
+ */
338
+ setLevel(level: LogLevel): void;
339
+ /**
340
+ * Log a header (task start, step start, etc.)
341
+ */
342
+ header(message: string, level?: LogLevel): void;
343
+ /**
344
+ * Log a subheader.
345
+ */
346
+ subheader(message: string, level?: LogLevel): void;
347
+ /**
348
+ * Log reasoning/thought from the agent.
349
+ */
350
+ reasoning(content: string, level?: LogLevel): void;
351
+ /**
352
+ * Log code block.
353
+ */
354
+ code(content: string, language?: string, level?: LogLevel): void;
355
+ /**
356
+ * Log execution output.
357
+ */
358
+ output(content: string, level?: LogLevel): void;
359
+ /**
360
+ * Log execution logs (print statements).
361
+ */
362
+ logs(content: string, level?: LogLevel): void;
363
+ /**
364
+ * Log an error.
365
+ */
366
+ error(message: string, error?: Error, level?: LogLevel): void;
367
+ /**
368
+ * Log a warning.
369
+ */
370
+ warn(message: string, level?: LogLevel): void;
371
+ /**
372
+ * Log general info.
373
+ */
374
+ info(message: string, level?: LogLevel): void;
375
+ /**
376
+ * Log debug info.
377
+ */
378
+ debug(message: string): void;
379
+ /**
380
+ * Log final answer.
381
+ */
382
+ finalAnswer(answer: unknown, level?: LogLevel): void;
383
+ /**
384
+ * Log step progress.
385
+ */
386
+ stepProgress(current: number, max: number, level?: LogLevel): void;
387
+ /**
388
+ * Log waiting message for code execution delay.
389
+ */
390
+ waiting(seconds: number, level?: LogLevel): void;
391
+ /**
392
+ * Stream content character by character.
393
+ */
394
+ streamChar(char: string): void;
395
+ /**
396
+ * End streaming (add newline).
397
+ */
398
+ streamEnd(): void;
399
+ /**
400
+ * Close the log file.
401
+ */
402
+ close(): void;
403
+ /**
404
+ * Get the log file path.
405
+ */
406
+ getLogPath(): string | undefined;
407
+ }
408
+
409
+ /**
410
+ * Agent - Abstract base class for all agents
411
+ *
412
+ * Provides the foundation for multi-step agents that follow the ReAct framework.
413
+ * Extend this class to create specific agent implementations.
414
+ */
415
+
416
+ interface AgentConfig {
417
+ /**
418
+ * The LLM model to use for generation
419
+ */
420
+ model: Model;
421
+ /**
422
+ * Tools available to the agent
423
+ */
424
+ tools?: Tool[];
425
+ /**
426
+ * Maximum number of steps before stopping
427
+ * @default 20
428
+ */
429
+ maxSteps?: number;
430
+ /**
431
+ * Delay in milliseconds before executing code (for user interruption)
432
+ * @default 5000
433
+ */
434
+ codeExecutionDelay?: number;
435
+ /**
436
+ * Custom system prompt (will be merged with generated prompt)
437
+ */
438
+ customInstructions?: string;
439
+ /**
440
+ * Log level for output
441
+ * @default LogLevel.INFO
442
+ */
443
+ verboseLevel?: LogLevel;
444
+ /**
445
+ * Whether to stream model outputs
446
+ * @default true
447
+ */
448
+ streamOutputs?: boolean;
449
+ }
450
+ declare abstract class Agent {
451
+ /**
452
+ * The LLM model for generation
453
+ */
454
+ protected model: Model;
455
+ /**
456
+ * Available tools mapped by name
457
+ */
458
+ protected tools: Map<string, Tool>;
459
+ /**
460
+ * Agent memory tracking all steps
461
+ */
462
+ protected memory: AgentMemory;
463
+ /**
464
+ * Logger for formatted output
465
+ */
466
+ protected logger: AgentLogger;
467
+ /**
468
+ * Configuration options
469
+ */
470
+ protected config: Required<Omit<AgentConfig, 'model' | 'tools'>>;
471
+ /**
472
+ * Current step number
473
+ */
474
+ protected currentStep: number;
475
+ /**
476
+ * Whether the agent is currently running
477
+ */
478
+ protected isRunning: boolean;
479
+ constructor(config: AgentConfig);
480
+ /**
481
+ * Initialize the system prompt for the agent.
482
+ * Must be implemented by subclasses.
483
+ */
484
+ protected abstract initializeSystemPrompt(): string;
485
+ /**
486
+ * Execute a single step in the agent loop.
487
+ * Must be implemented by subclasses.
488
+ *
489
+ * @param memoryStep - The memory step to populate with execution results
490
+ * @returns The action output from this step
491
+ */
492
+ protected abstract executeStep(memoryStep: ActionStep): Promise<ActionOutput>;
493
+ /**
494
+ * Run the agent on a task.
495
+ *
496
+ * @param task - The task description
497
+ * @param reset - Whether to reset memory before running
498
+ * @returns The final result
499
+ */
500
+ run(task: string, reset?: boolean): Promise<RunResult>;
501
+ /**
502
+ * Generate a final answer when max steps is reached.
503
+ */
504
+ protected provideFinalAnswer(task: string): Promise<unknown>;
505
+ /**
506
+ * Stop the agent.
507
+ */
508
+ stop(): void;
509
+ /**
510
+ * Get the current memory.
511
+ */
512
+ getMemory(): AgentMemory;
513
+ /**
514
+ * Get registered tools.
515
+ */
516
+ getTools(): Map<string, Tool>;
517
+ /**
518
+ * Add a tool to the agent.
519
+ */
520
+ addTool(tool: Tool): void;
521
+ /**
522
+ * Remove a tool from the agent.
523
+ */
524
+ removeTool(name: string): boolean;
525
+ /**
526
+ * Sleep for a specified duration.
527
+ */
528
+ protected sleep(ms: number): Promise<void>;
529
+ }
530
+
531
+ /**
532
+ * LocalExecutor - JavaScript code execution engine using Node's vm module
533
+ *
534
+ * Executes JavaScript code chunks in an isolated context with:
535
+ * - State persistence between steps (variables carry forward)
536
+ * - Tool injection (tools available as async functions)
537
+ * - Dynamic imports via CDN (esm.sh)
538
+ * - Print capture and logging
539
+ * - Safety timeouts
540
+ */
541
+
542
+ interface ExecutorConfig {
543
+ /**
544
+ * Maximum execution time in milliseconds
545
+ * @default 30000
546
+ */
547
+ timeout?: number;
548
+ /**
549
+ * Additional authorized imports (npm packages to allow)
550
+ */
551
+ authorizedImports?: string[];
552
+ /**
553
+ * Whether to allow fs module access
554
+ * @default true
555
+ */
556
+ allowFs?: boolean;
557
+ /**
558
+ * Working directory for fs operations
559
+ */
560
+ workingDirectory?: string;
561
+ }
562
+ declare class LocalExecutor {
563
+ private context;
564
+ private state;
565
+ private tools;
566
+ private config;
567
+ private capturedLogs;
568
+ constructor(config?: ExecutorConfig);
569
+ /**
570
+ * Create the VM context with available globals.
571
+ */
572
+ private createContext;
573
+ /**
574
+ * Add tools to the executor context.
575
+ */
576
+ sendTools(tools: Record<string, Tool>): void;
577
+ /**
578
+ * Send variables to the executor state.
579
+ */
580
+ sendVariables(variables: Record<string, unknown>): void;
581
+ /**
582
+ * Execute JavaScript code and return the result.
583
+ */
584
+ execute(code: string): Promise<CodeExecutionOutput>;
585
+ /**
586
+ * Wrap code to handle async execution and final_answer calls.
587
+ */
588
+ private wrapCode;
589
+ /**
590
+ * Instrument code to capture the last expression value and convert
591
+ * let/const/var declarations to global assignments for state persistence.
592
+ */
593
+ private instrumentCode;
594
+ /**
595
+ * Check if this is the last meaningful line of code.
596
+ */
597
+ private isLastMeaningfulLine;
598
+ /**
599
+ * Update internal state from context after execution.
600
+ */
601
+ private updateStateFromContext;
602
+ /**
603
+ * Stringify a value for logging.
604
+ */
605
+ private stringify;
606
+ /**
607
+ * Reset the executor state.
608
+ */
609
+ reset(): void;
610
+ /**
611
+ * Get the current state.
612
+ */
613
+ getState(): Record<string, unknown>;
614
+ }
615
+
616
+ /**
617
+ * CodeAgent - Executes tasks by generating and running JavaScript code
618
+ *
619
+ * This is the main agent implementation for smol-js. It follows the ReAct pattern:
620
+ * 1. Receives a task
621
+ * 2. Generates reasoning and code
622
+ * 3. Executes the code in a sandboxed environment
623
+ * 4. Observes the result and continues or returns final answer
624
+ */
625
+
626
+ interface CodeAgentConfig extends AgentConfig {
627
+ /**
628
+ * Additional npm packages that can be imported dynamically
629
+ */
630
+ additionalAuthorizedImports?: string[];
631
+ /**
632
+ * Executor configuration
633
+ */
634
+ executorConfig?: ExecutorConfig;
635
+ /**
636
+ * Working directory for file operations
637
+ */
638
+ workingDirectory?: string;
639
+ }
640
+ declare class CodeAgent extends Agent {
641
+ /**
642
+ * The JavaScript code executor
643
+ */
644
+ private executor;
645
+ /**
646
+ * Authorized imports for dynamic npm package loading
647
+ */
648
+ private authorizedImports;
649
+ constructor(config: CodeAgentConfig);
650
+ /**
651
+ * Initialize the system prompt with tool definitions.
652
+ */
653
+ protected initializeSystemPrompt(): string;
654
+ /**
655
+ * Execute a single step: get LLM response, extract code, execute it.
656
+ */
657
+ protected executeStep(memoryStep: ActionStep): Promise<ActionOutput>;
658
+ /**
659
+ * Generate response from the LLM, optionally streaming.
660
+ */
661
+ private generateResponse;
662
+ /**
663
+ * Format output for display.
664
+ */
665
+ private formatOutput;
666
+ /**
667
+ * Format the observation to send back to the LLM.
668
+ */
669
+ private formatObservation;
670
+ /**
671
+ * Reset the agent and executor state.
672
+ */
673
+ reset(): void;
674
+ /**
675
+ * Get the executor instance.
676
+ */
677
+ getExecutor(): LocalExecutor;
678
+ /**
679
+ * Override addTool to also register with executor.
680
+ */
681
+ addTool(tool: Tool): void;
682
+ }
683
+
684
+ /**
685
+ * OpenAI-compatible Model implementation
686
+ *
687
+ * Supports any API that follows the OpenAI chat completions format,
688
+ * including OpenRouter, Azure OpenAI, local servers, etc.
689
+ */
690
+
691
+ interface OpenAIModelConfig {
692
+ /**
693
+ * Model identifier (e.g., "gpt-4", "anthropic/claude-sonnet-4.5")
694
+ */
695
+ modelId?: string;
696
+ /**
697
+ * API key for authentication
698
+ */
699
+ apiKey?: string;
700
+ /**
701
+ * Base URL for the API endpoint
702
+ * @default "https://openrouter.ai/api/v1"
703
+ */
704
+ baseUrl?: string;
705
+ /**
706
+ * Maximum tokens to generate
707
+ */
708
+ maxTokens?: number;
709
+ /**
710
+ * Temperature for generation (0-2)
711
+ */
712
+ temperature?: number;
713
+ /**
714
+ * Request timeout in milliseconds
715
+ */
716
+ timeout?: number;
717
+ /**
718
+ * Default headers to include in requests
719
+ */
720
+ defaultHeaders?: Record<string, string>;
721
+ }
722
+ declare class OpenAIModel extends Model {
723
+ readonly modelId: string;
724
+ private client;
725
+ private config;
726
+ constructor(config?: OpenAIModelConfig);
727
+ /**
728
+ * Generate a response from the model.
729
+ */
730
+ generate(messages: ChatMessage[], options?: GenerateOptions): Promise<ChatMessage>;
731
+ /**
732
+ * Generate a streaming response from the model.
733
+ */
734
+ generateStream(messages: ChatMessage[], options?: GenerateOptions): AsyncGenerator<string, ChatMessage, undefined>;
735
+ /**
736
+ * Format messages for the OpenAI API.
737
+ */
738
+ protected formatMessages(messages: ChatMessage[]): OpenAI.Chat.ChatCompletionMessageParam[];
739
+ }
740
+
741
+ /**
742
+ * Default tools provided to all agents
743
+ */
744
+
745
+ /**
746
+ * FinalAnswerTool - Used by the agent to return the final answer.
747
+ * This is always available to CodeAgent.
748
+ */
749
+ declare class FinalAnswerTool extends Tool {
750
+ readonly name = "final_answer";
751
+ readonly description = "Returns the final answer to the user query. Use this when you have completed the task and have the final result.";
752
+ readonly inputs: ToolInputs;
753
+ readonly outputType = "any";
754
+ execute(args: Record<string, unknown>): Promise<unknown>;
755
+ }
756
+ /**
757
+ * UserInputTool - Allows the agent to ask the user for input.
758
+ */
759
+ declare class UserInputTool extends Tool {
760
+ readonly name = "user_input";
761
+ readonly description = "Asks the user for additional input or clarification.";
762
+ readonly inputs: ToolInputs;
763
+ readonly outputType = "string";
764
+ private inputHandler?;
765
+ constructor(inputHandler?: (question: string) => Promise<string>);
766
+ execute(args: Record<string, unknown>): Promise<string>;
767
+ }
768
+ declare const finalAnswerTool: FinalAnswerTool;
769
+
770
+ /**
771
+ * System prompts for CodeAgent
772
+ *
773
+ * Adapted from smolagents Python prompts but optimized for JavaScript execution.
774
+ */
775
+ interface PromptVariables {
776
+ tools: string;
777
+ authorizedImports: string;
778
+ customInstructions?: string;
779
+ }
780
+ /**
781
+ * Generate the system prompt for CodeAgent.
782
+ */
783
+ declare function generateSystemPrompt(variables: PromptVariables): string;
784
+ /**
785
+ * Prompt for generating a final answer when max steps is reached.
786
+ */
787
+ declare const FINAL_ANSWER_PROMPT = "Based on the steps you've taken so far, provide the best answer you can to the original task.\nIf you couldn't fully complete the task, explain what you accomplished and what remains to be done.\nCall final_answer() with your response.";
788
+ /**
789
+ * Error recovery prompt.
790
+ */
791
+ declare function getErrorRecoveryPrompt(error: string): string;
792
+
793
+ export { type ActionOutput, type ActionStep, Agent, type AgentConfig, type AgentConfig$1 as AgentConfigType, AgentLogger, AgentMemory, type ChatMessage, CodeAgent, type CodeAgentConfig, type CodeExecutionOutput, type ExecutorConfig, FINAL_ANSWER_PROMPT, type FinalAnswerStep, FinalAnswerTool, type GenerateOptions, LocalExecutor, LogLevel, type MemoryStep, type MessageRole, Model, type ModelConfig, OpenAIModel, type OpenAIModelConfig, type PromptVariables, type RunResult, type StreamEvent, type SystemPromptStep, type TaskStep, type Timing, type TokenUsage, Tool, type ToolCall, type ToolInput, type ToolInputType, type ToolInputs, UserInputTool, createTool, finalAnswerTool, generateSystemPrompt, getErrorRecoveryPrompt };