@samrahimi/smol-js 0.4.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -36,10 +36,23 @@ interface ToolInput {
     description: string;
     required?: boolean;
     default?: unknown;
+    enum?: string[];
 }
 interface ToolInputs {
     [key: string]: ToolInput;
 }
+interface OpenAIToolDefinition {
+    type: 'function';
+    function: {
+        name: string;
+        description: string;
+        parameters: {
+            type: 'object';
+            properties: Record<string, unknown>;
+            required?: string[];
+        };
+    };
+}
 interface CodeExecutionOutput {
     output: unknown;
     logs: string;
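
For orientation, here is a sketch of a value matching the OpenAIToolDefinition interface added above, roughly what a tool's toOpenAITool() is meant to produce; the tool name, description, and parameter schema are invented for illustration, only the shape comes from the interface.

    import type { OpenAIToolDefinition } from '@samrahimi/smol-js';

    // Hypothetical definition for a "read_file" tool; only the structure is
    // dictated by OpenAIToolDefinition, the values are placeholders.
    const readFileDefinition: OpenAIToolDefinition = {
        type: 'function',
        function: {
            name: 'read_file',
            description: 'Read the contents of a file at the specified path.',
            parameters: {
                type: 'object',
                properties: {
                    path: { type: 'string', description: 'Path of the file to read' },
                },
                required: ['path'],
            },
        },
    };
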
@@ -50,6 +63,7 @@ interface ActionOutput {
     output: unknown;
     isFinalAnswer: boolean;
 }
+type MemoryStrategy = 'truncate' | 'compact';
 interface AgentConfig$1 {
     model: Model$1;
     tools?: Tool$1[];
@@ -59,6 +73,12 @@ interface AgentConfig$1 {
     additionalAuthorizedImports?: string[];
     streamOutputs?: boolean;
     verboseLevel?: LogLevel;
+    persistent?: boolean;
+    maxContextLength?: number;
+    memoryStrategy?: MemoryStrategy;
+    customInstructions?: string;
+    maxTokens?: number;
+    temperature?: number;
 }
 interface ModelConfig {
     modelId?: string;
@@ -94,12 +114,20 @@ interface ActionStep extends MemoryStep {
     modelInputMessages: ChatMessage[];
     modelOutputMessage?: ChatMessage;
     codeAction?: string;
+    toolCalls?: ToolCall[];
+    toolResults?: ToolCallResult[];
     observation?: string;
     actionOutput?: ActionOutput;
     tokenUsage?: TokenUsage;
     error?: Error;
     isFinalAnswer?: boolean;
 }
+interface ToolCallResult {
+    toolCallId: string;
+    toolName: string;
+    result: unknown;
+    error?: string;
+}
 interface FinalAnswerStep extends MemoryStep {
     type: 'final';
     answer: unknown;
@@ -121,6 +149,7 @@ interface Tool$1 {
     outputType: string;
     execute: (args: Record<string, unknown>) => Promise<unknown>;
     toCodePrompt: () => string;
+    toOpenAITool: () => OpenAIToolDefinition;
 }
 interface Model$1 {
     modelId: string;
@@ -132,6 +161,51 @@ interface GenerateOptions {
     maxTokens?: number;
     temperature?: number;
     tools?: Tool$1[];
+    toolDefinitions?: OpenAIToolDefinition[];
+}
+interface YAMLAgentDefinition {
+    name: string;
+    type: 'ToolUseAgent' | 'CodeAgent';
+    description?: string;
+    model?: YAMLModelDefinition;
+    tools?: string[];
+    agents?: string[];
+    maxSteps?: number;
+    maxTokens?: number;
+    temperature?: number;
+    persistent?: boolean;
+    maxContextLength?: number;
+    memoryStrategy?: MemoryStrategy;
+    customInstructions?: string;
+    systemPrompt?: string;
+}
+interface YAMLModelDefinition {
+    modelId?: string;
+    baseUrl?: string;
+    apiKey?: string;
+    maxTokens?: number;
+    temperature?: number;
+    timeout?: number;
+}
+interface YAMLWorkflowDefinition {
+    name: string;
+    description?: string;
+    model?: YAMLModelDefinition;
+    tools?: Record<string, YAMLToolDefinition>;
+    agents?: Record<string, YAMLAgentDefinition>;
+    entrypoint: string;
+    globalMaxContextLength?: number;
+}
+interface YAMLToolDefinition {
+    type: string;
+    config?: Record<string, unknown>;
+}
+interface OrchestratorEvent {
+    type: 'agent_start' | 'agent_step' | 'agent_tool_call' | 'agent_observation' | 'agent_end' | 'agent_error';
+    agentName: string;
+    depth: number;
+    data: unknown;
+    timestamp: number;
+}
 }
 
 /**
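
As a sketch of how the new YAML-oriented types fit together, the object below type-checks against YAMLWorkflowDefinition; the agent name, tool type string ('exa_search'), and model id are assumptions made for illustration, not values defined by this diff.

    import type { YAMLWorkflowDefinition } from '@samrahimi/smol-js';

    // Hypothetical workflow wiring one ToolUseAgent to one tool.
    const workflow: YAMLWorkflowDefinition = {
        name: 'research',
        description: 'Single-agent research workflow (illustrative)',
        model: { modelId: 'gpt-4o-mini', temperature: 0.2 },
        tools: {
            search: { type: 'exa_search' },
        },
        agents: {
            researcher: {
                name: 'researcher',
                type: 'ToolUseAgent',
                tools: ['search'],
                maxSteps: 10,
                memoryStrategy: 'compact',
            },
        },
        entrypoint: 'researcher',
        globalMaxContextLength: 100000,
    };
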
@@ -189,6 +263,14 @@ declare abstract class Tool {
      * Used in the CodeAgent system prompt.
      */
     toCodePrompt(): string;
+    /**
+     * Generate an OpenAI-compatible tool definition for function calling.
+     */
+    toOpenAITool(): OpenAIToolDefinition;
+    /**
+     * Convert tool input type to JSON Schema type.
+     */
+    protected typeToJsonSchemaType(type: ToolInputType): string;
     /**
      * Convert tool input type to JS/TS type string.
      */
@@ -246,61 +328,59 @@ declare abstract class Model {
 }
 
 /**
- * AgentMemory - Tracks agent execution history
- *
- * Stores all steps taken by the agent and converts them to messages
- * that can be sent to the LLM for context.
+ * AgentMemory - Tracks agent execution history with context management
  */
 
+interface MemoryConfig {
+    maxContextLength?: number;
+    memoryStrategy?: MemoryStrategy;
+    model?: Model$1;
+}
 declare class AgentMemory {
-    /**
-     * System prompt step (always first)
-     */
+    /** System prompt step (always first) */
     systemPrompt: SystemPromptStep;
-    /**
-     * All execution steps
-     */
+    /** All execution steps */
     steps: (TaskStep | ActionStep | FinalAnswerStep)[];
-    constructor(systemPrompt: string);
-    /**
-     * Reset memory, keeping only the system prompt.
-     */
+    private maxContextLength;
+    private memoryStrategy;
+    private model?;
+    constructor(systemPrompt: string, config?: MemoryConfig);
+    /** Reset memory, keeping only the system prompt */
     reset(): void;
-    /**
-     * Add a task step.
-     */
+    /** Add a task step */
     addTask(task: string): TaskStep;
-    /**
-     * Create a new action step.
-     */
+    /** Create a new action step */
     createActionStep(stepNumber: number): ActionStep;
-    /**
-     * Add a final answer step.
-     */
+    /** Add a final answer step */
     addFinalAnswer(answer: unknown): FinalAnswerStep;
-    /**
-     * Get the last step.
-     */
+    /** Get the last step */
     getLastStep(): MemoryStep | undefined;
-    /**
-     * Get all action steps.
-     */
+    /** Get all action steps */
     getActionSteps(): ActionStep[];
     /**
      * Convert memory to messages for LLM context.
+     * Handles both CodeAgent (observation-based) and ToolUseAgent (tool_call-based) patterns.
      */
     toMessages(): ChatMessage[];
     /**
-     * Get total token usage across all steps.
+     * Manage context length - truncate or compact if exceeded.
      */
-    getTotalTokenUsage(): TokenUsage;
+    manageContext(): Promise<void>;
     /**
-     * Get a summary of the memory for logging.
+     * Truncate older action steps to fit within context.
      */
-    getSummary(): string;
+    private truncateOlderMessages;
     /**
-     * Serialize memory to JSON.
+     * Compact older messages into a summary.
      */
+    private compactMessages;
+    /** Get total token usage across all steps */
+    getTotalTokenUsage(): TokenUsage;
+    /** Get current estimated token count */
+    getEstimatedTokenCount(): number;
+    /** Get a summary of the memory for logging */
+    getSummary(): string;
+    /** Serialize memory to JSON */
     toJSON(): Record<string, unknown>;
 }
 
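
A minimal sketch of the new context-management surface on AgentMemory; the system prompt, task, and limit are placeholders, and the config's model field is presumably what the 'compact' strategy uses to summarize older steps.

    import { AgentMemory } from '@samrahimi/smol-js';

    // Placeholder prompt and limit; 'compact' summarizes older steps rather
    // than dropping them outright.
    const memory = new AgentMemory('You are a helpful agent.', {
        maxContextLength: 8000,
        memoryStrategy: 'compact',
    });

    memory.addTask('Summarize the latest release notes.');
    console.log(memory.getEstimatedTokenCount()); // rough token estimate for current memory
    await memory.manageContext();                 // truncates or compacts if over the limit
    const messages = memory.toMessages();         // ChatMessage[] ready to send to the model
    console.log(messages.length);
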
@@ -414,68 +494,71 @@ declare class AgentLogger {
  */
 
 interface AgentConfig {
-    /**
-     * The LLM model to use for generation
-     */
+    /** The LLM model to use for generation */
     model: Model;
-    /**
-     * Tools available to the agent
-     */
+    /** Tools available to the agent */
     tools?: Tool[];
-    /**
-     * Maximum number of steps before stopping
-     * @default 20
-     */
+    /** Maximum number of steps before stopping (default: 20) */
     maxSteps?: number;
-    /**
-     * Delay in milliseconds before executing code (for user interruption)
-     * @default 5000
-     */
+    /** Delay in ms before executing code (default: 5000) */
     codeExecutionDelay?: number;
-    /**
-     * Custom system prompt (will be merged with generated prompt)
-     */
+    /** Custom instructions appended to system prompt */
     customInstructions?: string;
-    /**
-     * Log level for output
-     * @default LogLevel.INFO
-     */
+    /** Log level for output (default: INFO) */
     verboseLevel?: LogLevel;
-    /**
-     * Whether to stream model outputs
-     * @default true
-     */
+    /** Whether to stream model outputs (default: true) */
     streamOutputs?: boolean;
+    /** Whether the agent retains memory between run() calls (default: false) */
+    persistent?: boolean;
+    /** Max context length in tokens (default: 100000) */
+    maxContextLength?: number;
+    /** Memory management strategy when context is exceeded (default: 'truncate') */
+    memoryStrategy?: MemoryStrategy;
+    /** Max tokens for generation (passed to model if set) */
+    maxTokens?: number;
+    /** Temperature for generation (passed to model if set) */
+    temperature?: number;
+    /** Agent name for logging */
+    name?: string;
+    /** Callback for orchestration events */
+    onEvent?: (event: {
+        type: string;
+        data: unknown;
+    }) => void;
 }
 declare abstract class Agent {
-    /**
-     * The LLM model for generation
-     */
+    /** The LLM model for generation */
     protected model: Model;
-    /**
-     * Available tools mapped by name
-     */
+    /** Available tools mapped by name */
     protected tools: Map<string, Tool>;
-    /**
-     * Agent memory tracking all steps
-     */
+    /** Agent memory tracking all steps */
     protected memory: AgentMemory;
-    /**
-     * Logger for formatted output
-     */
+    /** Logger for formatted output */
     protected logger: AgentLogger;
-    /**
-     * Configuration options
-     */
-    protected config: Required<Omit<AgentConfig, 'model' | 'tools'>>;
-    /**
-     * Current step number
-     */
+    /** Configuration options */
+    protected config: {
+        maxSteps: number;
+        codeExecutionDelay: number;
+        customInstructions: string;
+        verboseLevel: LogLevel;
+        streamOutputs: boolean;
+        persistent: boolean;
+        maxContextLength: number;
+        memoryStrategy: MemoryStrategy;
+        maxTokens?: number;
+        temperature?: number;
+        name: string;
+        onEvent?: (event: {
+            type: string;
+            data: unknown;
+        }) => void;
+    };
+    /** Current step number */
     protected currentStep: number;
-    /**
-     * Whether the agent is currently running
-     */
+    /** Whether the agent is currently running */
     protected isRunning: boolean;
+    /** Whether the agent has been initialized at least once */
+    private initialized;
     constructor(config: AgentConfig);
     /**
      * Initialize the system prompt for the agent.
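
A sketch of the new persistence and memory options in AgentConfig, shown on CodeAgent under the assumption that CodeAgentConfig accepts the base config fields; the model id, limits, and tasks are placeholders.

    import { CodeAgent, OpenAIModel, LogLevel } from '@samrahimi/smol-js';

    // Placeholder model id and limits.
    const agent = new CodeAgent({
        model: new OpenAIModel({ modelId: 'gpt-4o-mini' }),
        maxSteps: 15,
        persistent: true,            // retain memory between run() calls
        maxContextLength: 60000,     // manage context once this is exceeded
        memoryStrategy: 'truncate',  // or 'compact' to summarize older steps
        verboseLevel: LogLevel.INFO,
        name: 'coder',
        onEvent: (event) => console.log('[coder]', event.type),
    });

    await agent.run('Outline a plan for refactoring the billing module.');
    await agent.run('Expand step 2 of that plan.'); // with persistent: true, earlier memory is kept
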
@@ -485,46 +568,31 @@ declare abstract class Agent {
     /**
      * Execute a single step in the agent loop.
      * Must be implemented by subclasses.
-     *
-     * @param memoryStep - The memory step to populate with execution results
-     * @returns The action output from this step
      */
     protected abstract executeStep(memoryStep: ActionStep): Promise<ActionOutput>;
     /**
      * Run the agent on a task.
-     *
-     * @param task - The task description
-     * @param reset - Whether to reset memory before running
-     * @returns The final result
      */
     run(task: string, reset?: boolean): Promise<RunResult>;
     /**
      * Generate a final answer when max steps is reached.
      */
     protected provideFinalAnswer(task: string): Promise<unknown>;
-    /**
-     * Stop the agent.
-     */
+    /** Emit an orchestration event */
+    protected emitEvent(type: string, data: unknown): void;
+    /** Stop the agent */
     stop(): void;
-    /**
-     * Get the current memory.
-     */
+    /** Get the current memory */
     getMemory(): AgentMemory;
-    /**
-     * Get registered tools.
-     */
+    /** Get registered tools */
     getTools(): Map<string, Tool>;
-    /**
-     * Add a tool to the agent.
-     */
+    /** Add a tool to the agent */
     addTool(tool: Tool): void;
-    /**
-     * Remove a tool from the agent.
-     */
+    /** Remove a tool from the agent */
     removeTool(name: string): boolean;
-    /**
-     * Sleep for a specified duration.
-     */
+    /** Get agent name */
+    getName(): string;
+    /** Sleep for a specified duration */
     protected sleep(ms: number): Promise<void>;
 }
 
@@ -681,6 +749,44 @@ declare class CodeAgent extends Agent {
     addTool(tool: Tool): void;
 }
 
+/**
+ * ToolUseAgent - Executes tasks using standard OpenAI-style tool calls
+ *
+ * Unlike CodeAgent which generates and executes JavaScript code,
+ * ToolUseAgent operates by making tool calls through the LLM's native
+ * function calling capabilities, following the ReACT pattern:
+ * Think -> Act (tool call) -> Observe (result) -> repeat
+ */
+
+interface ToolUseAgentConfig extends AgentConfig {
+    /** Whether to run independent tool calls in parallel (default: true) */
+    parallelToolCalls?: boolean;
+}
+declare class ToolUseAgent extends Agent {
+    private parallelToolCalls;
+    constructor(config: ToolUseAgentConfig);
+    /**
+     * Initialize the system prompt with tool descriptions.
+     */
+    protected initializeSystemPrompt(): string;
+    /**
+     * Execute a single step: send messages with tool definitions, process tool calls.
+     */
+    protected executeStep(memoryStep: ActionStep): Promise<ActionOutput>;
+    /**
+     * Process tool calls from the model response.
+     */
+    private processToolCalls;
+    /**
+     * Override provideFinalAnswer to use tool calling format.
+     */
+    protected provideFinalAnswer(task: string): Promise<unknown>;
+    /**
+     * Add a tool, which can also be an Agent instance (auto-wraps with AgentTool).
+     */
+    addTool(tool: Tool): void;
+}
+
 /**
  * OpenAI-compatible Model implementation
  *
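
A sketch of driving the ReACT loop described above with the package's exported pieces; the model id, task, and EXA_API_KEY environment variable are placeholders, and actual behaviour depends on the runtime implementation.

    import { ToolUseAgent, OpenAIModel, ExaSearchTool, ReadFileTool } from '@samrahimi/smol-js';

    const agent = new ToolUseAgent({
        model: new OpenAIModel({ modelId: 'gpt-4o-mini' }),         // placeholder model id
        tools: [
            new ExaSearchTool({ apiKey: process.env.EXA_API_KEY }), // assumed env variable
            new ReadFileTool(),
        ],
        maxSteps: 10,
        parallelToolCalls: true, // independent tool calls may run concurrently
    });

    // Each step: the model emits tool calls, results are recorded as tool results,
    // and the loop repeats until a final answer is produced.
    const result = await agent.run('Find the current Node.js LTS version and cite your source.');
    console.log(result);
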
@@ -703,11 +809,11 @@ interface OpenAIModelConfig {
      */
     baseUrl?: string;
     /**
-     * Maximum tokens to generate
+     * Maximum tokens to generate (omitted from requests by default)
      */
     maxTokens?: number;
     /**
-     * Temperature for generation (0-2)
+     * Temperature for generation (omitted from requests by default)
      */
     temperature?: number;
     /**
@@ -725,7 +831,7 @@ declare class OpenAIModel extends Model {
     private config;
     constructor(config?: OpenAIModelConfig);
     /**
-     * Generate a response from the model.
+     * Generate a response from the model (supports tool calling).
      */
     generate(messages: ChatMessage[], options?: GenerateOptions): Promise<ChatMessage>;
     /**
@@ -733,7 +839,7 @@ declare class OpenAIModel extends Model {
      */
     generateStream(messages: ChatMessage[], options?: GenerateOptions): AsyncGenerator<string, ChatMessage, undefined>;
     /**
-     * Format messages for the OpenAI API.
+     * Format messages for the OpenAI API, including tool call/response messages.
      */
     protected formatMessages(messages: ChatMessage[]): OpenAI.Chat.ChatCompletionMessageParam[];
 }
@@ -826,6 +932,115 @@ declare class AgentTool extends Tool {
  */
 declare function agentAsTool(agent: Agent, options?: Omit<AgentToolConfig, 'agent'>): AgentTool;
 
+/**
+ * ReadFileTool - Read contents from a file
+ */
+
+declare class ReadFileTool extends Tool {
+    readonly name = "read_file";
+    readonly description = "Read the contents of a file at the specified path. Returns the file content as a string.";
+    readonly inputs: ToolInputs;
+    readonly outputType = "string";
+    private workingDirectory;
+    constructor(config?: {
+        workingDirectory?: string;
+    });
+    execute(args: Record<string, unknown>): Promise<string>;
+}
+
+/**
+ * WriteFileTool - Write content to a file
+ */
+
+declare class WriteFileTool extends Tool {
+    readonly name = "write_file";
+    readonly description = "Write content to a file at the specified path. Creates the file if it does not exist, and creates parent directories as needed. Overwrites existing content by default.";
+    readonly inputs: ToolInputs;
+    readonly outputType = "string";
+    private workingDirectory;
+    constructor(config?: {
+        workingDirectory?: string;
+    });
+    execute(args: Record<string, unknown>): Promise<string>;
+}
+
+/**
+ * CurlTool - HTTP requests (GET/POST) using fetch
+ */
+
+declare class CurlTool extends Tool {
+    readonly name = "curl";
+    readonly description = "Make HTTP requests to any URL. Supports GET and POST methods with custom headers and body. Returns the response body as text.";
+    readonly inputs: ToolInputs;
+    readonly outputType = "string";
+    private timeout;
+    constructor(config?: {
+        timeout?: number;
+    });
+    execute(args: Record<string, unknown>): Promise<string>;
+}
+
+/**
+ * ExaSearchTool - Web search using the Exa.ai API
+ *
+ * Uses Exa's embeddings-based search for semantically intelligent results.
+ */
+
+interface ExaSearchConfig {
+    apiKey?: string;
+}
+declare class ExaSearchTool extends Tool {
+    readonly name = "exa_search";
+    readonly description = "Search the web using Exa.ai semantic search. Returns relevant web pages with titles, URLs, and optionally content snippets. Use this for finding information, research, and fact-checking.";
+    readonly inputs: ToolInputs;
+    readonly outputType = "string";
+    private apiKey;
+    constructor(config?: ExaSearchConfig);
+    setup(): Promise<void>;
+    execute(args: Record<string, unknown>): Promise<string>;
+}
+
+/**
+ * ExaGetContentsTool - Get webpage contents using Exa.ai API
+ *
+ * Fetches and extracts clean text content from web pages.
+ */
+
+interface ExaGetContentsConfig {
+    apiKey?: string;
+}
+declare class ExaGetContentsTool extends Tool {
+    readonly name = "exa_get_contents";
+    readonly description = "Get the full text content of one or more web pages using Exa.ai. Returns cleaned, readable text extracted from the HTML. Use this to read articles, documentation, or any web page content.";
+    readonly inputs: ToolInputs;
+    readonly outputType = "string";
+    private apiKey;
+    constructor(config?: ExaGetContentsConfig);
+    setup(): Promise<void>;
+    execute(args: Record<string, unknown>): Promise<string>;
+}
+
+/**
+ * ExaResearchTool - Deep research on a topic using Exa.ai
+ *
+ * Performs multi-step research by combining search and content retrieval
+ * to produce comprehensive findings on a topic.
+ */
+
+interface ExaResearchConfig {
+    apiKey?: string;
+}
+declare class ExaResearchTool extends Tool {
+    readonly name = "exa_research";
+    readonly description = "Perform deep research on a single topic using Exa.ai. Searches for relevant sources, retrieves their content, and finds similar pages for comprehensive coverage. Returns a structured research summary with sources. Use this for thorough research on any topic.";
+    readonly inputs: ToolInputs;
+    readonly outputType = "string";
+    private apiKey;
+    constructor(config?: ExaResearchConfig);
+    setup(): Promise<void>;
+    execute(args: Record<string, unknown>): Promise<string>;
+}
+
 /**
  * System prompts for CodeAgent
  *
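
A sketch of calling the new built-in tools directly; because execute() takes a plain Record<string, unknown>, the argument keys used below (path, content, url, method) are assumptions about each tool's declared inputs rather than something this diff spells out.

    import { WriteFileTool, ReadFileTool, CurlTool } from '@samrahimi/smol-js';

    const write = new WriteFileTool({ workingDirectory: './workspace' });
    const read = new ReadFileTool({ workingDirectory: './workspace' });
    const curl = new CurlTool({ timeout: 15000 });

    // Argument keys below are assumed; check each tool's `inputs` for the real schema.
    await write.execute({ path: 'notes.txt', content: 'hello from smol-js' });
    const text = await read.execute({ path: 'notes.txt' });
    const page = await curl.execute({ url: 'https://example.com', method: 'GET' });
    console.log(text.length, page.length);
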
@@ -849,4 +1064,137 @@ declare const FINAL_ANSWER_PROMPT = "Based on the steps you've taken so far, pro
  */
 declare function getErrorRecoveryPrompt(error: string): string;
 
-export { type ActionOutput, type ActionStep, Agent, type AgentConfig, type AgentConfig$1 as AgentConfigType, AgentLogger, AgentMemory, AgentTool, type AgentToolConfig, type ChatMessage, CodeAgent, type CodeAgentConfig, type CodeExecutionOutput, type ExecutorConfig, FINAL_ANSWER_PROMPT, type FinalAnswerStep, FinalAnswerTool, type GenerateOptions, LocalExecutor, LogLevel, type MemoryStep, type MessageRole, Model, type ModelConfig, OpenAIModel, type OpenAIModelConfig, type PromptVariables, type RunResult, type StreamEvent, type SystemPromptStep, type TaskStep, type Timing, type TokenUsage, Tool, type ToolCall, type ToolInput, type ToolInputType, type ToolInputs, UserInputTool, agentAsTool, createTool, finalAnswerTool, generateSystemPrompt, getErrorRecoveryPrompt };
+/**
+ * System prompts for ToolUseAgent
+ */
+interface ToolUsePromptVariables {
+    tools: string;
+    customInstructions?: string;
+    /** Whether this agent has sub-agents (AgentTool instances) */
+    hasSubAgents?: boolean;
+    /** Whether this agent has file tools (read_file, write_file) */
+    hasFileTools?: boolean;
+}
+/**
+ * Generate the system prompt for ToolUseAgent.
+ */
+declare function generateToolUseSystemPrompt(variables: ToolUsePromptVariables): string;
+/**
+ * Format tool descriptions for the system prompt.
+ */
+declare function formatToolDescriptions(tools: Array<{
+    name: string;
+    description: string;
+    inputs: Record<string, {
+        type: string;
+        description: string;
+        required?: boolean;
+    }>;
+}>): string;
+
+/**
+ * YAMLLoader - Loads and parses YAML workflow definitions into runnable agents
+ */
+
+interface LoadedWorkflow {
+    name: string;
+    description?: string;
+    entrypointAgent: Agent;
+    agents: Map<string, Agent>;
+    tools: Map<string, Tool>;
+}
+declare class YAMLLoader {
+    private customTools;
+    /**
+     * Register a custom tool type for use in YAML definitions.
+     */
+    registerToolType(typeName: string, toolClass: new (config?: Record<string, unknown>) => Tool): void;
+    /**
+     * Load a workflow from a YAML file path.
+     */
+    loadFromFile(filePath: string): LoadedWorkflow;
+    /**
+     * Load a workflow from a YAML string.
+     */
+    loadFromString(yamlContent: string): LoadedWorkflow;
+    /**
+     * Build a runnable workflow from a parsed definition.
+     */
+    private buildWorkflow;
+    /**
+     * Build a tool instance from a type name and config.
+     */
+    private buildTool;
+    /**
+     * Build an agent instance from a YAML definition.
+     */
+    private buildAgent;
+}
+
+/**
+ * Orchestrator - Loads, runs, and provides real-time visibility into agent execution
+ */
+
+interface OrchestratorConfig {
+    /** Whether to display real-time output (default: true) */
+    verbose?: boolean;
+    /** Callback for orchestrator events */
+    onEvent?: (event: OrchestratorEvent) => void;
+}
+declare class Orchestrator {
+    private loader;
+    private config;
+    private activeAgents;
+    private eventLog;
+    constructor(config?: OrchestratorConfig);
+    /**
+     * Load a workflow from a YAML file.
+     */
+    loadWorkflow(filePath: string): LoadedWorkflow;
+    /**
+     * Load a workflow from YAML string.
+     */
+    loadWorkflowFromString(yamlContent: string): LoadedWorkflow;
+    /**
+     * Run a loaded workflow with a task.
+     */
+    runWorkflow(workflow: LoadedWorkflow, task: string): Promise<RunResult>;
+    /**
+     * Run a standalone agent with a task.
+     */
+    runAgent(agent: Agent, task: string): Promise<RunResult>;
+    /**
+     * Instrument an agent with orchestrator event tracking.
+     */
+    private instrumentAgent;
+    /**
+     * Display workflow info at startup.
+     */
+    private displayWorkflowInfo;
+    /**
+     * Display run start info.
+     */
+    private displayRunStart;
+    /**
+     * Display run completion info.
+     */
+    private displayRunEnd;
+    /**
+     * Display an error.
+     */
+    private displayError;
+    /**
+     * Log an orchestration event.
+     */
+    logEvent(event: OrchestratorEvent): void;
+    /**
+     * Get the event log.
+     */
+    getEventLog(): OrchestratorEvent[];
+    /**
+     * Get the YAML loader for registering custom tools.
+     */
+    getLoader(): YAMLLoader;
+}
+
+export { type ActionOutput, type ActionStep, Agent, type AgentConfig, type AgentConfig$1 as AgentConfigType, AgentLogger, AgentMemory, AgentTool, type AgentToolConfig, type ChatMessage, CodeAgent, type CodeAgentConfig, type CodeExecutionOutput, CurlTool, ExaGetContentsTool, ExaResearchTool, ExaSearchTool, type ExecutorConfig, FINAL_ANSWER_PROMPT, type FinalAnswerStep, FinalAnswerTool, type GenerateOptions, type LoadedWorkflow, LocalExecutor, LogLevel, type MemoryStep, type MemoryStrategy, type MessageRole, Model, type ModelConfig, OpenAIModel, type OpenAIModelConfig, type OpenAIToolDefinition, Orchestrator, type OrchestratorConfig, type OrchestratorEvent, type PromptVariables, ReadFileTool, type RunResult, type StreamEvent, type SystemPromptStep, type TaskStep, type Timing, type TokenUsage, Tool, type ToolCall, type ToolCallResult, type ToolInput, type ToolInputType, type ToolInputs, ToolUseAgent, type ToolUseAgentConfig, type ToolUsePromptVariables, UserInputTool, WriteFileTool, type YAMLAgentDefinition, YAMLLoader, type YAMLModelDefinition, type YAMLToolDefinition, type YAMLWorkflowDefinition, agentAsTool, createTool, finalAnswerTool, formatToolDescriptions, generateSystemPrompt, generateToolUseSystemPrompt, getErrorRecoveryPrompt };
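
To close the loop on the YAML and orchestration additions, here is a sketch of loading and running a workflow with the exported Orchestrator; the file path and task are placeholders, and custom tool types would be registered through getLoader().registerToolType() before loading.

    import { Orchestrator } from '@samrahimi/smol-js';

    const orchestrator = new Orchestrator({
        verbose: true,
        onEvent: (event) => console.log(`[depth ${event.depth}] ${event.agentName}: ${event.type}`),
    });

    // Hypothetical YAML file on disk, shaped like YAMLWorkflowDefinition above.
    const workflow = orchestrator.loadWorkflow('./workflows/research.yaml');
    const result = await orchestrator.runWorkflow(workflow, 'Summarize recent TypeScript releases.');

    console.log(result);
    console.log(`events recorded: ${orchestrator.getEventLog().length}`);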