art-framework 0.2.4 → 0.2.5

This diff shows the changes between publicly released versions of the package as they appear in the public registry, and is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -1,3 +1,93 @@
1
+ type UnsubscribeFunction = () => void;
2
+ interface Subscription<DataType, FilterType> {
3
+ id: string;
4
+ callback: (data: DataType) => void;
5
+ filter?: FilterType;
6
+ options?: {
7
+ threadId?: string;
8
+ };
9
+ }
10
+ /**
11
+ * A generic class for implementing a publish/subscribe pattern with filtering capabilities.
12
+ * Designed for decoupling components, particularly UI updates from backend events.
13
+ */
14
+ declare class TypedSocket$1<DataType, FilterType = any> {
15
+ protected subscriptions: Map<string, Subscription<DataType, FilterType>>;
16
+ constructor();
17
+ /**
18
+ * Subscribes a callback function to receive notifications.
19
+ * @param callback - The function to call when new data is notified.
20
+ * @param filter - An optional filter to only receive specific types of data.
21
+ * @param options - Optional configuration, like a threadId for filtering.
22
+ * @returns An unsubscribe function.
23
+ */
24
+ subscribe(callback: (data: DataType) => void, filter?: FilterType, options?: {
25
+ threadId?: string;
26
+ }): UnsubscribeFunction;
27
+ /**
28
+ * Notifies all relevant subscribers with new data.
29
+ * @param data - The data payload to send to subscribers.
30
+ * @param options - Optional targeting options (e.g., targetThreadId).
31
+ * @param filterCheck - A function to check if a subscription's filter matches the data.
32
+ */
33
+ notify(data: DataType, options?: {
34
+ targetThreadId?: string;
35
+ targetSessionId?: string;
36
+ }, // targetSessionId might be useful later
37
+ filterCheck?: (data: DataType, filter?: FilterType) => boolean): void;
38
+ /**
39
+ * Optional: Retrieves historical data. This base implementation is empty.
40
+ * Subclasses might implement this by interacting with repositories.
41
+ */
42
+ getHistory?(_filter?: FilterType, _options?: {
43
+ threadId?: string;
44
+ limit?: number;
45
+ }): Promise<DataType[]>;
46
+ /**
47
+ * Clears all subscriptions. Useful for cleanup.
48
+ */
49
+ clearAllSubscriptions(): void;
50
+ }
51
+
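As a usage sketch of the publish/subscribe contract above (the `$1` suffix is a bundling artifact; concrete subclasses such as the conversation socket expose the same API), subscribing and later cleaning up might look like:

    // Hypothetical socket typed over ConversationMessage, filtered by MessageRole.
    declare const socket: TypedSocket$1<ConversationMessage, MessageRole>;

    const unsubscribe = socket.subscribe(
      (message) => console.log('New message:', message.content), // callback
      undefined,                                                  // no role filter
      { threadId: 'thread-1' }                                    // only events for this thread
    );

    // Later, e.g. when a UI component unmounts:
    unsubscribe();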
52
+ /** Entry defining an available provider adapter */
53
+ interface AvailableProviderEntry {
54
+ name: string;
55
+ adapter: new (options: any) => ProviderAdapter;
56
+ baseOptions?: any;
57
+ isLocal?: boolean;
58
+ }
59
+ /** Configuration for the ProviderManager passed during ART initialization */
60
+ interface ProviderManagerConfig {
61
+ availableProviders: AvailableProviderEntry[];
62
+ /** Max concurrent ACTIVE instances per API-based provider NAME. Default: 5 */
63
+ maxParallelApiInstancesPerProvider?: number;
64
+ /** Time in seconds an API adapter instance can be idle before being eligible for removal. Default: 300 */
65
+ apiInstanceIdleTimeoutSeconds?: number;
66
+ }
67
+ /** Configuration passed AT RUNTIME for a specific LLM call */
68
+ interface RuntimeProviderConfig {
69
+ providerName: string;
70
+ modelId: string;
71
+ adapterOptions: any;
72
+ }
73
+ /** Object returned by ProviderManager granting access to an adapter instance */
74
+ interface ManagedAdapterAccessor {
75
+ adapter: ProviderAdapter;
76
+ /** Signals that the current call using this adapter instance is finished. */
77
+ release: () => void;
78
+ }
79
+ /** Interface for the ProviderManager */
80
+ interface IProviderManager {
81
+ /** Returns identifiers for all registered potential providers */
82
+ getAvailableProviders(): string[];
83
+ /**
84
+ * Gets a managed adapter instance based on the runtime config.
85
+ * Handles instance creation, caching, pooling limits, and singleton constraints.
86
+ * May queue requests or throw errors based on concurrency limits.
87
+ */
88
+ getAdapter(config: RuntimeProviderConfig): Promise<ManagedAdapterAccessor>;
89
+ }
90
+
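A minimal sketch of how these provider types fit together; the provider name, model id, and adapter options are illustrative values, not taken from this diff:

    declare const providerManager: IProviderManager;

    async function runWithAdapter(): Promise<void> {
      const config: RuntimeProviderConfig = {
        providerName: 'openai',                       // must match an AvailableProviderEntry.name
        modelId: 'gpt-4o-mini',                       // illustrative model id
        adapterOptions: { apiKey: process.env.OPENAI_API_KEY },
      };
      const accessor: ManagedAdapterAccessor = await providerManager.getAdapter(config);
      try {
        // ... use accessor.adapter.call(...) here ...
      } finally {
        accessor.release();                           // return the instance to the pool
      }
    }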
1
91
  /**
2
92
  * Represents the role of a message sender in a conversation.
3
93
  */
@@ -11,40 +101,128 @@ declare enum MessageRole {
11
101
  * Represents a single message within a conversation thread.
12
102
  */
13
103
  interface ConversationMessage {
104
+ /** A unique identifier for this specific message. */
14
105
  messageId: string;
106
+ /** The identifier of the conversation thread this message belongs to. */
15
107
  threadId: string;
108
+ /** The role of the sender (User, AI, System, or Tool). */
16
109
  role: MessageRole;
110
+ /** The textual content of the message. */
17
111
  content: string;
112
+ /** A Unix timestamp (in milliseconds) indicating when the message was created. */
18
113
  timestamp: number;
114
+ /** Optional metadata associated with the message (e.g., related observation IDs, tool call info, UI state). */
19
115
  metadata?: Record<string, any>;
20
116
  }
21
117
  /**
22
- * Represents the type of an observation record.
118
+ * Represents the type of an observation record, capturing significant events during agent execution.
23
119
  */
24
120
  declare enum ObservationType {
25
121
  INTENT = "INTENT",
26
122
  PLAN = "PLAN",
27
123
  THOUGHTS = "THOUGHTS",
28
- TOOL_CALL = "TOOL_CALL",// Renamed from checklist for clarity
124
+ /** Records the LLM's decision to call one or more tools (part of the plan). */
125
+ TOOL_CALL = "TOOL_CALL",
126
+ /** Records the actual execution attempt and result of a specific tool call. */
29
127
  TOOL_EXECUTION = "TOOL_EXECUTION",
30
- SYNTHESIS = "SYNTHESIS",// Added for final synthesis step
128
+ /** Records events specifically related to the synthesis phase (e.g., the LLM call). */
129
+ SYNTHESIS = "SYNTHESIS",
130
+ /** Records an error encountered during any phase of execution. */
31
131
  ERROR = "ERROR",
32
- FINAL_RESPONSE = "FINAL_RESPONSE",// Added for the final AI response message
33
- STATE_UPDATE = "STATE_UPDATE"
132
+ /** Records the final AI response message generated by the agent. */
133
+ FINAL_RESPONSE = "FINAL_RESPONSE",
134
+ /** Records changes made to the agent's persistent state. */
135
+ STATE_UPDATE = "STATE_UPDATE",
136
+ /** Logged by Agent Core when LLM stream consumption begins. */
137
+ LLM_STREAM_START = "LLM_STREAM_START",
138
+ /** Logged by Agent Core upon receiving a METADATA stream event. Content should be LLMMetadata. */
139
+ LLM_STREAM_METADATA = "LLM_STREAM_METADATA",
140
+ /** Logged by Agent Core upon receiving an END stream event. */
141
+ LLM_STREAM_END = "LLM_STREAM_END",
142
+ /** Logged by Agent Core upon receiving an ERROR stream event. Content should be Error object or message. */
143
+ LLM_STREAM_ERROR = "LLM_STREAM_ERROR"
144
+ }
145
+ /**
146
+ * Represents the different capabilities a model might possess.
147
+ * Used for model selection and validation.
148
+ */
149
+ declare enum ModelCapability {
150
+ TEXT = "text",// Basic text generation/understanding
151
+ VISION = "vision",// Ability to process and understand images
152
+ STREAMING = "streaming",// Supports streaming responses chunk by chunk
153
+ TOOL_USE = "tool_use",// Capable of using tools/function calling
154
+ RAG = "rag",// Built-in or optimized for Retrieval-Augmented Generation
155
+ CODE = "code",// Specialized in understanding or generating code
156
+ REASONING = "reasoning"
34
157
  }
35
158
  /**
36
159
  * Represents a recorded event during the agent's execution.
37
160
  */
38
161
  interface Observation {
162
+ /** A unique identifier for this specific observation record. */
39
163
  id: string;
164
+ /** The identifier of the conversation thread this observation relates to. */
40
165
  threadId: string;
166
+ /** An optional identifier for tracing a request across multiple systems or components. */
41
167
  traceId?: string;
168
+ /** A Unix timestamp (in milliseconds) indicating when the observation was recorded. */
42
169
  timestamp: number;
170
+ /** The category of the event being observed (e.g., PLAN, THOUGHTS, TOOL_EXECUTION). */
43
171
  type: ObservationType;
172
+ /** A concise, human-readable title summarizing the observation (often generated based on type/metadata). */
44
173
  title: string;
174
+ /** The main data payload of the observation, structure depends on the `type`. */
45
175
  content: any;
176
+ /** Optional metadata providing additional context (e.g., source phase, related IDs, status). */
46
177
  metadata?: Record<string, any>;
47
178
  }
179
+ /**
180
+ * Represents a single event emitted from an asynchronous LLM stream (`ReasoningEngine.call`).
181
+ * Allows for real-time delivery of tokens, metadata, errors, and lifecycle signals.
182
+ * Adapters are responsible for translating provider-specific stream chunks into these standard events.
183
+ */
184
+ interface StreamEvent {
185
+ /**
186
+ * The type of the stream event:
187
+ * - `TOKEN`: A chunk of text generated by the LLM.
188
+ * - `METADATA`: Information about the LLM call (e.g., token counts, stop reason), typically sent once at the end.
189
+ * - `ERROR`: An error occurred during the LLM call or stream processing. `data` will contain the Error object.
190
+ * - `END`: Signals the successful completion of the stream. `data` is typically null.
191
+ */
192
+ type: 'TOKEN' | 'METADATA' | 'ERROR' | 'END';
193
+ /**
194
+ * The actual content of the event.
195
+ * - For `TOKEN`: string (the text chunk).
196
+ * - For `METADATA`: `LLMMetadata` object.
197
+ * - For `ERROR`: `Error` object or error details.
198
+ * - For `END`: null.
199
+ */
200
+ data: any;
201
+ /**
202
+ * Optional: Provides a more specific classification for `TOKEN` events,
203
+ * combining LLM-level detection (thinking/response, if available from adapter)
204
+ * and agent-level context (`callContext` from `CallOptions`).
205
+ * Used by consumers (like UI) to differentiate between intermediate thoughts and the final response.
206
+ *
207
+ * - `LLM_THINKING`: Token identified by the adapter as part of the LLM's internal reasoning/thought process.
208
+ * - `LLM_RESPONSE`: Token identified by the adapter as part of the LLM's final response content.
209
+ * - `AGENT_THOUGHT_LLM_THINKING`: Token from an LLM call made in the 'AGENT_THOUGHT' context, identified as thinking.
210
+ * - `AGENT_THOUGHT_LLM_RESPONSE`: Token from an LLM call made in the 'AGENT_THOUGHT' context, identified as response (e.g., the raw planning output).
211
+ * - `FINAL_SYNTHESIS_LLM_THINKING`: Token from an LLM call made in the 'FINAL_SYNTHESIS' context, identified as thinking.
212
+ * - `FINAL_SYNTHESIS_LLM_RESPONSE`: Token from an LLM call made in the 'FINAL_SYNTHESIS' context, identified as response (part of the final answer to the user).
213
+ *
214
+ * Note: Not all adapters can reliably distinguish 'LLM_THINKING' vs 'LLM_RESPONSE'.
215
+ * Adapters should prioritize setting the agent context part (`AGENT_THOUGHT_...` or `FINAL_SYNTHESIS_...`) based on `CallOptions.callContext`.
216
+ * If thinking detection is unavailable, adapters should default to `AGENT_THOUGHT_LLM_RESPONSE` or `FINAL_SYNTHESIS_LLM_RESPONSE`.
217
+ */
218
+ tokenType?: 'LLM_THINKING' | 'LLM_RESPONSE' | 'AGENT_THOUGHT_LLM_THINKING' | 'AGENT_THOUGHT_LLM_RESPONSE' | 'FINAL_SYNTHESIS_LLM_THINKING' | 'FINAL_SYNTHESIS_LLM_RESPONSE';
219
+ /** The identifier of the conversation thread this event belongs to. */
220
+ threadId: string;
221
+ /** The identifier tracing the specific agent execution cycle this event is part of. */
222
+ traceId: string;
223
+ /** Optional identifier linking the event to a specific UI tab/window. */
224
+ sessionId?: string;
225
+ }
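To illustrate the event shapes above, a consumer given the AsyncIterable returned by `ReasoningEngine.call` might accumulate the final answer roughly like this:

    async function collectResponse(events: AsyncIterable<StreamEvent>): Promise<string> {
      let finalText = '';
      for await (const event of events) {
        switch (event.type) {
          case 'TOKEN':
            // Keep only response tokens (covers LLM_RESPONSE and the *_LLM_RESPONSE variants).
            if (!event.tokenType || event.tokenType.endsWith('LLM_RESPONSE')) {
              finalText += String(event.data);
            }
            break;
          case 'METADATA':
            console.debug('LLM metadata', event.data); // an LLMMetadata object
            break;
          case 'ERROR':
            throw event.data;
          case 'END':
            break;
        }
      }
      return finalText;
    }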
48
226
  /**
49
227
  * Represents a basic JSON Schema definition, focusing on object types commonly used for tool inputs/outputs.
50
228
  * This is a simplified representation and doesn't cover all JSON Schema features.
@@ -74,15 +252,42 @@ type JsonSchema = JsonObjectSchema | {
74
252
  type: 'string' | 'number' | 'boolean' | 'array';
75
253
  [key: string]: any;
76
254
  };
255
+ /**
256
+ * Structure for holding metadata about an LLM call, typically received via a `METADATA` `StreamEvent`
257
+ * or parsed from a non-streaming response. Fields are optional as availability varies by provider and stream state.
258
+ */
259
+ interface LLMMetadata {
260
+ /** The number of tokens in the input prompt, if available. */
261
+ inputTokens?: number;
262
+ /** The number of tokens generated in the output response, if available. */
263
+ outputTokens?: number;
264
+ /** The number of tokens identified as part of the LLM's internal thinking process (if available from provider). */
265
+ thinkingTokens?: number;
266
+ /** The time elapsed (in milliseconds) until the first token was generated in a streaming response, if applicable and available. */
267
+ timeToFirstTokenMs?: number;
268
+ /** The total time elapsed (in milliseconds) for the entire generation process, if available. */
269
+ totalGenerationTimeMs?: number;
270
+ /** The reason the LLM stopped generating tokens (e.g., 'stop_sequence', 'max_tokens', 'tool_calls'), if available. */
271
+ stopReason?: string;
272
+ /** Optional raw usage data provided directly by the LLM provider for extensibility (structure depends on provider). */
273
+ providerRawUsage?: any;
274
+ /** The trace ID associated with the LLM call, useful for correlating metadata with the specific request. */
275
+ traceId?: string;
276
+ }
77
277
  /**
78
278
  * Defines the schema for a tool, including its input parameters.
79
279
  * Uses JSON Schema format for inputSchema.
80
280
  */
81
281
  interface ToolSchema {
282
+ /** A unique name identifying the tool (used in LLM prompts and registry lookups). Must be unique. */
82
283
  name: string;
284
+ /** A clear description of what the tool does, intended for the LLM to understand its purpose and usage. */
83
285
  description: string;
286
+ /** A JSON Schema object defining the structure, types, and requirements of the input arguments the tool expects. */
84
287
  inputSchema: JsonSchema;
288
+ /** An optional JSON Schema object defining the expected structure of the data returned in the `output` field of a successful `ToolResult`. */
85
289
  outputSchema?: JsonSchema;
290
+ /** Optional array of examples demonstrating how to use the tool, useful for few-shot prompting of the LLM. */
86
291
  examples?: Array<{
87
292
  input: any;
88
293
  output?: any;
@@ -93,186 +298,331 @@ interface ToolSchema {
93
298
  * Represents the structured result of a tool execution.
94
299
  */
95
300
  interface ToolResult {
301
+ /** The unique identifier of the corresponding `ParsedToolCall` that initiated this execution attempt. */
96
302
  callId: string;
303
+ /** The name of the tool that was executed. */
97
304
  toolName: string;
305
+ /** Indicates whether the tool execution succeeded or failed. */
98
306
  status: 'success' | 'error';
307
+ /** The data returned by the tool upon successful execution. Structure may be validated against `outputSchema`. */
99
308
  output?: any;
309
+ /** A descriptive error message if the execution failed (`status` is 'error'). */
100
310
  error?: string;
311
+ /** Optional metadata about the execution (e.g., duration, cost, logs). */
101
312
  metadata?: Record<string, any>;
102
313
  }
103
314
  /**
104
315
  * Represents a parsed request from the LLM to call a specific tool.
105
316
  */
106
317
  interface ParsedToolCall {
318
+ /** A unique identifier generated by the OutputParser for this specific tool call request within a plan. */
107
319
  callId: string;
320
+ /** The name of the tool the LLM intends to call. Must match a registered tool's schema name. */
108
321
  toolName: string;
322
+ /** The arguments object, parsed from the LLM response, intended to be passed to the tool's `execute` method after validation. */
109
323
  arguments: any;
110
324
  }
111
325
  /**
112
326
  * Configuration specific to a conversation thread.
113
327
  */
114
328
  interface ThreadConfig {
115
- reasoning: {
116
- provider: string;
117
- model: string;
118
- parameters?: Record<string, any>;
119
- };
329
+ /** Default provider configuration for this thread. */
330
+ providerConfig: RuntimeProviderConfig;
331
+ /** An array of tool names (matching `ToolSchema.name`) that are permitted for use within this thread. */
120
332
  enabledTools: string[];
333
+ /** The maximum number of past messages (`ConversationMessage` objects) to retrieve for context. */
121
334
  historyLimit: number;
122
- systemPrompt?: string;
123
335
  }
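With the old `reasoning` block and `systemPrompt` field replaced by `providerConfig`, a thread configured against the new shape might look like this (provider and model names are illustrative):

    const threadConfig: ThreadConfig = {
      providerConfig: {
        providerName: 'anthropic',              // illustrative provider name
        modelId: 'claude-3-haiku',              // illustrative model id
        adapterOptions: { temperature: 0.2 },
      },
      enabledTools: ['calculator'],             // must match registered ToolSchema names
      historyLimit: 20,
    };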
124
336
  /**
125
337
  * Represents non-configuration state associated with an agent or thread.
126
338
  * Could include user preferences, accumulated knowledge, etc. (Less defined for v1.0)
127
339
  */
128
340
  interface AgentState {
341
+ /** A flexible object to store persistent, non-configuration data associated with a thread or user (e.g., preferences, summaries, intermediate results). Structure is application-defined. */
129
342
  [key: string]: any;
130
343
  }
131
344
  /**
132
345
  * Encapsulates the configuration and state for a specific thread.
133
346
  */
134
347
  interface ThreadContext {
348
+ /** The configuration settings (`ThreadConfig`) currently active for the thread. */
135
349
  config: ThreadConfig;
350
+ /** The persistent state (`AgentState`) associated with the thread, or `null` if no state exists. */
136
351
  state: AgentState | null;
137
352
  }
138
353
  /**
139
354
  * Properties required to initiate an agent processing cycle.
140
355
  */
141
356
  interface AgentProps {
357
+ /** The user's input query or request to the agent. */
142
358
  query: string;
359
+ /** The mandatory identifier for the conversation thread. All context is scoped to this ID. */
143
360
  threadId: string;
361
+ /** An optional identifier for the specific UI session, useful for targeting UI updates. */
144
362
  sessionId?: string;
363
+ /** An optional identifier for the user interacting with the agent. */
145
364
  userId?: string;
365
+ /** An optional identifier used for tracing a request across multiple systems or services. */
146
366
  traceId?: string;
367
+ /** Optional runtime options that can override default behaviors for this specific `process` call. */
147
368
  options?: AgentOptions;
148
369
  }
149
370
  /**
150
371
  * Options to override agent behavior at runtime.
151
372
  */
152
373
  interface AgentOptions {
374
+ /** Override specific LLM parameters (e.g., temperature, max_tokens) for this call only. */
153
375
  llmParams?: Record<string, any>;
376
+ /** Override provider configuration for this specific call. */
377
+ providerConfig?: RuntimeProviderConfig;
378
+ /** Force the use of specific tools, potentially overriding the thread's `enabledTools` for this call (use with caution). */
154
379
  forceTools?: string[];
380
+ /** Specify a particular reasoning model to use for this call, overriding the thread's default. */
381
+ overrideModel?: {
382
+ provider: string;
383
+ model: string;
384
+ };
385
+ /** Request a streaming response for this specific agent process call. */
386
+ stream?: boolean;
387
+ /** Override the prompt template used for this specific call. */
388
+ promptTemplateId?: string;
155
389
  }
156
390
  /**
157
391
  * The final structured response returned by the agent core after processing.
158
392
  */
159
393
  interface AgentFinalResponse {
394
+ /** The final `ConversationMessage` generated by the AI, which has also been persisted. */
160
395
  response: ConversationMessage;
396
+ /** Metadata summarizing the execution cycle that produced this response. */
161
397
  metadata: ExecutionMetadata;
162
398
  }
163
399
  /**
164
- * Metadata summarizing an agent execution cycle.
400
+ * Metadata summarizing an agent execution cycle, including performance metrics and outcomes.
165
401
  */
166
402
  interface ExecutionMetadata {
403
+ /** The thread ID associated with this execution cycle. */
167
404
  threadId: string;
405
+ /** The trace ID used during this execution, if provided. */
168
406
  traceId?: string;
407
+ /** The user ID associated with the execution, if provided. */
169
408
  userId?: string;
409
+ /** The overall status of the execution ('success', 'error', or 'partial' if some steps failed but a response was generated). */
170
410
  status: 'success' | 'error' | 'partial';
411
+ /** The total duration of the `agent.process()` call in milliseconds. */
171
412
  totalDurationMs: number;
413
+ /** The number of calls made to the `ReasoningEngine`. */
172
414
  llmCalls: number;
415
+ /** The number of tool execution attempts made by the `ToolSystem`. */
173
416
  toolCalls: number;
417
+ /** An optional estimated cost for the LLM calls made during this execution. */
174
418
  llmCost?: number;
419
+ /** A top-level error message if the overall status is 'error' or 'partial'. */
175
420
  error?: string;
421
+ /** Aggregated metadata from LLM calls made during the execution. */
422
+ llmMetadata?: LLMMetadata;
176
423
  }
177
424
  /**
178
425
  * Context provided to a tool during its execution.
179
426
  */
180
427
  interface ExecutionContext {
428
+ /** The ID of the thread in which the tool is being executed. */
181
429
  threadId: string;
430
+ /** The trace ID for this execution cycle, if available. */
182
431
  traceId?: string;
432
+ /** The user ID associated with the execution, if available. */
183
433
  userId?: string;
184
434
  }
185
435
  /**
186
- * Options for configuring an LLM call.
436
+ * Options for configuring an LLM call, including streaming and context information.
187
437
  */
188
438
  interface CallOptions {
439
+ /** The mandatory thread ID, used by the ReasoningEngine to fetch thread-specific configuration (e.g., model, params) via StateManager. */
189
440
  threadId: string;
441
+ /** Optional trace ID for correlation. */
190
442
  traceId?: string;
443
+ /** Optional user ID. */
191
444
  userId?: string;
192
- onThought?: (thought: string) => void;
445
+ /** Optional session ID. */
446
+ sessionId?: string;
447
+ /**
448
+ * Request a streaming response from the LLM provider.
449
+ * Adapters MUST check this flag.
450
+ */
451
+ stream?: boolean;
452
+ /**
453
+ * Provides context for the LLM call, allowing adapters to differentiate
454
+ * between agent-level thoughts and final synthesis calls for token typing.
455
+ * Agent Core MUST provide this.
456
+ */
457
+ callContext?: 'AGENT_THOUGHT' | 'FINAL_SYNTHESIS' | string;
458
+ /** An optional callback function invoked when the LLM streams intermediate 'thoughts' or reasoning steps.
459
+ * @deprecated Prefer using StreamEvent with appropriate tokenType for thoughts. Kept for potential transitional compatibility.
460
+ */
461
+ /** Carries the specific target provider and configuration for this call. */
462
+ providerConfig: RuntimeProviderConfig;
463
+ /** Additional key-value pairs representing provider-specific parameters (e.g., `temperature`, `max_tokens`, `top_p`). These often override defaults set in `ThreadConfig`. */
464
+ [key: string]: any;
465
+ }
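For example, an Agent Core implementation might build the options for a streaming synthesis call roughly as follows (all values illustrative):

    const callOptions: CallOptions = {
      threadId: 'thread-1',
      traceId: 'trace-42',
      stream: true,
      callContext: 'FINAL_SYNTHESIS',
      providerConfig: {
        providerName: 'openai',                 // illustrative
        modelId: 'gpt-4o-mini',                 // illustrative
        adapterOptions: {},
      },
      temperature: 0.7,                         // provider-specific extra parameter via the index signature
    };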
466
+ /**
467
+ * Defines the standard roles for messages within the `ArtStandardPrompt` format.
468
+ * These roles are chosen for broad compatibility across major LLM providers (like OpenAI, Anthropic, Gemini).
469
+ * Provider Adapters are responsible for translating these standard roles into the specific formats
470
+ * required by their respective APIs (e.g., 'assistant' might become 'model' for Gemini).
471
+ *
472
+ * - `system`: Instructions or context provided to the AI, typically at the beginning.
473
+ * - `user`: Input or queries from the end-user. Also used to wrap `tool_result` content for some providers (e.g., Gemini).
474
+ * - `assistant`: Responses generated by the AI model. Can contain text content and/or `tool_calls`.
475
+ * - `tool_request`: Represents the LLM's request to use tools (often implicitly part of an `assistant` message with `tool_calls`). Included for potential future explicit use.
476
+ * - `tool_result`: The outcome (output or error) of executing a requested tool call.
477
+ */
478
+ type ArtStandardMessageRole = 'system' | 'user' | 'assistant' | 'tool_request' | 'tool_result' | 'tool';
479
+ /**
480
+ * Represents a single message in the standardized, provider-agnostic `ArtStandardPrompt` format.
481
+ * This structure aims to capture common message elements used by various LLM APIs.
482
+ */
483
+ interface ArtStandardMessage {
484
+ /** The role indicating the source or type of the message. */
485
+ role: ArtStandardMessageRole;
486
+ /**
487
+ * The primary content of the message. The type and interpretation depend on the `role`:
488
+ * - `system`: string (The system instruction).
489
+ * - `user`: string (The user's text input).
490
+ * - `assistant`: string | null (The AI's text response, or null/empty if only making `tool_calls`).
491
+ * - `tool_request`: object | null (Structured representation of the tool call, often implicitly handled via `assistant` message's `tool_calls`).
492
+ * - `tool_result`: string (Stringified JSON output or error message from the tool execution).
493
+ */
494
+ content: string | object | null;
495
+ /** Optional name associated with the message. Primarily used for `tool_result` role to specify the name of the tool that was executed. */
496
+ name?: string;
497
+ /**
498
+ * Optional array of tool calls requested by the assistant.
499
+ * Only relevant for 'assistant' role messages that trigger tool usage.
500
+ * Structure mirrors common provider formats (e.g., OpenAI).
501
+ */
502
+ tool_calls?: Array<{
503
+ /** A unique identifier for this specific tool call request. */
504
+ id: string;
505
+ /** The type of the tool call, typically 'function'. */
506
+ type: 'function';
507
+ /** Details of the function to be called. */
508
+ function: {
509
+ /** The name of the function/tool to call. */
510
+ name: string;
511
+ /** A stringified JSON object representing the arguments for the function. */
512
+ arguments: string;
513
+ };
514
+ }>;
515
+ /**
516
+ * Optional identifier linking a 'tool_result' message back to the specific 'tool_calls' entry
517
+ * in the preceding 'assistant' message that requested it.
518
+ * Required for 'tool_result' role.
519
+ */
520
+ tool_call_id?: string;
521
+ }
522
+ /**
523
+ * Represents the entire prompt as an array of standardized messages (`ArtStandardMessage`).
524
+ * This is the standard format produced by `PromptManager.assemblePrompt` and consumed
525
+ * by `ProviderAdapter.call` for translation into provider-specific API formats.
526
+ */
527
+ type ArtStandardPrompt = ArtStandardMessage[];
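A short, hand-written `ArtStandardPrompt` showing a complete tool round-trip; the tool name and arguments are invented for the example:

    const prompt: ArtStandardPrompt = [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'What is 5 * 5?' },
      {
        role: 'assistant',
        content: null,                          // no text, only a tool call
        tool_calls: [{
          id: 'call_1',
          type: 'function',
          function: { name: 'calculator', arguments: '{"expression":"5*5"}' },
        }],
      },
      { role: 'tool_result', name: 'calculator', tool_call_id: 'call_1', content: '{"result":25}' },
    ];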
528
+ /**
529
+ * Represents the contextual data gathered by Agent Logic (e.g., `PESAgent`) to be injected
530
+ * into a Mustache blueprint/template by the `PromptManager.assemblePrompt` method.
531
+ *
532
+ * Contains standard fields commonly needed for prompts, plus allows for arbitrary
533
+ * additional properties required by specific agent blueprints. Agent logic is responsible
534
+ * for populating this context appropriately before calling `assemblePrompt`.
535
+ */
536
+ interface PromptContext {
537
+ /** The user's current query or input relevant to this prompt generation step. */
538
+ query?: string;
539
+ /**
540
+ * The conversation history, typically formatted as an array suitable for the blueprint
541
+ * (e.g., array of objects with `role` and `content`). Agent logic should pre-format this.
542
+ * Note: While `ArtStandardPrompt` could be used, simpler structures might be preferred for blueprints.
543
+ */
544
+ history?: Array<{
545
+ role: string;
546
+ content: string;
547
+ [key: string]: any;
548
+ }>;
549
+ /**
550
+ * The schemas of the tools available for use, potentially pre-formatted for the blueprint
551
+ * (e.g., with `inputSchemaJson` pre-stringified).
552
+ */
553
+ availableTools?: Array<ToolSchema & {
554
+ inputSchemaJson?: string;
555
+ }>;
556
+ /**
557
+ * The results from any tools executed in a previous step, potentially pre-formatted for the blueprint
558
+ * (e.g., with `outputJson` pre-stringified).
559
+ */
560
+ toolResults?: Array<ToolResult & {
561
+ outputJson?: string;
562
+ }>;
563
+ /** The system prompt string to be used (resolved by agent logic from config or defaults). */
564
+ systemPrompt?: string;
565
+ /** Allows agent patterns (like PES) to pass any other custom data needed by their specific blueprints (e.g., `intent`, `plan`). */
193
566
  [key: string]: any;
194
567
  }
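A sketch of the context an agent might assemble before prompt assembly; the custom `intent` and `plan` keys are examples of blueprint-specific data, not fields defined above:

    const promptContext: PromptContext = {
      query: 'What is 5 * 5?',
      systemPrompt: 'You are a helpful assistant.',
      history: [
        { role: 'user', content: 'Hi' },
        { role: 'assistant', content: 'Hello! How can I help?' },
      ],
      availableTools: [{
        name: 'calculator',
        description: 'Evaluates arithmetic expressions.',
        inputSchema: { type: 'object', properties: { expression: { type: 'string' } } },
        inputSchemaJson: '{"type":"object","properties":{"expression":{"type":"string"}}}',
      }],
      intent: 'simple_math',                    // blueprint-specific extra
      plan: 'Use the calculator tool, then answer.',
    };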
195
568
  /**
196
569
  * Represents the prompt data formatted for a specific LLM provider.
197
570
  * Can be a simple string or a complex object (e.g., for OpenAI Chat Completion API).
571
+ * @deprecated Use `ArtStandardPrompt` as the standard intermediate format. ProviderAdapters handle final formatting.
198
572
  */
199
- type FormattedPrompt = string | object | Array<object>;
573
+ type FormattedPrompt = ArtStandardPrompt;
200
574
  /**
201
575
  * Options for filtering data retrieved from storage.
202
576
  * Structure depends heavily on the underlying adapter's capabilities.
203
577
  */
204
578
  interface FilterOptions {
579
+ /** An object defining filter criteria (e.g., `{ threadId: 'abc', type: 'TOOL_EXECUTION' }`). Structure may depend on adapter capabilities. */
205
580
  filter?: Record<string, any>;
581
+ /** An object defining sorting criteria (e.g., `{ timestamp: 'desc' }`). */
206
582
  sort?: Record<string, 'asc' | 'desc'>;
583
+ /** The maximum number of records to return. */
207
584
  limit?: number;
585
+ /** The number of records to skip (for pagination). */
208
586
  skip?: number;
209
587
  }
210
588
  /**
211
589
  * Options for retrieving conversation messages.
212
590
  */
213
591
  interface MessageOptions {
592
+ /** The maximum number of messages to retrieve. */
214
593
  limit?: number;
594
+ /** Retrieve messages created before this Unix timestamp (milliseconds). */
215
595
  beforeTimestamp?: number;
596
+ /** Retrieve messages created after this Unix timestamp (milliseconds). */
216
597
  afterTimestamp?: number;
598
+ /** Optionally filter messages by role (e.g., retrieve only 'AI' messages). */
599
+ roles?: MessageRole[];
217
600
  }
218
601
  /**
219
602
  * Options for filtering observations.
220
603
  */
221
604
  interface ObservationFilter {
605
+ /** An array of `ObservationType` enums to filter by. If provided, only observations matching these types are returned. */
222
606
  types?: ObservationType[];
607
+ /** Retrieve observations recorded before this Unix timestamp (milliseconds). */
223
608
  beforeTimestamp?: number;
609
+ /** Retrieve observations recorded after this Unix timestamp (milliseconds). */
224
610
  afterTimestamp?: number;
225
611
  }
226
612
 
227
- type UnsubscribeFunction = () => void;
228
- interface Subscription<DataType, FilterType> {
229
- id: string;
230
- callback: (data: DataType) => void;
231
- filter?: FilterType;
232
- options?: {
233
- threadId?: string;
234
- };
235
- }
613
+ type StreamEventTypeFilter = StreamEvent['type'] | Array<StreamEvent['type']>;
236
614
  /**
237
- * A generic class for implementing a publish/subscribe pattern with filtering capabilities.
238
- * Designed for decoupling components, particularly UI updates from backend events.
615
+ * A dedicated socket for broadcasting LLM stream events (`StreamEvent`) to UI subscribers.
616
+ * Extends the generic TypedSocket and implements filtering based on `StreamEvent.type`.
239
617
  */
240
- declare class TypedSocket$1<DataType, FilterType = any> {
241
- protected subscriptions: Map<string, Subscription<DataType, FilterType>>;
618
+ declare class LLMStreamSocket extends TypedSocket$1<StreamEvent, StreamEventTypeFilter> {
242
619
  constructor();
243
620
  /**
244
- * Subscribes a callback function to receive notifications.
245
- * @param callback - The function to call when new data is notified.
246
- * @param filter - An optional filter to only receive specific types of data.
247
- * @param options - Optional configuration, like a threadId for filtering.
248
- * @returns An unsubscribe function.
621
+ * Notifies subscribers about a new LLM stream event.
622
+ * Filters based on event type if a filter is provided during subscription.
623
+ * @param event - The StreamEvent data.
249
624
  */
250
- subscribe(callback: (data: DataType) => void, filter?: FilterType, options?: {
251
- threadId?: string;
252
- }): UnsubscribeFunction;
253
- /**
254
- * Notifies all relevant subscribers with new data.
255
- * @param data - The data payload to send to subscribers.
256
- * @param options - Optional targeting options (e.g., targetThreadId).
257
- * @param filterCheck - A function to check if a subscription's filter matches the data.
258
- */
259
- notify(data: DataType, options?: {
260
- targetThreadId?: string;
261
- targetSessionId?: string;
262
- }, // targetSessionId might be useful later
263
- filterCheck?: (data: DataType, filter?: FilterType) => boolean): void;
264
- /**
265
- * Optional: Retrieves historical data. This base implementation is empty.
266
- * Subclasses might implement this by interacting with repositories.
267
- */
268
- getHistory?(_filter?: FilterType, _options?: {
269
- threadId?: string;
270
- limit?: number;
271
- }): Promise<DataType[]>;
272
- /**
273
- * Clears all subscriptions. Useful for cleanup.
274
- */
275
- clearAllSubscriptions(): void;
625
+ notifyStreamEvent(event: StreamEvent): void;
276
626
  }
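For instance, a UI layer could subscribe only to token events (assuming `art` is an initialized `ArtInstance` and `renderToken` is an app-defined function):

    declare const art: ArtInstance;
    declare function renderToken(text: string): void;   // app-defined rendering helper

    const stopListening = art.uiSystem.getLLMStreamSocket().subscribe(
      (event) => {
        if (event.type === 'TOKEN') renderToken(String(event.data));
      },
      'TOKEN',                       // StreamEventTypeFilter: only TOKEN events
      { threadId: 'thread-1' }       // scope to a single thread
    );

    // Call stopListening() when the view is torn down.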
277
627
 
278
628
  /**
@@ -331,6 +681,13 @@ declare class ConversationSocket$1 extends TypedSocket$1<ConversationMessage, Me
331
681
  * Interface for the central agent orchestrator.
332
682
  */
333
683
  interface IAgentCore {
684
+ /**
685
+ * Processes a user query through the configured agent reasoning pattern (e.g., PES).
686
+ * Orchestrates interactions between various ART subsystems.
687
+ * @param props - The input properties for the agent execution, including the query, thread ID, and injected dependencies.
688
+ * @returns A promise that resolves with the final agent response and execution metadata.
689
+ * @throws {ARTError} If a critical error occurs during orchestration that prevents completion.
690
+ */
334
691
  process(props: AgentProps): Promise<AgentFinalResponse>;
335
692
  }
336
693
  /**
@@ -338,50 +695,55 @@ interface IAgentCore {
338
695
  */
339
696
  interface ReasoningEngine {
340
697
  /**
341
- * Calls the underlying LLM provider.
342
- * @param prompt The formatted prompt for the provider.
343
- * @param options Call-specific options, including threadId and callbacks.
344
- * @returns The raw string response from the LLM.
698
+ * Executes a call to the configured Large Language Model (LLM).
699
+ * This method is typically implemented by a specific `ProviderAdapter`.
700
+ * When streaming is requested via `options.stream`, it returns an AsyncIterable
701
+ * that yields `StreamEvent` objects as they are generated by the LLM provider.
702
+ * When streaming is not requested, it should still return an AsyncIterable
703
+ * that yields a minimal sequence of events (e.g., a single TOKEN event with the full response,
704
+ * a METADATA event if available, and an END event).
705
+ * @param prompt - The prompt to send to the LLM, potentially formatted specifically for the provider.
706
+ * @param options - Options controlling the LLM call, including mandatory `threadId`, tracing IDs, model parameters (like temperature), streaming preference, and call context.
707
+ * @returns A promise resolving to an AsyncIterable of `StreamEvent` objects.
708
+ * @throws {ARTError} If a critical error occurs during the initial call setup or if the stream itself errors out (typically code `LLM_PROVIDER_ERROR`).
345
709
  */
346
- call(prompt: FormattedPrompt, options: CallOptions): Promise<string>;
710
+ call(prompt: FormattedPrompt, options: CallOptions): Promise<AsyncIterable<StreamEvent>>;
347
711
  }
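For a non-streaming provider response, an adapter can still satisfy this contract by wrapping the full text in the minimal event sequence described above; a sketch (metadata supplied as available):

    async function* asEventStream(
      text: string,
      metadata: LLMMetadata,
      threadId: string,
      traceId: string
    ): AsyncGenerator<StreamEvent> {
      // Single response token carrying the full text, then metadata, then the end signal.
      yield { type: 'TOKEN', data: text, tokenType: 'LLM_RESPONSE', threadId, traceId };
      yield { type: 'METADATA', data: metadata, threadId, traceId };
      yield { type: 'END', data: null, threadId, traceId };
    }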
348
712
  /**
349
- * Interface for managing and constructing prompts for the LLM.
713
+ * Interface for the stateless prompt assembler.
714
+ * Uses a blueprint (template) and context provided by Agent Logic
715
+ * to create a standardized prompt format (`ArtStandardPrompt`).
350
716
  */
351
717
  interface PromptManager {
352
718
  /**
353
- * Creates the prompt for the planning phase.
354
- * @param query User query.
355
- * @param history Conversation history.
356
- * @param systemPrompt System prompt string.
357
- * @param availableTools Schemas of available tools.
358
- * @param threadContext Current thread context.
359
- * @returns Formatted prompt suitable for the ReasoningEngine.
719
+ * Retrieves a named prompt fragment (e.g., a piece of instruction text).
720
+ * Optionally allows for simple variable substitution if the fragment is a basic template.
721
+ *
722
+ * @param name - The unique identifier for the fragment.
723
+ * @param context - Optional data for simple variable substitution within the fragment.
724
+ * @returns The processed prompt fragment string.
725
+ * @throws {ARTError} If the fragment is not found.
360
726
  */
361
- createPlanningPrompt(query: string, history: ConversationMessage[], systemPrompt: string | undefined, availableTools: ToolSchema[], threadContext: ThreadContext): Promise<FormattedPrompt>;
727
+ getFragment(name: string, context?: Record<string, any>): string;
362
728
  /**
363
- * Creates the prompt for the synthesis phase.
364
- * @param query User query.
365
- * @param intent Parsed intent from planning.
366
- * @param plan Parsed plan from planning.
367
- * @param toolResults Results from tool execution.
368
- * @param history Conversation history.
369
- * @param systemPrompt System prompt string.
370
- * @param threadContext Current thread context.
371
- * @returns Formatted prompt suitable for the ReasoningEngine.
729
+ * Validates a constructed prompt object against the standard schema.
730
+ *
731
+ * @param prompt - The ArtStandardPrompt object constructed by the agent.
732
+ * @returns The validated prompt object (potentially after normalization if the schema does that).
733
+ * @throws {ZodError} If validation fails (can be caught and wrapped in ARTError).
372
734
  */
373
- createSynthesisPrompt(query: string, intent: string | undefined, // Or a structured intent object
374
- plan: string | undefined, // Or a structured plan object
375
- toolResults: ToolResult[], history: ConversationMessage[], systemPrompt: string | undefined, threadContext: ThreadContext): Promise<FormattedPrompt>;
735
+ validatePrompt(prompt: ArtStandardPrompt): ArtStandardPrompt;
376
736
  }
377
737
  /**
378
738
  * Interface for parsing structured output from LLM responses.
379
739
  */
380
740
  interface OutputParser {
381
741
  /**
382
- * Parses the output of the planning LLM call.
383
- * @param output Raw LLM output string.
384
- * @returns Structured planning data (intent, plan, tool calls).
742
+ * Parses the raw string output from the planning LLM call to extract structured information.
743
+ * Implementations should be robust to variations in LLM output formatting.
744
+ * @param output - The raw string response from the planning LLM call.
745
+ * @returns A promise resolving to an object containing the extracted intent, plan description, and an array of parsed tool calls.
746
+ * @throws {ARTError} If the output cannot be parsed into the expected structure (typically code `OUTPUT_PARSING_FAILED`).
385
747
  */
386
748
  parsePlanningOutput(output: string): Promise<{
387
749
  intent?: string;
@@ -389,9 +751,11 @@ interface OutputParser {
389
751
  toolCalls?: ParsedToolCall[];
390
752
  }>;
391
753
  /**
392
- * Parses the output of the synthesis LLM call.
393
- * @param output Raw LLM output string.
394
- * @returns The final synthesized response content.
754
+ * Parses the raw string output from the synthesis LLM call to extract the final, user-facing response content.
755
+ * This might involve removing extraneous tags or formatting.
756
+ * @param output - The raw string response from the synthesis LLM call.
757
+ * @returns A promise resolving to the clean, final response string.
758
+ * @throws {ARTError} If the final response cannot be extracted (typically code `OUTPUT_PARSING_FAILED`).
395
759
  */
396
760
  parseSynthesisOutput(output: string): Promise<string>;
397
761
  }
@@ -400,7 +764,10 @@ interface OutputParser {
400
764
  * Implementations will handle provider-specific API calls, authentication, etc.
401
765
  */
402
766
  interface ProviderAdapter extends ReasoningEngine {
767
+ /** The unique identifier name for this provider (e.g., 'openai', 'anthropic'). */
403
768
  readonly providerName: string;
769
+ /** Optional: Method for graceful shutdown */
770
+ shutdown?(): Promise<void>;
404
771
  }
405
772
  /**
406
773
  * Interface for the executable logic of a tool.
@@ -421,20 +788,21 @@ interface IToolExecutor {
421
788
  */
422
789
  interface ToolRegistry {
423
790
  /**
424
- * Registers a tool executor.
425
- * @param executor The tool executor instance.
791
+ * Registers a tool executor instance, making it available for use.
792
+ * @param executor - The instance of the class implementing `IToolExecutor`.
793
+ * @throws {Error} If a tool with the same name is already registered.
426
794
  */
427
795
  registerTool(executor: IToolExecutor): Promise<void>;
428
796
  /**
429
- * Retrieves a tool executor by its name.
430
- * @param toolName The unique name of the tool.
431
- * @returns The executor instance or undefined if not found.
797
+ * Retrieves a registered tool executor instance by its unique name.
798
+ * @param toolName - The `name` property defined in the tool's schema.
799
+ * @returns A promise resolving to the executor instance, or `undefined` if no tool with that name is registered.
432
800
  */
433
801
  getToolExecutor(toolName: string): Promise<IToolExecutor | undefined>;
434
802
  /**
435
- * Retrieves the schemas of all registered tools, potentially filtered.
436
- * @param filter Optional criteria (e.g., only enabled tools for a thread).
437
- * @returns An array of tool schemas.
803
+ * Retrieves the schemas of available tools. Can be filtered, e.g., to get only tools enabled for a specific thread.
804
+ * @param filter - Optional filter criteria. If `enabledForThreadId` is provided, it should consult the `StateManager` to return only schemas for tools enabled in that thread's configuration.
805
+ * @returns A promise resolving to an array of `ToolSchema` objects.
438
806
  */
439
807
  getAvailableTools(filter?: {
440
808
  enabledForThreadId?: string;
@@ -445,11 +813,12 @@ interface ToolRegistry {
445
813
  */
446
814
  interface ToolSystem {
447
815
  /**
448
- * Executes a list of parsed tool calls.
449
- * @param toolCalls Array of tool calls requested by the LLM.
450
- * @param threadId The current thread ID for context and permissions.
451
- * @param traceId Optional trace ID.
452
- * @returns A promise resolving to an array of tool results.
816
+ * Orchestrates the execution of a sequence of tool calls determined during the planning phase.
817
+ * This involves verifying permissions, validating inputs, calling the tool executor, and recording observations.
818
+ * @param toolCalls - An array of `ParsedToolCall` objects generated by the `OutputParser`.
819
+ * @param threadId - The ID of the current thread, used for context and checking tool permissions via `StateManager`.
820
+ * @param traceId - Optional trace ID for correlating observations.
821
+ * @returns A promise resolving to an array of `ToolResult` objects, one for each attempted tool call (including errors).
453
822
  */
454
823
  executeTools(toolCalls: ParsedToolCall[], threadId: string, traceId?: string): Promise<ToolResult[]>;
455
824
  }
@@ -458,36 +827,44 @@ interface ToolSystem {
458
827
  */
459
828
  interface StateManager {
460
829
  /**
461
- * Loads the full context (config + state) for a given thread.
462
- * @param threadId The ID of the thread.
463
- * @param userId Optional user ID for access control.
464
- * @returns The thread context.
830
+ * Loads the complete context (`ThreadConfig` and `AgentState`) for a specific thread.
831
+ * This is typically called at the beginning of an agent execution cycle.
832
+ * @param threadId - The unique identifier for the thread.
833
+ * @param userId - Optional user identifier, potentially used for retrieving user-specific state or config overrides.
834
+ * @returns A promise resolving to the `ThreadContext` object containing the loaded configuration and state.
835
+ * @throws {ARTError} If the context for the thread cannot be loaded (e.g., code `THREAD_NOT_FOUND`).
465
836
  */
466
837
  loadThreadContext(threadId: string, userId?: string): Promise<ThreadContext>;
467
838
  /**
468
- * Checks if a specific tool is enabled for the given thread based on its config.
469
- * @param threadId The ID of the thread.
470
- * @param toolName The name of the tool.
471
- * @returns True if the tool is enabled, false otherwise.
839
+ * Verifies if a specific tool is permitted for use within a given thread.
840
+ * Checks against the `enabledTools` array in the thread's loaded `ThreadConfig`.
841
+ * @param threadId - The ID of the thread.
842
+ * @param toolName - The name of the tool to check.
843
+ * @returns A promise resolving to `true` if the tool is enabled for the thread, `false` otherwise.
472
844
  */
473
845
  isToolEnabled(threadId: string, toolName: string): Promise<boolean>;
474
846
  /**
475
- * Retrieves a specific configuration value for the thread.
476
- * @param threadId The ID of the thread.
477
- * @param key The configuration key (potentially nested, e.g., 'reasoning.model').
478
- * @returns The configuration value or undefined.
847
+ * Retrieves a specific value from the thread's configuration (`ThreadConfig`).
848
+ * Supports accessing nested properties using dot notation (e.g., 'reasoning.model').
849
+ * @template T - The expected type of the configuration value.
850
+ * @param threadId - The ID of the thread.
851
+ * @param key - The key (potentially nested) of the configuration value to retrieve.
852
+ * @returns A promise resolving to the configuration value, or `undefined` if the key doesn't exist or the thread config isn't loaded.
479
853
  */
480
854
  getThreadConfigValue<T>(threadId: string, key: string): Promise<T | undefined>;
481
855
  /**
482
- * Saves the thread's state if it has been modified during execution.
483
- * Implementations should track changes to avoid unnecessary writes.
484
- * @param threadId The ID of the thread.
856
+ * Persists the `AgentState` for the thread, but only if it has been marked as modified during the current execution cycle.
857
+ * This prevents unnecessary writes to the storage layer.
858
+ * @param threadId - The ID of the thread whose state should potentially be saved.
859
+ * @returns A promise that resolves when the save operation is complete (or skipped).
485
860
  */
486
861
  saveStateIfModified(threadId: string): Promise<void>;
487
862
  /**
488
- * Sets or updates the configuration for a specific thread.
489
- * @param threadId The ID of the thread.
490
- * @param config The complete configuration object to set.
863
+ * Sets or completely replaces the configuration (`ThreadConfig`) for a specific thread.
864
+ * Use with caution, as this overwrites the existing configuration. Consider methods for partial updates if needed.
865
+ * @param threadId - The ID of the thread whose configuration is being set.
866
+ * @param config - The complete `ThreadConfig` object to save.
867
+ * @returns A promise that resolves when the configuration is saved.
491
868
  */
492
869
  setThreadConfig(threadId: string, config: ThreadConfig): Promise<void>;
493
870
  }
@@ -496,16 +873,18 @@ interface StateManager {
496
873
  */
497
874
  interface ConversationManager {
498
875
  /**
499
- * Adds one or more messages to a thread's history.
500
- * @param threadId The ID of the thread.
501
- * @param messages An array of messages to add.
876
+ * Appends one or more `ConversationMessage` objects to the history of a specific thread.
877
+ * Typically called at the end of an execution cycle to save the user query and the final AI response.
878
+ * @param threadId - The ID of the thread to add messages to.
879
+ * @param messages - An array containing the `ConversationMessage` objects to add.
880
+ * @returns A promise that resolves when the messages have been successfully added to storage.
502
881
  */
503
882
  addMessages(threadId: string, messages: ConversationMessage[]): Promise<void>;
504
883
  /**
505
- * Retrieves messages from a thread's history.
506
- * @param threadId The ID of the thread.
507
- * @param options Filtering and pagination options.
508
- * @returns An array of conversation messages.
884
+ * Retrieves messages from a specific thread's history, usually in reverse chronological order.
885
+ * @param threadId - The ID of the thread whose history is needed.
886
+ * @param options - Optional parameters to control retrieval, such as `limit` (max number of messages) or `beforeTimestamp` (for pagination). See `MessageOptions` type.
887
+ * @returns A promise resolving to an array of `ConversationMessage` objects, ordered according to the implementation (typically newest first if not specified otherwise).
509
888
  */
510
889
  getMessages(threadId: string, options?: MessageOptions): Promise<ConversationMessage[]>;
511
890
  }
@@ -514,16 +893,18 @@ interface ConversationManager {
514
893
  */
515
894
  interface ObservationManager {
516
895
  /**
517
- * Records a new observation. Automatically assigns ID, timestamp, and potentially title.
518
- * Notifies the ObservationSocket.
519
- * @param observationData Data for the observation (excluding id, timestamp, title).
896
+ * Creates, persists, and broadcasts a new observation record.
897
+ * This is the primary method used by other systems to log significant events.
898
+ * It automatically generates a unique ID, timestamp, and potentially a title.
899
+ * @param observationData - An object containing the core data for the observation (`threadId`, `type`, `content`, `metadata`, etc.), excluding fields generated by the manager (`id`, `timestamp`, `title`).
900
+ * @returns A promise that resolves when the observation has been recorded and notified.
520
901
  */
521
902
  record(observationData: Omit<Observation, 'id' | 'timestamp' | 'title'>): Promise<void>;
522
903
  /**
523
- * Retrieves observations for a specific thread, with optional filtering.
524
- * @param threadId The ID of the thread.
525
- * @param filter Optional filtering criteria.
526
- * @returns An array of observations.
904
+ * Retrieves historical observations stored for a specific thread.
905
+ * @param threadId - The ID of the thread whose observations are to be retrieved.
906
+ * @param filter - Optional criteria to filter the observations, e.g., by `ObservationType`. See `ObservationFilter`.
907
+ * @returns A promise resolving to an array of `Observation` objects matching the criteria.
527
908
  */
528
909
  getObservations(threadId: string, filter?: ObservationFilter): Promise<Observation[]>;
529
910
  }
@@ -577,8 +958,12 @@ interface ConversationSocket extends TypedSocket<ConversationMessage, MessageRol
577
958
  * Interface for the system providing access to UI communication sockets.
578
959
  */
579
960
  interface UISystem {
961
+ /** Returns the singleton instance of the ObservationSocket. */
580
962
  getObservationSocket(): ObservationSocket$1;
963
+ /** Returns the singleton instance of the ConversationSocket. */
581
964
  getConversationSocket(): ConversationSocket$1;
965
+ /** Returns the singleton instance of the LLMStreamSocket. */
966
+ getLLMStreamSocket(): LLMStreamSocket;
582
967
  }
583
968
  /**
584
969
  * Interface for a storage adapter, providing a generic persistence layer.
@@ -638,124 +1023,313 @@ interface IStateRepository {
638
1023
  setThreadContext(threadId: string, context: ThreadContext): Promise<void>;
639
1024
  }
640
1025
  /**
641
- * Represents the initialized ART instance returned by the factory function.
1026
+ * Represents the fully initialized and configured ART Framework client instance.
1027
+ * This object is the main entry point for interacting with the framework after setup.
1028
+ * It provides access to the core processing method and key subsystems.
642
1029
  */
643
1030
  interface ArtInstance {
644
- process: IAgentCore['process'];
645
- uiSystem: UISystem;
646
- stateManager: StateManager;
647
- conversationManager: ConversationManager;
648
- toolRegistry: ToolRegistry;
649
- observationManager: ObservationManager;
1031
+ /** The main method to process a user query using the configured Agent Core. */
1032
+ readonly process: IAgentCore['process'];
1033
+ /** Accessor for the UI System, used to get sockets for subscriptions. */
1034
+ readonly uiSystem: UISystem;
1035
+ /** Accessor for the State Manager, used for managing thread configuration and state. */
1036
+ readonly stateManager: StateManager;
1037
+ /** Accessor for the Conversation Manager, used for managing message history. */
1038
+ readonly conversationManager: ConversationManager;
1039
+ /** Accessor for the Tool Registry, used for managing available tools. */
1040
+ readonly toolRegistry: ToolRegistry;
1041
+ /** Accessor for the Observation Manager, used for recording and retrieving observations. */
1042
+ readonly observationManager: ObservationManager;
1043
+ }
1044
+
1045
+ /**
1046
+ * Defines the available logging levels, ordered from most verbose to least verbose.
1047
+ */
1048
+ declare enum LogLevel {
1049
+ /** Detailed debugging information, useful for development. */
1050
+ DEBUG = 0,
1051
+ /** General informational messages about application flow. */
1052
+ INFO = 1,
1053
+ /** Potential issues or unexpected situations that don't prevent execution. */
1054
+ WARN = 2,
1055
+ /** Errors that indicate a failure or problem. */
1056
+ ERROR = 3
1057
+ }
1058
+ /**
1059
+ * Configuration options for the static Logger class.
1060
+ */
1061
+ interface LoggerConfig {
1062
+ /** The minimum log level to output messages for. Messages below this level will be ignored. */
1063
+ level: LogLevel;
1064
+ /** An optional prefix string to prepend to all log messages (e.g., '[MyApp]'). Defaults to '[ART]'. */
1065
+ prefix?: string;
1066
+ }
1067
+ /**
1068
+ * A simple static logger class for outputting messages to the console at different levels.
1069
+ * Configuration is global via the static `configure` method.
1070
+ */
1071
+ declare class Logger {
1072
+ private static config;
1073
+ /**
1074
+ * Configures the static logger settings.
1075
+ * @param config - A partial `LoggerConfig` object. Provided settings will override defaults.
1076
+ */
1077
+ static configure(config: Partial<LoggerConfig>): void;
1078
+ /**
1079
+ * Logs a message at the DEBUG level.
1080
+ * Only outputs if the configured log level is DEBUG.
1081
+ * @param message - The main log message string.
1082
+ * @param args - Additional arguments to include in the console output (e.g., objects, arrays).
1083
+ */
1084
+ static debug(message: string, ...args: any[]): void;
1085
+ /**
1086
+ * Logs a message at the INFO level.
1087
+ * Outputs if the configured log level is INFO or DEBUG.
1088
+ * @param message - The main log message string.
1089
+ * @param args - Additional arguments to include in the console output.
1090
+ */
1091
+ static info(message: string, ...args: any[]): void;
1092
+ /**
1093
+ * Logs a message at the WARN level.
1094
+ * Outputs if the configured log level is WARN, INFO, or DEBUG.
1095
+ * @param message - The main log message string.
1096
+ * @param args - Additional arguments to include in the console output.
1097
+ */
1098
+ static warn(message: string, ...args: any[]): void;
1099
+ /**
1100
+ * Logs a message at the ERROR level.
1101
+ * Outputs if the configured log level is ERROR, WARN, INFO, or DEBUG.
1102
+ * @param message - The main log message string.
1103
+ * @param args - Additional arguments to include in the console output (often an error object).
1104
+ */
1105
+ static error(message: string, ...args: any[]): void;
650
1106
  }
651
1107
 
1108
+ /**
1109
+ * Configuration for the Storage System adapter.
1110
+ */
652
1111
  interface StorageConfig {
1112
+ /** Specifies the type of storage adapter to use. */
653
1113
  type: 'memory' | 'indexedDB';
1114
+ /** The name of the database to use (required for 'indexedDB'). */
654
1115
  dbName?: string;
1116
+ /** Optional: Database version for schema migrations (for 'indexedDB'). Defaults might apply. */
1117
+ version?: number;
1118
+ /** Optional: Advanced configuration for IndexedDB object stores and indexes. Defaults are usually sufficient. */
1119
+ objectStores?: any[];
655
1120
  }
656
- interface ReasoningConfig {
657
- provider: 'openai' | 'gemini' | 'anthropic' | 'openrouter' | 'deepseek';
658
- apiKey: string;
659
- model?: string;
660
- baseURL?: string;
661
- }
1121
+ /**
1122
+ * Configuration object required by the AgentFactory and createArtInstance function.
1123
+ */
662
1124
  interface AgentFactoryConfig {
1125
+ /** Configuration for the storage adapter. */
663
1126
  storage: StorageConfig;
664
- reasoning: ReasoningConfig;
1127
+ /** Configuration for the Provider Manager, defining available adapters and rules. */
1128
+ providers: ProviderManagerConfig;
1129
+ /** Optional array of tool executor instances to register at initialization. */
665
1130
  tools?: IToolExecutor[];
1131
+ /** Optional: Specify a different Agent Core implementation class (defaults to PESAgent). */
666
1132
  agentCore?: new (dependencies: any) => IAgentCore;
1133
+ /** Optional: Configuration for the logger. */
1134
+ logger?: {
1135
+ level?: LogLevel;
1136
+ };
667
1137
  }
668
1138
  /**
669
- * Creates and initializes an ART instance with the specified configuration.
670
- * This is the recommended way to get started with the ART framework.
671
- * @param config The configuration for the ART instance.
672
- * @returns A promise resolving to the initialized ArtInstance.
1139
+ * High-level factory function to create and initialize a complete ART framework instance.
1140
+ * This simplifies the setup process by handling the instantiation and wiring of all
1141
+ * necessary components based on the provided configuration.
1142
+ * @param config - The configuration object specifying storage, providers, tools, etc.
1143
+ * @returns A promise that resolves to a ready-to-use `ArtInstance` object, providing access to the core `process` method and essential managers/systems.
1144
+ * @throws {Error} If initialization fails (e.g., invalid config, storage connection error).
1145
+ * @example
1146
+ * const art = await createArtInstance({
1147
+ * storage: { type: 'indexedDB', dbName: 'myAgentDb' },
1148
+ * providers: { availableProviders: [{ name: 'openai', adapter: OpenAIAdapter, baseOptions: { apiKey: '...' } }] },
1149
+ * tools: [new CalculatorTool()]
1150
+ * });
1151
+ * const response = await art.process({ query: "Calculate 5*5", threadId: "thread1" });
673
1152
  */
674
1153
  declare function createArtInstance(config: AgentFactoryConfig): Promise<ArtInstance>;
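A fuller configuration sketch for the provider-based setup introduced in this version, using only fields declared above; the import path and the placeholder API keys are assumptions:

import {
  createArtInstance,
  OpenAIAdapter,
  GeminiAdapter,
  CalculatorTool,
  LogLevel,
} from 'art-framework';

const art = await createArtInstance({
  storage: { type: 'indexedDB', dbName: 'myAgentDb' },
  providers: {
    availableProviders: [
      { name: 'openai', adapter: OpenAIAdapter, baseOptions: { apiKey: '...' } },
      { name: 'gemini', adapter: GeminiAdapter, baseOptions: { apiKey: '...' } },
    ],
    maxParallelApiInstancesPerProvider: 2, // default: 5
    apiInstanceIdleTimeoutSeconds: 120,    // default: 300
  },
  tools: [new CalculatorTool()],
  logger: { level: LogLevel.INFO },
});

const response = await art.process({ query: 'What is 12 * 7?', threadId: 'thread-1' });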
675
1154
 
1155
+ /**
1156
+ * Defines the dependencies required by the PESAgent constructor.
1157
+ * These are typically provided by the AgentFactory during instantiation.
1158
+ */
676
1159
  interface PESAgentDependencies {
1160
+ /** Manages thread configuration and state. */
677
1161
  stateManager: StateManager;
1162
+ /** Manages conversation history. */
678
1163
  conversationManager: ConversationManager;
1164
+ /** Registry for available tools. */
679
1165
  toolRegistry: ToolRegistry;
680
- promptManager: PromptManager;
1166
+ /** Handles interaction with the LLM provider. */
681
1167
  reasoningEngine: ReasoningEngine;
1168
+ /** Parses LLM responses. */
682
1169
  outputParser: OutputParser;
1170
+ /** Records agent execution observations. */
683
1171
  observationManager: ObservationManager;
1172
+ /** Orchestrates tool execution. */
684
1173
  toolSystem: ToolSystem;
1174
+ /** Provides access to UI communication sockets. */
1175
+ uiSystem: UISystem;
685
1176
  }
686
1177
  /**
687
1178
  * Implements the Plan-Execute-Synthesize (PES) agent orchestration logic.
1179
+ * This agent follows a structured approach:
1180
+ * 1. **Plan:** Understand the user query, determine intent, and create a plan (potentially involving tool calls).
1181
+ * 2. **Execute:** Run any necessary tools identified in the planning phase.
1182
+ * 3. **Synthesize:** Generate a final response based on the query, plan, and tool results.
1183
+ *
1184
+ * It constructs standardized prompts (`ArtStandardPrompt`) directly as JavaScript objects
1185
+ * for the `ReasoningEngine`. It processes the `StreamEvent` output from the reasoning engine for both planning and synthesis.
1186
+ *
1187
+ * @implements {IAgentCore}
1188
+ * // @see {PromptManager} // Removed
1189
+ * @see {ReasoningEngine}
1190
+ * @see {ArtStandardPrompt}
1191
+ * @see {StreamEvent}
688
1192
  */
689
1193
  declare class PESAgent implements IAgentCore {
690
1194
  private readonly deps;
1195
+ private readonly defaultSystemPrompt;
1196
+ /**
1197
+ * Creates an instance of the PESAgent.
1198
+ * @param dependencies - An object containing instances of all required subsystems (managers, registries, etc.).
1199
+ */
691
1200
  constructor(dependencies: PESAgentDependencies);
1201
+ /**
1202
+ * Executes the full Plan-Execute-Synthesize cycle for a given user query.
1203
+ *
1204
+ * **Workflow:**
1205
+ * 1. **Initiation & Config:** Loads thread configuration and system prompt.
1206
+ * 2. **Data Gathering:** Gathers history, available tools, system prompt, and query.
1207
+ * 3. **Planning Prompt Construction:** Directly constructs the `ArtStandardPrompt` object/array for planning.
1208
+ * 4. **Planning LLM Call:** Sends the planning prompt object to the `reasoningEngine` (requesting streaming). Consumes the `StreamEvent` stream, buffers the output text, and handles potential errors.
1209
+ * 5. **Planning Output Parsing:** Parses the buffered planning output text to extract intent, plan, and tool calls using `outputParser.parsePlanningOutput`.
1210
+ * 6. **Tool Execution:** Executes identified tool calls via the `toolSystem`.
1211
+ * 7. **Data Gathering (Synthesis):** Gathers the original query, plan, tool results, history, etc.
1212
+ * 8. **Synthesis Prompt Construction:** Directly constructs the `ArtStandardPrompt` object/array for synthesis.
1213
+ * 9. **Synthesis LLM Call:** Sends the synthesis prompt object to the `reasoningEngine` (requesting streaming). Consumes the `StreamEvent` stream, buffers the final response text, and handles potential errors.
1214
+ * 10. **Finalization:** Saves the final AI message, updates state if needed, records observations, and returns the result.
1215
+ *
1216
+ * **Error Handling:**
1217
+ * - Errors during critical phases (e.g., configuration loading or the planning LLM call) will throw an `ARTError`. Prompt construction errors are less likely but possible if data is malformed.
1218
+ * - Errors during tool execution or synthesis LLM call might result in a 'partial' success status, potentially using the error message as the final response content.
1219
+ *
1220
+ * @param {AgentProps} props - The input properties containing the user query, threadId, userId, traceId, etc.
1221
+ * @returns {Promise<AgentFinalResponse>} A promise resolving to the final response, including the AI message and execution metadata.
1222
+ * @throws {ARTError} If a critical error occurs that prevents the agent from completing the process (e.g., config loading, planning failure).
1223
+ * @see {AgentProps}
1224
+ * @see {AgentFinalResponse}
1225
+ * // @see {PromptContext} // Removed - context is implicit in object construction
1226
+ * @see {ArtStandardPrompt}
1227
+ * @see {StreamEvent}
1228
+ */
692
1229
  process(props: AgentProps): Promise<AgentFinalResponse>;
1230
+ /**
1231
+ * Formats conversation history messages for direct inclusion in ArtStandardPrompt.
1232
+ * Converts internal MessageRole to ArtStandardMessageRole.
1233
+ * @param history - Array of ConversationMessage objects.
1234
+ * @returns Array of messages suitable for ArtStandardPrompt.
1235
+ */
1236
+ private formatHistoryForPrompt;
693
1237
  }
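A sketch showing how the agent core is selected through the factory: PESAgent is the default, but any class matching `new (dependencies: any) => IAgentCore` can be supplied via `agentCore` (shown explicitly here purely for illustration):

import { createArtInstance, PESAgent, OpenAIAdapter } from 'art-framework';

const art = await createArtInstance({
  storage: { type: 'memory' },
  providers: {
    availableProviders: [
      { name: 'openai', adapter: OpenAIAdapter, baseOptions: { apiKey: '...' } },
    ],
  },
  agentCore: PESAgent, // already the default; swap in a custom IAgentCore implementation if needed
});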
694
1238
 
695
1239
  /**
696
- * An in-memory implementation of the StorageAdapter interface.
697
- * Useful for testing, development, or simple scenarios where persistence
698
- * across sessions is not required.
1240
+ * An in-memory implementation of the `StorageAdapter` interface.
1241
+ * Stores all data in JavaScript Maps within the current process memory.
1242
+ * Data is **not persisted** and will be lost when the application session ends.
1243
+ *
1244
+ * Useful for:
1245
+ * - Unit and integration testing (fast, no external dependencies).
1246
+ * - Simple demos or examples where persistence isn't needed.
1247
+ * - Ephemeral agents that don't require long-term memory.
1248
+ *
1249
+ * @implements {StorageAdapter}
699
1250
  */
700
1251
  declare class InMemoryStorageAdapter implements StorageAdapter {
701
1252
  private storage;
702
1253
  /**
703
- * Initializes the adapter (no-op for in-memory).
704
- * @param _config Optional configuration (ignored).
1254
+ * Initializes the adapter. This is a no-op for the in-memory adapter.
1255
+ * @param _config - Optional configuration (ignored by this adapter).
1256
+ * @returns A promise that resolves immediately.
705
1257
  */
706
1258
  init(_config?: any): Promise<void>;
707
1259
  /**
708
- * Retrieves a single item from a collection by its ID.
709
- * @param collection The name of the data collection.
710
- * @param id The unique ID of the item.
711
- * @returns The item or null if not found.
1260
+ * Retrieves a single item (as a deep copy) from a specified collection by its ID.
1261
+ * @template T - The expected type of the retrieved item.
1262
+ * @param collection - The name of the data collection (e.g., 'messages', 'observations').
1263
+ * @param id - The unique ID of the item within the collection.
1264
+ * @returns A promise resolving to a deep copy of the item if found, or `null` otherwise.
712
1265
  */
713
1266
  get<T>(collection: string, id: string): Promise<T | null>;
714
1267
  /**
715
- * Saves (creates or updates) an item in a collection.
716
- * @param collection The name of the collection.
717
- * @param id The unique ID of the item.
718
- * @param data The data to save (will be deep copied).
1268
+ * Saves (creates or updates) an item in a specified collection.
1269
+ * Stores a deep copy of the provided data to prevent external mutations.
1270
+ * @template T - The type of the data being saved.
1271
+ * @param collection - The name of the collection.
1272
+ * @param id - The unique ID for the item.
1273
+ * @param data - The data object to save.
1274
+ * @returns A promise that resolves when the data is saved in memory.
719
1275
  */
720
1276
  set<T>(collection: string, id: string, data: T): Promise<void>;
721
1277
  /**
722
- * Deletes an item from a collection by its ID.
723
- * @param collection The name of the collection.
724
- * @param id The unique ID of the item.
1278
+ * Deletes an item from a specified collection using its ID.
1279
+ * If the collection or item does not exist, the operation completes silently.
1280
+ * @param collection - The name of the collection.
1281
+ * @param id - The unique ID of the item to delete.
1282
+ * @returns A promise that resolves when the deletion attempt is complete.
725
1283
  */
726
1284
  delete(collection: string, id: string): Promise<void>;
727
1285
  /**
728
- * Queries items in a collection based on simple filter options.
729
- * Note: This is a basic implementation. It only supports exact matches
730
- * on top-level properties defined in the filter object. It does not
731
- * support complex queries, sorting, or deep filtering.
732
- * @param collection The name of the collection.
733
- * @param filterOptions Filtering options. Only `filter` is partially supported.
734
- * @returns An array of matching items (deep copies).
1286
+ * Queries items within a collection based on provided filter options.
1287
+ * **Note:** This in-memory implementation provides basic filtering capabilities:
1288
+ * - Supports exact matches on top-level properties specified in `filterOptions.filter`.
1289
+ * - Supports limiting results via `filterOptions.limit`.
1290
+ * - **Does not** support sorting (`filterOptions.sort`), skipping (`filterOptions.skip`), complex operators (like $gt, $in), or nested property filtering.
1291
+ * @template T - The expected type of the items in the collection.
1292
+ * @param collection - The name of the collection to query.
1293
+ * @param filterOptions - Options for filtering and limiting the results.
1294
+ * @returns A promise resolving to an array of deep copies of the matching items.
735
1295
  */
736
1296
  query<T>(collection: string, filterOptions: FilterOptions): Promise<T[]>;
737
1297
  /**
738
- * Clears all items from a specific collection.
739
- * @param collection The name of the collection to clear.
1298
+ * Removes all items from a specific collection within the in-memory store.
1299
+ * @param collection - The name of the collection to clear.
1300
+ * @returns A promise that resolves when the collection is cleared.
740
1301
  */
741
1302
  clearCollection(collection: string): Promise<void>;
742
1303
  /**
743
- * Clears all data managed by the adapter.
1304
+ * Removes all collections and all data stored within the adapter instance.
1305
+ * Use with caution, especially during testing.
1306
+ * @returns A promise that resolves when all data is cleared.
744
1307
  */
745
1308
  clearAll(): Promise<void>;
746
1309
  }
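A minimal direct-usage sketch (the factory creates this adapter automatically when `storage.type` is 'memory'); the collection names follow the examples in the JSDoc above:

import { InMemoryStorageAdapter } from 'art-framework';

const store = new InMemoryStorageAdapter();
await store.init();

await store.set('messages', 'msg-1', { id: 'msg-1', threadId: 'thread-1', content: 'Hello' });

const message = await store.get('messages', 'msg-1'); // deep copy, or null if missing

// Exact-match filtering on top-level properties, plus an optional limit.
const recent = await store.query('messages', { filter: { threadId: 'thread-1' }, limit: 10 });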
747
1310
 
748
1311
  /**
749
- * Configuration options for the IndexedDBStorageAdapter.
1312
+ * Configuration options for initializing the `IndexedDBStorageAdapter`.
750
1313
  */
751
1314
  interface IndexedDBConfig {
1315
+ /** The name of the IndexedDB database to use. Defaults to 'ART_Framework_DB'. */
752
1316
  dbName?: string;
1317
+ /** The version of the database schema. Increment this when changing `objectStores` or indexes to trigger an upgrade. Defaults to 1. */
753
1318
  dbVersion?: number;
1319
+ /** An array of strings specifying the names of the object stores (collections) required by the application. Core stores like 'conversations', 'observations', 'state' are usually added automatically. */
754
1320
  objectStores: string[];
755
1321
  }
756
1322
  /**
757
- * An implementation of the StorageAdapter interface using IndexedDB
758
- * for persistent storage in the browser.
1323
+ * An implementation of the `StorageAdapter` interface that uses the browser's
1324
+ * IndexedDB API for persistent, client-side storage.
1325
+ *
1326
+ * This adapter is suitable for web applications where conversation history,
1327
+ * agent state, and observations need to persist across sessions.
1328
+ *
1329
+ * **Important:** The `init()` method *must* be called and awaited before performing
1330
+ * any other database operations (get, set, delete, query).
1331
+ *
1332
+ * @implements {StorageAdapter}
759
1333
  */
760
1334
  declare class IndexedDBStorageAdapter implements StorageAdapter {
761
1335
  private db;
@@ -763,78 +1337,235 @@ declare class IndexedDBStorageAdapter implements StorageAdapter {
763
1337
  private dbVersion;
764
1338
  private requiredObjectStores;
765
1339
  private initPromise;
1340
+ /**
1341
+ * Creates an instance of IndexedDBStorageAdapter.
1342
+ * Note: The database connection is not opened until `init()` is called.
1343
+ * @param config - Configuration options including database name, version, and required object stores.
1344
+ */
766
1345
  constructor(config: IndexedDBConfig);
767
1346
  /**
768
- * Initializes the IndexedDB database connection and ensures object stores exist.
769
- * This method should be called before any other operations.
1347
+ * Opens the IndexedDB database connection and ensures the required object stores
1348
+ * are created or updated based on the configured `dbVersion`.
1349
+ * This method MUST be called and awaited successfully before using other adapter methods.
1350
+ * It handles the `onupgradeneeded` event to create stores.
1351
+ * @returns A promise that resolves when the database is successfully opened and ready, or rejects on error.
770
1352
  */
771
1353
  init(): Promise<void>;
1354
+ /**
1355
+ * Helper method to create and return an IndexedDB transaction.
1356
+ * Ensures the database is initialized and the requested store(s) exist.
1357
+ * @param storeName - The name of the object store or an array of store names for the transaction.
1358
+ * @param mode - The transaction mode ('readonly' or 'readwrite').
1359
+ * @returns The initiated IDBTransaction.
1360
+ * @throws {Error} If the database is not initialized or if a requested store does not exist.
1361
+ */
772
1362
  private getTransaction;
1363
+ /**
1364
+ * Retrieves a single item by its ID from the specified object store (collection).
1365
+ * @template T - The expected type of the retrieved item.
1366
+ * @param collection - The name of the object store.
1367
+ * @param id - The ID (key) of the item to retrieve.
1368
+ * @returns A promise resolving to a copy of the item if found, or `null` otherwise.
1369
+ * @throws {Error} If the database is not initialized, the store doesn't exist, or a database error occurs.
1370
+ */
773
1371
  get<T>(collection: string, id: string): Promise<T | null>;
1372
+ /**
1373
+ * Saves (creates or updates) an item in the specified object store (collection).
1374
+ * Assumes the object store uses 'id' as its keyPath. The `id` parameter provided
1375
+ * should match the `id` property within the `data` object.
1376
+ * Uses `structuredClone` to store a deep copy.
1377
+ * @template T - The type of the data being saved. Must have an 'id' property.
1378
+ * @param collection - The name of the object store.
1379
+ * @param id - The unique ID of the item (should match `data.id`).
1380
+ * @param data - The data object to save. Must contain an `id` property matching the `id` parameter.
1381
+ * @returns A promise that resolves when the data is successfully saved.
1382
+ * @throws {Error} If the database is not initialized, the store doesn't exist, data is missing the 'id' property, or a database error occurs.
1383
+ */
774
1384
  set<T>(collection: string, id: string, data: T): Promise<void>;
1385
+ /**
1386
+ * Deletes an item from the specified object store (collection) by its ID.
1387
+ * @param collection - The name of the object store.
1388
+ * @param id - The ID (key) of the item to delete.
1389
+ * @returns A promise that resolves when the deletion is successful.
1390
+ * @throws {Error} If the database is not initialized, the store doesn't exist, or a database error occurs.
1391
+ */
775
1392
  delete(collection: string, id: string): Promise<void>;
1393
+ /**
1394
+ * Queries items within a collection based on provided filter options.
1395
+ * **Note:** This implementation uses `getAll()` and performs filtering, sorting,
1396
+ * and limiting **client-side**. For large datasets, performance may be suboptimal.
1397
+ * A more advanced version would leverage IndexedDB indexes and cursors for
1398
+ * efficient querying directly within the database.
1399
+ * Supports basic exact-match filtering and single-key sorting.
1400
+ * @template T - The expected type of the items in the collection.
1401
+ * @param collection - The name of the object store to query.
1402
+ * @param filterOptions - Options for filtering, sorting, skipping, and limiting results.
1403
+ * @returns A promise resolving to an array of deep copies of the matching items.
1404
+ * @throws {Error} If the database is not initialized, the store doesn't exist, or a database error occurs.
1405
+ */
776
1406
  query<T>(collection: string, filterOptions: FilterOptions): Promise<T[]>;
1407
+ /**
1408
+ * Removes all items from a specific object store (collection).
1409
+ * @param collection - The name of the object store to clear.
1410
+ * @returns A promise that resolves when the collection is successfully cleared.
1411
+ * @throws {Error} If the database is not initialized, the store doesn't exist, or a database error occurs.
1412
+ */
777
1413
  clearCollection(collection: string): Promise<void>;
1414
+ /**
1415
+ * Removes all data from all object stores managed by this adapter instance within the database.
1416
+ * Use with caution as this is destructive.
1417
+ * @returns A promise that resolves when all specified object stores have been cleared.
1418
+ * @throws {Error} If the database is not initialized or a transaction error occurs.
1419
+ */
778
1420
  clearAll(): Promise<void>;
779
1421
  }
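A browser-side usage sketch; `init()` must resolve before any other call, and stored objects must carry an `id` matching the key (per the JSDoc above):

import { IndexedDBStorageAdapter } from 'art-framework';

const store = new IndexedDBStorageAdapter({
  dbName: 'myAgentDb',
  dbVersion: 1,
  objectStores: ['conversations', 'observations', 'state'],
});

await store.init(); // opens the database and creates any missing object stores

await store.set('state', 'thread-1', { id: 'thread-1', config: { systemPrompt: 'Be concise.' } });
const state = await store.get('state', 'thread-1');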
780
1422
 
1423
+ /**
1424
+ * Configuration options required for the `GeminiAdapter`.
1425
+ */
781
1426
  interface GeminiAdapterOptions {
1427
+ /** Your Google AI API key (e.g., from Google AI Studio). Handle securely. */
782
1428
  apiKey: string;
1429
+ /** The default Gemini model ID to use (e.g., 'gemini-1.5-flash-latest', 'gemini-pro'). Defaults to 'gemini-1.5-flash-latest' if not provided. */
783
1430
  model?: string;
1431
+ /** Optional: Override the base URL for the Google Generative AI API. */
784
1432
  apiBaseUrl?: string;
1433
+ /** Optional: Specify the API version to use (e.g., 'v1beta'). Defaults to 'v1beta'. */
785
1434
  apiVersion?: string;
786
1435
  }
787
1436
  declare class GeminiAdapter implements ProviderAdapter {
788
1437
  readonly providerName = "gemini";
789
1438
  private apiKey;
790
- private model;
791
- private apiBaseUrl;
792
- private apiVersion;
1439
+ private defaultModel;
1440
+ private genAI;
1441
+ /**
1442
+ * Creates an instance of GeminiAdapter.
1443
+ * @param {GeminiAdapterOptions} options - Configuration options for the adapter.
1444
+ * @throws {Error} If `apiKey` is missing in the options.
1445
+ */
793
1446
  constructor(options: GeminiAdapterOptions);
794
1447
  /**
795
- * Calls the Google Generative AI API (Gemini).
796
- * Note: Assumes prompt is a string for basic user input.
797
- * Does not yet handle complex history or system prompts.
798
- * `onThought` is not implemented (requires streaming API).
799
- * @param prompt - Treated as the user message content.
800
- * @param options - Call options including LLM parameters.
801
- * @returns The content string from the API response.
1448
+ * Makes a call to the configured Gemini model.
1449
+ * Translates the `ArtStandardPrompt` into the Gemini API format, sends the request
1450
+ * using the `@google/genai` SDK, and yields `StreamEvent` objects representing
1451
+ * the response (tokens, metadata, errors, end signal).
1452
+ *
1453
+ * Handles both streaming and non-streaming requests based on `options.stream`.
1454
+ *
1455
+ * @param {ArtStandardPrompt} prompt - The standardized prompt messages.
1456
+ * @param {CallOptions} options - Options for the LLM call, including streaming preference, model override, and execution context.
1457
+ * @returns {Promise<AsyncIterable<StreamEvent>>} An async iterable that yields `StreamEvent` objects.
1458
+ * - `TOKEN`: Contains a chunk of the response text. `tokenType` indicates if it's part of agent thought or final synthesis.
1459
+ * - `METADATA`: Contains information like stop reason, token counts, and timing, yielded once at the end.
1460
+ * - `ERROR`: Contains any error encountered during translation, SDK call, or response processing.
1461
+ * - `END`: Signals the completion of the stream.
1462
+ * @see {ArtStandardPrompt}
1463
+ * @see {CallOptions}
1464
+ * @see {StreamEvent}
1465
+ * @see {LLMMetadata}
1466
+ */
1467
+ call(prompt: ArtStandardPrompt, options: CallOptions): Promise<AsyncIterable<StreamEvent>>;
1468
+ /**
1469
+ * Translates the provider-agnostic `ArtStandardPrompt` into the Gemini API's `Content[]` format.
1470
+ *
1471
+ * Key translations:
1472
+ * - `system` role: Merged into the first `user` message.
1473
+ * - `user` role: Maps to Gemini's `user` role.
1474
+ * - `assistant` role: Maps to Gemini's `model` role. Handles text content and `tool_calls` (mapped to `functionCall`).
1475
+ * - `tool_result` role: Maps to Gemini's `user` role with a `functionResponse` part.
1476
+ * - `tool_request` role: Skipped (implicitly handled by `assistant`'s `tool_calls`).
1477
+ *
1478
+ * Adds validation to ensure the conversation doesn't start with a 'model' role.
1479
+ *
1480
+ * @private
1481
+ * @param {ArtStandardPrompt} artPrompt - The input `ArtStandardPrompt` array.
1482
+ * @returns {Content[]} The `Content[]` array formatted for the Gemini API.
1483
+ * @throws {ARTError} If translation encounters an issue, such as a `tool_result` missing required fields (ErrorCode.PROMPT_TRANSLATION_FAILED).
1484
+ * @see https://ai.google.dev/api/rest/v1beta/Content
802
1485
  */
803
- call(prompt: FormattedPrompt, options: CallOptions): Promise<string>;
1486
+ private translateToGemini;
804
1487
  }
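A sketch of exposing this adapter through the ProviderManager, using the `AvailableProviderEntry` shape declared earlier (the API key placeholder is an assumption):

import { GeminiAdapter, type AvailableProviderEntry } from 'art-framework';

const geminiEntry: AvailableProviderEntry = {
  name: 'gemini',
  adapter: GeminiAdapter,
  baseOptions: {
    apiKey: '...',                     // required; handle securely
    model: 'gemini-1.5-flash-latest',  // optional default model
  },
};

// Pass [geminiEntry] as providers.availableProviders in createArtInstance.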
805
1488
 
1489
+ /**
1490
+ * Configuration options required for the `OpenAIAdapter`.
1491
+ */
806
1492
  interface OpenAIAdapterOptions {
1493
+ /** Your OpenAI API key. Handle securely. */
807
1494
  apiKey: string;
1495
+ /** The default OpenAI model ID to use (e.g., 'gpt-4o', 'gpt-4o-mini'). Defaults to 'gpt-3.5-turbo' if not provided. */
808
1496
  model?: string;
1497
+ /** Optional: Override the base URL for the OpenAI API (e.g., for Azure OpenAI or custom proxies). */
809
1498
  apiBaseUrl?: string;
810
1499
  }
1500
+ /**
1501
+ * Implements the `ProviderAdapter` interface for interacting with OpenAI's
1502
+ * Chat Completions API (compatible models like GPT-3.5, GPT-4, GPT-4o).
1503
+ *
1504
+ * Handles formatting requests and parsing responses for OpenAI.
1505
+ * Uses raw `fetch` for now.
1506
+ *
1507
+ * @implements {ProviderAdapter}
1508
+ */
811
1509
  declare class OpenAIAdapter implements ProviderAdapter {
812
1510
  readonly providerName = "openai";
813
1511
  private apiKey;
814
1512
  private model;
815
1513
  private apiBaseUrl;
1514
+ /**
1515
+ * Creates an instance of the OpenAIAdapter.
1516
+ * @param options - Configuration options including the API key and optional model/baseURL overrides.
1517
+ * @throws {Error} If the API key is missing.
1518
+ */
816
1519
  constructor(options: OpenAIAdapterOptions);
817
1520
  /**
818
- * Calls the OpenAI Chat Completions API.
819
- * Note: This basic implementation assumes the FormattedPrompt is a string
820
- * representing the user's message or a pre-formatted structure.
821
- * It doesn't yet handle complex history formatting or system prompts
822
- * directly from the FormattedPrompt type itself.
823
- * The `onThought` callback is not implemented in this non-streaming version.
824
- * @param prompt - For this basic version, treated as the primary user message content.
825
- * A more robust version would parse a structured prompt object.
826
- * @param options - Call options, including threadId, traceId, and LLM parameters.
827
- * @returns The content string from the API response.
1521
+ * Sends a request to the OpenAI Chat Completions API.
1522
+ * Translates `ArtStandardPrompt` to the OpenAI format, handles streaming and non-streaming responses.
1523
+ *
1524
+ * @param {ArtStandardPrompt} prompt - The standardized prompt messages.
1525
+ * @param {CallOptions} options - Call options, including `threadId`, `traceId`, `stream` preference, and any OpenAI-specific parameters (like `temperature`, `max_tokens`) passed through.
1526
+ * @returns {Promise<AsyncIterable<StreamEvent>>} A promise resolving to an AsyncIterable of StreamEvent objects.
1527
+ */
1528
+ call(prompt: ArtStandardPrompt, options: CallOptions): Promise<AsyncIterable<StreamEvent>>;
1529
+ /**
1530
+ * Processes the Server-Sent Events (SSE) stream from OpenAI.
1531
+ * @param stream - The ReadableStream from the fetch response.
1532
+ * @param options - The original CallOptions containing threadId, traceId, sessionId, and callContext.
1533
+ * @returns An AsyncIterable yielding StreamEvent objects.
1534
+ */
1535
+ private processStream;
1536
+ /**
1537
+ * Translates the provider-agnostic `ArtStandardPrompt` into the OpenAI API's `OpenAIMessage[]` format.
1538
+ *
1539
+ * @private
1540
+ * @param {ArtStandardPrompt} artPrompt - The input `ArtStandardPrompt` array.
1541
+ * @returns {OpenAIMessage[]} The `OpenAIMessage[]` array formatted for the OpenAI API.
1542
+ * @throws {ARTError} If translation encounters an issue (ErrorCode.PROMPT_TRANSLATION_FAILED).
828
1543
  */
829
- call(prompt: FormattedPrompt, options: CallOptions): Promise<string>;
1544
+ private translateToOpenAI;
830
1545
  }
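A direct-call sketch showing how the returned StreamEvent iterable can be consumed; in normal use the ReasoningEngine makes this call for you. The message shape and the CallOptions fields used below are assumptions for illustration only:

import { OpenAIAdapter, type ArtStandardPrompt, type CallOptions } from 'art-framework';

const adapter = new OpenAIAdapter({ apiKey: '...', model: 'gpt-4o-mini' });

// Assumed ArtStandardMessage shape ({ role, content }).
const prompt = [
  { role: 'system', content: 'You are a helpful assistant.' },
  { role: 'user', content: 'Say hello.' },
] as ArtStandardPrompt;

// Assumed CallOptions fields; stream, threadId, and traceId are referenced in the JSDoc above.
const events = await adapter.call(prompt, { stream: true, threadId: 'thread-1', traceId: 'trace-1' } as CallOptions);

for await (const event of events) {
  console.log(event); // TOKEN, METADATA, ERROR, or END events (see StreamEvent)
}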
831
1546
 
1547
+ /**
1548
+ * Configuration options required for the `AnthropicAdapter`.
1549
+ */
832
1550
  interface AnthropicAdapterOptions {
1551
+ /** Your Anthropic API key. Handle securely. */
833
1552
  apiKey: string;
1553
+ /** The default Anthropic model ID to use (e.g., 'claude-3-opus-20240229', 'claude-3-5-sonnet-20240620'). Defaults to 'claude-3-haiku-20240307' if not provided. */
834
1554
  model?: string;
1555
+ /** Optional: The Anthropic API version to target (e.g., '2023-06-01'). Defaults to '2023-06-01'. */
835
1556
  apiVersion?: string;
1557
+ /** Optional: Override the base URL for the Anthropic API. */
836
1558
  apiBaseUrl?: string;
837
1559
  }
1560
+ /**
1561
+ * Implements the `ProviderAdapter` interface for interacting with Anthropic's
1562
+ * Messages API (Claude models).
1563
+ *
1564
+ * Handles formatting requests and parsing responses for Anthropic.
1565
+ * Note: Streaming is **not yet implemented** for this adapter. Calls requesting streaming will yield an error and end.
1566
+ *
1567
+ * @implements {ProviderAdapter}
1568
+ */
838
1569
  declare class AnthropicAdapter implements ProviderAdapter {
839
1570
  readonly providerName = "anthropic";
840
1571
  private apiKey;
@@ -842,26 +1573,65 @@ declare class AnthropicAdapter implements ProviderAdapter {
842
1573
  private apiVersion;
843
1574
  private apiBaseUrl;
844
1575
  private defaultMaxTokens;
1576
+ /**
1577
+ * Creates an instance of the AnthropicAdapter.
1578
+ * @param options - Configuration options including the API key and optional model/apiVersion/baseURL overrides.
1579
+ * @throws {Error} If the API key is missing.
1580
+ */
845
1581
  constructor(options: AnthropicAdapterOptions);
846
1582
  /**
847
- * Calls the Anthropic Messages API.
848
- * Note: Assumes prompt is a string for basic user input.
849
- * Does not yet handle complex history or system prompts robustly.
850
- * `onThought` is not implemented (requires streaming API).
851
- * @param prompt - Treated as the user message content.
852
- * @param options - Call options including LLM parameters. Requires max_tokens/maxOutputTokens.
853
- * @returns The content string from the API response.
1583
+ * Sends a request to the Anthropic Messages API.
1584
+ * Translates `ArtStandardPrompt` to the Anthropic format.
1585
+ *
1586
+ * **Note:** Streaming is **not yet implemented**.
1587
+ *
1588
+ * @param {ArtStandardPrompt} prompt - The standardized prompt messages.
1589
+ * @param {CallOptions} options - Call options, including `threadId`, `traceId`, `stream`, and any Anthropic-specific generation parameters.
1590
+ * @returns {Promise<AsyncIterable<StreamEvent>>} A promise resolving to an AsyncIterable of StreamEvent objects. If streaming is requested, it yields an error event and ends.
1591
+ * @throws {ARTError} If `max_tokens` is missing in options (required by Anthropic).
1592
+ */
1593
+ call(prompt: ArtStandardPrompt, options: CallOptions): Promise<AsyncIterable<StreamEvent>>;
1594
+ /**
1595
+ * Translates the provider-agnostic `ArtStandardPrompt` into the Anthropic Messages API format.
1596
+ *
1597
+ * @private
1598
+ * @param {ArtStandardPrompt} artPrompt - The input `ArtStandardPrompt` array.
1599
+ * @returns {{ systemPrompt?: string; messages: AnthropicMessage[] }} The system prompt string and the `AnthropicMessage[]` array.
1600
+ * @throws {ARTError} If translation encounters an issue (ErrorCode.PROMPT_TRANSLATION_FAILED).
854
1601
  */
855
- call(prompt: FormattedPrompt, options: CallOptions): Promise<string>;
1602
+ private translateToAnthropic;
1603
+ /**
1604
+ * Helper to map ArtStandardMessage content/tool_calls to Anthropic Content Blocks.
1605
+ * @private
1606
+ */
1607
+ private mapArtContentToAnthropicBlocks;
856
1608
  }
857
1609
 
1610
+ /**
1611
+ * Configuration options required for the `OpenRouterAdapter`.
1612
+ */
858
1613
  interface OpenRouterAdapterOptions {
1614
+ /** Your OpenRouter API key. Handle securely. */
859
1615
  apiKey: string;
1616
+ /** The required OpenRouter model identifier string (e.g., 'google/gemini-pro', 'anthropic/claude-3-haiku', 'openai/gpt-4o'). This specifies which underlying model OpenRouter should use. */
860
1617
  model: string;
1618
+ /** Optional: Override the base URL for the OpenRouter API. Defaults to 'https://openrouter.ai/api/v1'. */
861
1619
  apiBaseUrl?: string;
1620
+ /** Optional: Your application's site URL, sent as the 'HTTP-Referer' header (recommended by OpenRouter). */
862
1621
  siteUrl?: string;
1622
+ /** Optional: Your application's name, sent as the 'X-Title' header (recommended by OpenRouter). */
863
1623
  appName?: string;
864
1624
  }
1625
+ /**
1626
+ * Implements the `ProviderAdapter` interface for interacting with the OpenRouter API,
1627
+ * which provides access to various LLMs through an OpenAI-compatible interface.
1628
+ *
1629
+ * Handles formatting requests and parsing responses for OpenRouter's chat completions endpoint.
1631
+ * Note: Streaming is **not yet implemented** for this adapter. Calls requesting streaming will yield an error and end.
1632
+ *
1633
+ * @implements {ProviderAdapter}
1634
+ */
865
1635
  declare class OpenRouterAdapter implements ProviderAdapter {
866
1636
  readonly providerName = "openrouter";
867
1637
  private apiKey;
@@ -869,69 +1639,139 @@ declare class OpenRouterAdapter implements ProviderAdapter {
869
1639
  private apiBaseUrl;
870
1640
  private siteUrl?;
871
1641
  private appName?;
1642
+ /**
1643
+ * Creates an instance of the OpenRouterAdapter.
1644
+ * @param options - Configuration options including the API key, the specific OpenRouter model identifier, and optional headers/baseURL.
1645
+ * @throws {Error} If the API key or model identifier is missing.
1646
+ */
872
1647
  constructor(options: OpenRouterAdapterOptions);
873
1648
  /**
874
- * Calls the OpenRouter Chat Completions API (OpenAI compatible).
875
- * Note: Assumes prompt is a string for basic user input.
876
- * Does not yet handle complex history or system prompts robustly.
877
- * `onThought` is not implemented (requires streaming API).
878
- * @param prompt - Treated as the user message content.
879
- * @param options - Call options including LLM parameters.
880
- * @returns The content string from the API response.
1649
+ * Sends a request to the OpenRouter Chat Completions API endpoint.
1650
+ * Translates `ArtStandardPrompt` to the OpenAI-compatible format.
1651
+ *
1652
+ * **Note:** Streaming is **not yet implemented**.
1653
+ *
1654
+ * @param {ArtStandardPrompt} prompt - The standardized prompt messages.
1655
+ * @param {CallOptions} options - Call options, including `threadId`, `traceId`, `stream`, and any OpenAI-compatible generation parameters.
1656
+ * @returns {Promise<AsyncIterable<StreamEvent>>} A promise resolving to an AsyncIterable of StreamEvent objects. If streaming is requested, it yields an error event and ends.
1657
+ */
1658
+ call(prompt: ArtStandardPrompt, options: CallOptions): Promise<AsyncIterable<StreamEvent>>;
1659
+ /**
1660
+ * Translates the provider-agnostic `ArtStandardPrompt` into the OpenAI API's `OpenAIMessage[]` format.
1661
+ * (Copied from OpenAIAdapter - assumes OpenRouter compatibility)
1662
+ *
1663
+ * @private
1664
+ * @param {ArtStandardPrompt} artPrompt - The input `ArtStandardPrompt` array.
1665
+ * @returns {OpenAIMessage[]} The `OpenAIMessage[]` array formatted for the OpenAI API.
1666
+ * @throws {ARTError} If translation encounters an issue (ErrorCode.PROMPT_TRANSLATION_FAILED).
881
1667
  */
882
- call(prompt: FormattedPrompt, options: CallOptions): Promise<string>;
1668
+ private translateToOpenAI;
883
1669
  }
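A construction sketch showing the OpenRouter-specific options; the model identifier is required, and siteUrl/appName map to the recommended identification headers:

import { OpenRouterAdapter } from 'art-framework';

const openrouter = new OpenRouterAdapter({
  apiKey: '...',
  model: 'anthropic/claude-3-haiku', // required OpenRouter model identifier
  siteUrl: 'https://example.com',    // sent as the HTTP-Referer header
  appName: 'My ART App',             // sent as the X-Title header
});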
884
1670
 
1671
+ /**
1672
+ * Configuration options required for the `DeepSeekAdapter`.
1673
+ */
885
1674
  interface DeepSeekAdapterOptions {
1675
+ /** Your DeepSeek API key. Handle securely. */
886
1676
  apiKey: string;
1677
+ /** The default DeepSeek model ID to use (e.g., 'deepseek-chat', 'deepseek-coder'). Defaults to 'deepseek-chat' if not provided. */
887
1678
  model?: string;
1679
+ /** Optional: Override the base URL for the DeepSeek API. Defaults to 'https://api.deepseek.com/v1'. */
888
1680
  apiBaseUrl?: string;
889
1681
  }
1682
+ /**
1683
+ * Implements the `ProviderAdapter` interface for interacting with the DeepSeek API,
1684
+ * which uses an OpenAI-compatible Chat Completions endpoint.
1685
+ *
1686
+ * Handles formatting requests and parsing responses for DeepSeek models.
1687
+ * Note: Streaming is **not yet implemented** for this adapter. Calls requesting streaming will yield an error and end.
1688
+ *
1689
+ * @implements {ProviderAdapter}
1690
+ */
890
1691
  declare class DeepSeekAdapter implements ProviderAdapter {
891
1692
  readonly providerName = "deepseek";
892
1693
  private apiKey;
893
1694
  private model;
894
1695
  private apiBaseUrl;
1696
+ /**
1697
+ * Creates an instance of the DeepSeekAdapter.
1698
+ * @param options - Configuration options including the API key and optional model/baseURL overrides.
1699
+ * @throws {Error} If the API key is missing.
1700
+ */
895
1701
  constructor(options: DeepSeekAdapterOptions);
896
1702
  /**
897
- * Calls the DeepSeek Chat Completions API (OpenAI compatible).
898
- * Note: Assumes prompt is a string for basic user input.
899
- * Does not yet handle complex history or system prompts robustly.
900
- * `onThought` is not implemented (requires streaming API).
901
- * @param prompt - Treated as the user message content.
902
- * @param options - Call options including LLM parameters.
903
- * @returns The content string from the API response.
1703
+ * Sends a request to the DeepSeek Chat Completions API endpoint.
1704
+ * Translates `ArtStandardPrompt` to the OpenAI-compatible format.
1705
+ *
1706
+ * **Note:** Streaming is **not yet implemented**.
1707
+ *
1708
+ * @param {ArtStandardPrompt} prompt - The standardized prompt messages.
1709
+ * @param {CallOptions} options - Call options, including `threadId`, `traceId`, `stream`, and any OpenAI-compatible generation parameters.
1710
+ * @returns {Promise<AsyncIterable<StreamEvent>>} A promise resolving to an AsyncIterable of StreamEvent objects. If streaming is requested, it yields an error event and ends.
1711
+ */
1712
+ call(prompt: ArtStandardPrompt, options: CallOptions): Promise<AsyncIterable<StreamEvent>>;
1713
+ /**
1714
+ * Translates the provider-agnostic `ArtStandardPrompt` into the OpenAI API's `OpenAIMessage[]` format.
1715
+ * (Copied from OpenAIAdapter - assumes DeepSeek compatibility)
1716
+ *
1717
+ * @private
1718
+ * @param {ArtStandardPrompt} artPrompt - The input `ArtStandardPrompt` array.
1719
+ * @returns {OpenAIMessage[]} The `OpenAIMessage[]` array formatted for the OpenAI API.
1720
+ * @throws {ARTError} If translation encounters an issue (ErrorCode.PROMPT_TRANSLATION_FAILED).
904
1721
  */
905
- call(prompt: FormattedPrompt, options: CallOptions): Promise<string>;
1722
+ private translateToOpenAI;
906
1723
  }
907
1724
 
1725
+ /**
1726
+ * An ART Framework tool that safely evaluates mathematical expressions using the mathjs library.
1727
+ * It supports basic arithmetic, variables via a scope, complex numbers, and a predefined list of safe functions.
1728
+ *
1729
+ * @implements {IToolExecutor}
1730
+ */
908
1731
  declare class CalculatorTool implements IToolExecutor {
1732
+ /** The unique name identifier for this tool. */
909
1733
  static readonly toolName = "calculator";
1734
+ /** Store for previous calculation results by threadId */
1735
+ private resultStore;
1736
+ /**
1737
+ * The schema definition for the CalculatorTool, conforming to the `ToolSchema` interface.
1738
+ * It defines the tool's name, description, input parameters (expression and optional scope),
1739
+ * and provides examples for the LLM.
1740
+ */
910
1741
  readonly schema: ToolSchema;
1742
+ /**
1743
+ * Executes the calculator tool by evaluating the provided mathematical expression.
1744
+ * It uses a restricted scope including only allowed mathjs functions and any variables
1745
+ * passed in the `input.scope`. Handles basic number and complex number results.
1746
+ *
1747
+ * @param input - An object containing the `expression` (string) and optional `scope` (object). Must match `inputSchema`.
1748
+ * @param context - The execution context containing `threadId`, `traceId`, etc.
1749
+ * @returns A promise resolving to a `ToolResult` object.
1750
+ * On success, `status` is 'success' and `output` is `{ result: number | string }`.
1751
+ * On failure, `status` is 'error' and `error` contains the error message.
1752
+ */
911
1753
  execute(input: any, context: ExecutionContext): Promise<ToolResult>;
912
1754
  }
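A direct-execution sketch (the ToolSystem normally invokes registered tools); the ExecutionContext literal is a minimal assumption used only for illustration:

import { CalculatorTool, type ExecutionContext } from 'art-framework';

const calculator = new CalculatorTool();

const result = await calculator.execute(
  { expression: 'a * b + 2', scope: { a: 3, b: 5 } },
  { threadId: 'thread-1', traceId: 'trace-1' } as ExecutionContext,
);

// Per the JSDoc above: on success result.status is 'success' and result.output is { result: 17 };
// on failure result.status is 'error' and result.error carries the message.
console.log(result);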
913
1755
 
914
- declare enum LogLevel {
915
- DEBUG = 0,
916
- INFO = 1,
917
- WARN = 2,
918
- ERROR = 3
919
- }
920
- interface LoggerConfig {
921
- level: LogLevel;
922
- prefix?: string;
923
- }
924
- declare class Logger {
925
- private static config;
926
- static configure(config: Partial<LoggerConfig>): void;
927
- static debug(message: string, ...args: any[]): void;
928
- static info(message: string, ...args: any[]): void;
929
- static warn(message: string, ...args: any[]): void;
930
- static error(message: string, ...args: any[]): void;
931
- }
932
-
1756
+ /**
1757
+ * Generates a unique Version 4 UUID (Universally Unique Identifier) string.
1758
+ * Uses the underlying 'uuid' library's v4 implementation.
1759
+ * @returns A randomly generated UUID string (e.g., "f47ac10b-58cc-4372-a567-0e02b2c3d479").
1760
+ */
933
1761
  declare const generateUUID: () => string;
934
1762
 
1763
+ /**
1764
+ * Main entry point for the ART Framework library.
1765
+ * This file exports the primary factory function (`createArtInstance`),
1766
+ * core components, adapters, types, interfaces, and utilities needed
1767
+ * to build and run ART agents.
1768
+ */
1769
+ /**
1770
+ * The main function to create and initialize an ART instance.
1771
+ * @see {@link ./core/agent-factory.ts} for implementation details.
1772
+ */
1773
+
1774
+ /** The current version of the ART Framework package. */
935
1775
  declare const VERSION = "0.2.4";
936
1776
 
937
- export { type AgentFinalResponse, type AgentOptions, type AgentProps, type AgentState, AnthropicAdapter, type ArtInstance, CalculatorTool, type CallOptions, type ConversationManager, type ConversationMessage, type ConversationSocket, DeepSeekAdapter, type ExecutionContext, type ExecutionMetadata, type FilterOptions, type FormattedPrompt, GeminiAdapter, type IAgentCore, type IConversationRepository, type IObservationRepository, type IStateRepository, type IToolExecutor, InMemoryStorageAdapter, IndexedDBStorageAdapter, type JsonObjectSchema, type JsonSchema, LogLevel, Logger, type MessageOptions, MessageRole, type Observation, type ObservationFilter, type ObservationManager, type ObservationSocket, ObservationType, OpenAIAdapter, OpenRouterAdapter, type OutputParser, PESAgent, type ParsedToolCall, type PromptManager, type ProviderAdapter, type ReasoningEngine, type StateManager, type StorageAdapter, type ThreadConfig, type ThreadContext, type ToolRegistry, type ToolResult, type ToolSchema, type ToolSystem, type TypedSocket, type UISystem, VERSION, createArtInstance, generateUUID };
1777
+ export { type AgentFinalResponse, type AgentOptions, type AgentProps, type AgentState, AnthropicAdapter, type ArtInstance, type ArtStandardMessage, type ArtStandardMessageRole, type ArtStandardPrompt, type AvailableProviderEntry, CalculatorTool, type CallOptions, type ConversationManager, type ConversationMessage, type ConversationSocket, DeepSeekAdapter, type ExecutionContext, type ExecutionMetadata, type FilterOptions, type FormattedPrompt, GeminiAdapter, type IAgentCore, type IConversationRepository, type IObservationRepository, type IProviderManager, type IStateRepository, type IToolExecutor, InMemoryStorageAdapter, IndexedDBStorageAdapter, type JsonObjectSchema, type JsonSchema, type LLMMetadata, LogLevel, Logger, type ManagedAdapterAccessor, type MessageOptions, MessageRole, ModelCapability, type Observation, type ObservationFilter, type ObservationManager, type ObservationSocket, ObservationType, OpenAIAdapter, OpenRouterAdapter, type OutputParser, PESAgent, type ParsedToolCall, type PromptContext, type PromptManager, type ProviderAdapter, type ProviderManagerConfig, type ReasoningEngine, type RuntimeProviderConfig, type StateManager, type StorageAdapter, type StreamEvent, type ThreadConfig, type ThreadContext, type ToolRegistry, type ToolResult, type ToolSchema, type ToolSystem, type TypedSocket, type UISystem, VERSION, createArtInstance, generateUUID };