@smythos/sre 1.7.42 → 1.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/CHANGELOG +448 -66
  2. package/dist/index.js +65 -50
  3. package/dist/index.js.map +1 -1
  4. package/dist/types/Components/Async.class.d.ts +11 -5
  5. package/dist/types/index.d.ts +2 -0
  6. package/dist/types/subsystems/AgentManager/AgentData.service/connectors/SQLiteAgentDataConnector.class.d.ts +45 -0
  7. package/dist/types/subsystems/LLMManager/LLM.helper.d.ts +32 -1
  8. package/dist/types/subsystems/LLMManager/LLM.inference.d.ts +25 -2
  9. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.d.ts +22 -2
  10. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.d.ts +2 -2
  11. package/dist/types/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.d.ts +27 -2
  12. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Groq.class.d.ts +22 -2
  13. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Ollama.class.d.ts +22 -2
  14. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.d.ts +3 -3
  15. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +23 -3
  16. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.d.ts +2 -2
  17. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.d.ts +2 -2
  18. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.d.ts +2 -2
  19. package/dist/types/subsystems/LLMManager/LLM.service/connectors/xAI.class.d.ts +3 -3
  20. package/dist/types/subsystems/MemoryManager/LLMContext.d.ts +10 -3
  21. package/dist/types/subsystems/ObservabilityManager/Telemetry.service/connectors/OTel/OTel.class.d.ts +24 -0
  22. package/dist/types/subsystems/ObservabilityManager/Telemetry.service/connectors/OTel/OTel.redaction.helper.d.ts +49 -0
  23. package/dist/types/types/LLM.types.d.ts +30 -1
  24. package/package.json +4 -3
  25. package/src/Components/APICall/OAuth.helper.ts +16 -1
  26. package/src/Components/APIEndpoint.class.ts +11 -4
  27. package/src/Components/Async.class.ts +38 -5
  28. package/src/Components/GenAILLM.class.ts +13 -7
  29. package/src/Components/ImageGenerator.class.ts +32 -13
  30. package/src/Components/LLMAssistant.class.ts +3 -1
  31. package/src/Components/LogicAND.class.ts +13 -0
  32. package/src/Components/LogicAtLeast.class.ts +18 -0
  33. package/src/Components/LogicAtMost.class.ts +19 -0
  34. package/src/Components/LogicOR.class.ts +12 -2
  35. package/src/Components/LogicXOR.class.ts +11 -0
  36. package/src/constants.ts +1 -1
  37. package/src/helpers/Conversation.helper.ts +10 -8
  38. package/src/index.ts +2 -0
  39. package/src/index.ts.bak +2 -0
  40. package/src/subsystems/AgentManager/AgentData.service/connectors/SQLiteAgentDataConnector.class.ts +190 -0
  41. package/src/subsystems/AgentManager/AgentData.service/index.ts +2 -0
  42. package/src/subsystems/LLMManager/LLM.helper.ts +117 -1
  43. package/src/subsystems/LLMManager/LLM.inference.ts +136 -67
  44. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +22 -6
  45. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +157 -33
  46. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +9 -8
  47. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +124 -90
  48. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +125 -62
  49. package/src/subsystems/LLMManager/LLM.service/connectors/Ollama.class.ts +168 -76
  50. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +18 -8
  51. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +8 -4
  52. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +50 -8
  53. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +30 -16
  54. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +2 -2
  55. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +29 -15
  56. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +10 -8
  57. package/src/subsystems/MemoryManager/LLMContext.ts +27 -8
  58. package/src/subsystems/ObservabilityManager/Telemetry.service/connectors/OTel/OTel.class.ts +313 -85
  59. package/src/subsystems/ObservabilityManager/Telemetry.service/connectors/OTel/OTel.redaction.helper.ts +203 -0
  60. package/src/types/LLM.types.ts +31 -1
  61. package/src/types/node-sqlite.d.ts +45 -0
package/dist/types/Components/Async.class.d.ts
@@ -1,17 +1,23 @@
  import { IAgent as Agent } from '@sre/types/Agent.types';
  import { Component } from './Component.class';
+ interface AsyncResult {
+     JobID: string;
+     _debug?: string;
+     [key: string]: any;
+ }
+ interface AsyncErrorResult {
+     _error: string;
+     _debug: string;
+ }
  export declare class Async extends Component {
      static JOBS: {};
      protected configSchema: any;
      static ForkedAgent: any;
      constructor();
      init(): void;
-     process(input: any, config: any, agent: Agent): Promise<{
-         JobID: any;
-     } | {
-         JobID?: undefined;
-     }>;
+     process(input: any, config: any, agent: Agent): Promise<AsyncResult | AsyncErrorResult>;
      private cleanJobIDBranch;
      private removeOrphanedBranches;
      private removeComponent;
  }
+ export {};
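The two new result shapes form a discriminated union. A minimal usage sketch (hedged: `asyncComponent`, `input`, `config`, and `agent` are assumed to be in scope):

```typescript
const result = await asyncComponent.process(input, config, agent);
if ('JobID' in result) {
    // AsyncResult: the forked job was started
    console.log('Forked job started:', result.JobID);
} else {
    // AsyncErrorResult: _error and _debug describe the failure
    console.error('Async failed:', result._error, result._debug);
}
```

Checking `'JobID' in result` narrows cleanly because `AsyncErrorResult` declares no index signature.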
package/dist/types/index.d.ts
@@ -161,6 +161,7 @@ export * from './subsystems/Security/Vault.service/VaultConnector';
  export * from './subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class';
  export * from './subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class';
  export * from './subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class';
+ export * from './subsystems/AgentManager/AgentData.service/connectors/SQLiteAgentDataConnector.class';
  export * from './subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class';
  export * from './subsystems/AgentManager/Scheduler.service/connectors/LocalScheduler.class';
  export * from './subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class';
@@ -205,6 +206,7 @@ export * from './subsystems/Security/Vault.service/connectors/SecretsManager.class';
  export * from './subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class';
  export * from './subsystems/LLMManager/LLM.service/connectors/openai/types';
  export * from './subsystems/ObservabilityManager/Telemetry.service/connectors/OTel/OTel.class';
+ export * from './subsystems/ObservabilityManager/Telemetry.service/connectors/OTel/OTel.redaction.helper';
  export * from './subsystems/ObservabilityManager/Telemetry.service/connectors/OTel/OTelContextRegistry';
  export * from './subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants';
  export * from './subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils';
package/dist/types/subsystems/AgentManager/AgentData.service/connectors/SQLiteAgentDataConnector.class.d.ts
@@ -0,0 +1,45 @@
+ import { AgentDataConnector } from '../AgentDataConnector';
+ export type SQLiteAgentDataSettings = {
+     databasePath: string;
+     tableName?: string;
+ };
+ type SQLiteRunResult = {
+     changes?: number;
+     lastInsertRowid?: number | bigint;
+ };
+ export interface SQLiteStatementAdapter<T = any> {
+     all(...params: any[]): T[];
+     get(...params: any[]): T | undefined;
+     run(...params: any[]): SQLiteRunResult;
+ }
+ export interface SQLiteDatabaseAdapter {
+     prepare<T = any>(sql: string): SQLiteStatementAdapter<T>;
+     exec(sql: string): void;
+     close(): void;
+ }
+ export type SQLiteAdapterFactory = (databasePath: string) => SQLiteDatabaseAdapter;
+ export declare class SQLiteAgentDataConnector extends AgentDataConnector {
+     protected _settings: SQLiteAgentDataSettings;
+     name: string;
+     private adapter;
+     private readonly tableName;
+     constructor(_settings: SQLiteAgentDataSettings);
+     getAgentConfig(agentId: string): Partial<SQLiteAgentDataSettings>;
+     start(): Promise<void>;
+     stop(): Promise<void>;
+     getAgentData(agentId: string, version?: string): Promise<{
+         data: any;
+         version: any;
+     }>;
+     getAgentIdByDomain(domain: string): Promise<string>;
+     getAgentSettings(agentId: string, version?: string): Promise<any>;
+     getAgentEmbodiments(agentId: string): Promise<any>;
+     listTeamAgents(teamId: string, deployedOnly?: boolean, includeData?: boolean): Promise<any[]>;
+     isDeployed(agentId: string): Promise<boolean>;
+     private ensureDatabaseDirectory;
+     private ensureSchema;
+     private validateTableName;
+     private prepareStatement;
+     private parseData;
+ }
+ export {};
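The adapter interfaces mirror the synchronous SQLite driver API, so a factory can be a thin wrapper. A hedged sketch using better-sqlite3, whose `prepare().all/get/run`, `exec`, and `close` match the adapter's surface (the cast papers over generic-signature differences in its typings; the import path from the package root is an assumption, and the diff does not show where the factory is consumed — the new `node-sqlite.d.ts` file suggests `node:sqlite` is another intended backend):

```typescript
import Database from 'better-sqlite3';
import type { SQLiteAdapterFactory, SQLiteDatabaseAdapter } from '@smythos/sre';

// better-sqlite3's Database already exposes prepare()/exec()/close(),
// and its statements expose all()/get()/run() returning { changes, lastInsertRowid }.
const betterSqliteFactory: SQLiteAdapterFactory = (databasePath) =>
    new Database(databasePath) as unknown as SQLiteDatabaseAdapter;
```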
package/dist/types/subsystems/LLMManager/LLM.helper.d.ts
@@ -1,4 +1,4 @@
- import { type TLLMMessageBlock } from '@sre/types/LLM.types';
+ import { type TLLMMessageBlock, TLLMFinishReason } from '@sre/types/LLM.types';
  export declare class LLMHelper {
      /**
       * Checks if the given array of messages contains a system message.
@@ -152,4 +152,35 @@ export declare class LLMHelper {
       * console.log(isClaude4); // false
       */
      static isClaude4Family(modelId: string): boolean;
+     /**
+      * Normalizes provider-specific finish reason values to TLLMFinishReason enum.
+      * Handles provider-specific values from OpenAI, Anthropic, Google AI, and other providers.
+      *
+      * @param finishReason - The finish reason from the provider (can be string, null, or undefined)
+      * @returns Normalized TLLMFinishReason enum value
+      *
+      * @example
+      * const normalized = LLMHelper.normalizeFinishReason('end_turn');
+      * console.log(normalized); // TLLMFinishReason.Stop
+      *
+      * @example
+      * const normalized = LLMHelper.normalizeFinishReason('tool_use');
+      * console.log(normalized); // TLLMFinishReason.ToolCalls
+      *
+      * @example
+      * const normalized = LLMHelper.normalizeFinishReason('SAFETY');
+      * console.log(normalized); // TLLMFinishReason.ContentFilter
+      */
+     static normalizeFinishReason(finishReason: string | null | undefined): TLLMFinishReason;
+     /**
+      * Gets a user-friendly error message based on the finish reason.
+      *
+      * @param finishReason - The normalized finish reason enum value
+      * @returns User-friendly error message explaining why the response was interrupted
+      *
+      * @example
+      * const message = LLMHelper.getFinishReasonErrorMessage(TLLMFinishReason.Length);
+      * console.log(message); // "Empty response. This is usually due to output token limit reached..."
+      */
+     static getFinishReasonErrorMessage(finishReason: TLLMFinishReason): string;
  }
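A usage sketch of the two new helpers (hedged: the root import path is illustrative, and the `'max_tokens'` → `Length` mapping is an assumption based on the enum docs; only the three mappings in the `@example` blocks above are confirmed):

```typescript
import { LLMHelper, TLLMFinishReason } from '@smythos/sre';

const reason = LLMHelper.normalizeFinishReason('max_tokens');
// Warn on any termination other than a clean stop or a tool handoff.
if (reason !== TLLMFinishReason.Stop && reason !== TLLMFinishReason.ToolCalls) {
    console.warn(LLMHelper.getFinishReasonErrorMessage(reason));
}
```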
package/dist/types/subsystems/LLMManager/LLM.inference.d.ts
@@ -61,14 +61,37 @@ export declare class LLMInference {
      private getSafeFallbackParams;
      /**
       * Executes fallback logic for custom models when the primary model fails.
-      * This method checks if a fallback model is configured and invokes the appropriate LLM method.
+      * Checks if a fallback model is configured and switches to it.
       * Prevents infinite loops by passing a flag to indicate we're in a fallback attempt.
       *
+      * **Important**: This method should only be called for custom models (already verified by caller).
+      *
       * @param methodName - The name of the method being called ('prompt' or 'promptStream')
       * @param args - The original arguments passed to the method
-      * @returns The result from the fallback execution, or null if fallback should not be attempted
+      * @returns The result from the fallback execution, or null if no fallback is configured
       */
      private executeFallback;
+     /**
+      * Wraps an emitter with fallback capability using a proxy pattern.
+      * This creates a transparent proxy that forwards all events from the source emitter.
+      * On error, it attempts to switch to a fallback model and seamlessly redirects events.
+      *
+      * **Important**: This method is only called for custom models that have fallback configured.
+      * Regular models return their emitters directly without wrapping, so errors flow naturally.
+      *
+      * **Design Pattern**: Proxy/Decorator with listener-based event forwarding
+      * **Coupling**: Minimal - reads event types from TLLMEvent enum (single source of truth)
+      * **Reliability**: Uses listeners (not emit interception) to avoid timing issues with async emits
+      *
+      * Note: We use the TLLMEvent enum as the source of truth for all event types.
+      * This provides a good balance between decoupling and reliability. The enum already
+      * defines all possible LLM events, and connectors emit these standard events.
+      *
+      * @param sourceEmitter - The custom model's event emitter
+      * @param args - The original prompt arguments for fallback execution
+      * @returns A proxy emitter that transparently handles primary/fallback switching
+      */
+     private wrapWithFallback;
      imageGenRequest({ query, files, params }: TPromptParams): Promise<any>;
      imageEditRequest({ query, files, params }: TPromptParams): Promise<any>;
      streamRequest(params: any, agent: string | IAgent): Promise<any>;
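A minimal standalone sketch of the proxy/listener pattern the `wrapWithFallback` doc comment describes (hedged: the event names approximate TLLMEvent values, and `startFallback` is a hypothetical stand-in for re-running the prompt against the fallback model):

```typescript
import { EventEmitter } from 'node:events';

const EVENTS = ['content', 'data', 'thinking', 'toolInfo', 'error', 'abort', 'end'];

function wrapWithFallback(source: EventEmitter, startFallback: () => Promise<EventEmitter>): EventEmitter {
    const proxy = new EventEmitter();
    const forward = (from: EventEmitter) =>
        EVENTS.forEach((ev) => from.on(ev, (...args) => proxy.emit(ev, ...args)));

    // Forward everything except errors from the primary emitter.
    for (const ev of EVENTS.filter((e) => e !== 'error')) {
        source.on(ev, (...args) => proxy.emit(ev, ...args));
    }
    // On primary failure, splice the fallback emitter into the same proxy.
    source.once('error', async (err) => {
        try {
            forward(await startFallback());
        } catch {
            proxy.emit('error', err); // no fallback available: surface the original error
        }
    });
    return proxy;
}
```

Because consumers only ever hold the proxy, the primary-to-fallback switch is invisible to them, which is the point of the listener-based design over emit interception.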
package/dist/types/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.d.ts
@@ -6,8 +6,28 @@ export declare class AnthropicConnector extends LLMConnector {
      name: string;
      private validImageMimeTypes;
      private getClient;
-     protected request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
-     protected streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter>;
+     protected request({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
+     /**
+      * Stream request implementation.
+      *
+      * **Error Handling Pattern:**
+      * - Always returns emitters, never throws errors - ensures consistent error handling
+      * - Uses setImmediate for event emission - prevents race conditions where events fire before listeners attach
+      * - Emits End after terminal events (Error, Abort) - ensures cleanup code always runs
+      *
+      * **Why setImmediate?**
+      * Since streamRequest is async, callers must await to get the emitter, creating a timing gap.
+      * setImmediate defers event emission to the next event loop tick, ensuring events fire AFTER
+      * listeners are attached. This prevents race conditions where synchronous event emission
+      * would occur before listeners can be registered.
+      *
+      * @param acRequest - Access request for authorization
+      * @param body - Request body parameters
+      * @param context - LLM request context
+      * @param abortSignal - AbortSignal for cancellation
+      * @returns EventEmitter that emits TLLMEvent events (Data, Content, Error, Abort, End, etc.)
+      */
+     protected streamRequest({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<EventEmitter>;
      protected reqBodyAdapter(params: TLLMPreparedParams): Promise<TAnthropicRequestBody>;
      protected reportUsage(usage: Anthropic.Messages.Usage & {
          cache_creation_input_tokens?: number;
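The same "Stream request implementation" contract is documented on the GoogleAI, Groq, Ollama, and OpenAI connectors below. A minimal standalone sketch of the setImmediate pattern it describes (hedged: `startStream` is a hypothetical stand-in for opening the provider stream):

```typescript
import { EventEmitter } from 'node:events';

async function streamRequestSketch(startStream: () => Promise<AsyncIterable<string>>): Promise<EventEmitter> {
    const emitter = new EventEmitter();
    try {
        const stream = await startStream();
        // Consume in the background; emit chunks as they arrive.
        (async () => {
            for await (const chunk of stream) emitter.emit('content', chunk);
            emitter.emit('end');
        })().catch((err) => {
            emitter.emit('error', err);
            emitter.emit('end'); // terminal event always follows, so cleanup runs
        });
    } catch (error) {
        // Failure before the stream opened: defer so listeners attach first.
        setImmediate(() => {
            emitter.emit('error', error);
            emitter.emit('end');
        });
    }
    return emitter;
}
```

Without the `setImmediate`, a synchronous failure would emit `error` before the caller's `await` resolves and listeners attach, and the event would be lost.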
package/dist/types/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.d.ts
@@ -5,8 +5,8 @@ import { LLMConnector } from '../LLMConnector';
  export declare class BedrockConnector extends LLMConnector {
      name: string;
      private getClient;
-     protected request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
-     protected streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter>;
+     protected request({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
+     protected streamRequest({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<EventEmitter>;
      protected reqBodyAdapter(params: TLLMPreparedParams): Promise<ConverseCommandInput>;
      protected reportUsage(usage: TokenUsage & {
          cacheReadInputTokenCount: number;
package/dist/types/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.d.ts
@@ -10,8 +10,28 @@ export declare class GoogleAIConnector extends LLMConnector {
      name: string;
      private validMimeTypes;
      private getClient;
-     protected request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
-     protected streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter>;
+     protected request({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
+     /**
+      * Stream request implementation.
+      *
+      * **Error Handling Pattern:**
+      * - Always returns emitters, never throws errors - ensures consistent error handling
+      * - Uses setImmediate for event emission - prevents race conditions where events fire before listeners attach
+      * - Emits End after terminal events (Error, Abort) - ensures cleanup code always runs
+      *
+      * **Why setImmediate?**
+      * Since streamRequest is async, callers must await to get the emitter, creating a timing gap.
+      * setImmediate defers event emission to the next event loop tick, ensuring events fire AFTER
+      * listeners are attached. This prevents race conditions where synchronous event emission
+      * would occur before listeners can be registered.
+      *
+      * @param acRequest - Access request for authorization
+      * @param body - Request body parameters
+      * @param context - LLM request context
+      * @param abortSignal - AbortSignal for cancellation
+      * @returns EventEmitter that emits TLLMEvent events (Data, Content, Error, Abort, End, etc.)
+      */
+     protected streamRequest({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<EventEmitter>;
      protected imageGenRequest({ body, context }: ILLMRequestFuncParams): Promise<any>;
      protected imageEditRequest({ body, context }: ILLMRequestFuncParams): Promise<any>;
      protected reqBodyAdapter(params: TLLMPreparedParams): Promise<TGoogleAIRequestBody>;
@@ -68,6 +88,11 @@ export declare class GoogleAIConnector extends LLMConnector {
          toolsData: ToolData[];
      }): TLLMToolResultMessageBlock[];
      getConsistentMessages(messages: TLLMMessageBlock[]): TLLMMessageBlock[];
+     /**
+      * Extracts text content from a message block, handling multiple formats (.parts, .content as string/array)
+      * This ensures compatibility with messages that have been normalized by getConsistentMessages or come in various formats
+      */
+     private extractMessageContent;
      private prepareMessages;
      private prepareMessagesWithFiles;
      private prepareMessagesWithTools;
package/dist/types/subsystems/LLMManager/LLM.service/connectors/Groq.class.d.ts
@@ -18,8 +18,28 @@ type ChatCompletionCreateParams = {
  export declare class GroqConnector extends LLMConnector {
      name: string;
      private getClient;
-     protected request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
-     protected streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter>;
+     protected request({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
+     /**
+      * Stream request implementation.
+      *
+      * **Error Handling Pattern:**
+      * - Always returns emitters, never throws errors - ensures consistent error handling
+      * - Uses setImmediate for event emission - prevents race conditions where events fire before listeners attach
+      * - Emits End after terminal events (Error, Abort) - ensures cleanup code always runs
+      *
+      * **Why setImmediate?**
+      * Since streamRequest is async, callers must await to get the emitter, creating a timing gap.
+      * setImmediate defers event emission to the next event loop tick, ensuring events fire AFTER
+      * listeners are attached. This prevents race conditions where synchronous event emission
+      * would occur before listeners can be registered.
+      *
+      * @param acRequest - Access request for authorization
+      * @param body - Request body parameters
+      * @param context - LLM request context
+      * @param abortSignal - AbortSignal for cancellation
+      * @returns EventEmitter that emits TLLMEvent events (Data, Content, Error, Abort, End, etc.)
+      */
+     protected streamRequest({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<EventEmitter>;
      protected reqBodyAdapter(params: TLLMPreparedParams): Promise<ChatCompletionCreateParams>;
      protected reportUsage(usage: Groq.Completions.CompletionUsage & {
          prompt_tokens_details?: {
package/dist/types/subsystems/LLMManager/LLM.service/connectors/Ollama.class.d.ts
@@ -4,8 +4,28 @@ import { LLMConnector } from '../LLMConnector';
  export declare class OllamaConnector extends LLMConnector {
      name: string;
      private getClient;
-     protected request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
-     protected streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter>;
+     protected request({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
+     /**
+      * Stream request implementation.
+      *
+      * **Error Handling Pattern:**
+      * - Always returns emitters, never throws errors - ensures consistent error handling
+      * - Uses setImmediate for event emission - prevents race conditions where events fire before listeners attach
+      * - Emits End after terminal events (Error, Abort) - ensures cleanup code always runs
+      *
+      * **Why setImmediate?**
+      * Since streamRequest is async, callers must await to get the emitter, creating a timing gap.
+      * setImmediate defers event emission to the next event loop tick, ensuring events fire AFTER
+      * listeners are attached. This prevents race conditions where synchronous event emission
+      * would occur before listeners can be registered.
+      *
+      * @param acRequest - Access request for authorization
+      * @param body - Request body parameters
+      * @param context - LLM request context
+      * @param abortSignal - AbortSignal for cancellation
+      * @returns EventEmitter that emits TLLMEvent events (Data, Content, Error, Abort, End, etc.)
+      */
+     protected streamRequest({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<EventEmitter>;
      protected reqBodyAdapter(params: TLLMPreparedParams): Promise<TLLMRequestBody>;
      protected reportUsage(usage: {
          prompt_tokens: number;
package/dist/types/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.d.ts
@@ -26,8 +26,8 @@ type TUsage = {
  export declare class PerplexityConnector extends LLMConnector {
      name: string;
      private getClient;
-     protected request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
-     protected streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter>;
+     protected request({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
+     protected streamRequest({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<EventEmitter>;
      protected reqBodyAdapter(params: TLLMPreparedParams): Promise<ChatCompletionParams>;
      protected reportUsage(usage: TUsage, metadata: {
          modelEntryName: string;
@@ -64,7 +64,7 @@ export declare class PerplexityConnector extends LLMConnector {
      role: TLLMMessageRole;
      content?: string | {
          text: string;
-     }[] | Array<import("@anthropic-ai/sdk/resources/messages").TextBlockParam | import("@anthropic-ai/sdk/resources/messages").ImageBlockParam | import("@anthropic-ai/sdk/resources/messages").ToolUseBlockParam | import("@anthropic-ai/sdk/resources/messages").ToolResultBlockParam>;
+     }[] | Array<import("@anthropic-ai/sdk/resources").TextBlockParam | import("@anthropic-ai/sdk/resources").ImageBlockParam | import("@anthropic-ai/sdk/resources").ToolUseBlockParam | import("@anthropic-ai/sdk/resources").ToolResultBlockParam>;
      parts?: {
          text?: string;
          functionCall?: {
package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts
@@ -15,8 +15,28 @@ export declare class OpenAIConnector extends LLMConnector {
       */
      private getInterfaceType;
      protected getClient(context: ILLMRequestContext): Promise<OpenAI>;
-     protected request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
-     protected streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter>;
+     protected request({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
+     /**
+      * Stream request implementation.
+      *
+      * **Error Handling Pattern:**
+      * - Always returns emitters, never throws errors - ensures consistent error handling
+      * - Uses setImmediate for event emission - prevents race conditions where events fire before listeners attach
+      * - Emits End after terminal events (Error, Abort) - ensures cleanup code always runs
+      *
+      * **Why setImmediate?**
+      * Since streamRequest is async, callers must await to get the emitter, creating a timing gap.
+      * setImmediate defers event emission to the next event loop tick, ensuring events fire AFTER
+      * listeners are attached. This prevents race conditions where synchronous event emission
+      * would occur before listeners can be registered.
+      *
+      * @param acRequest - Access request for authorization
+      * @param body - Request body parameters
+      * @param context - LLM request context
+      * @param abortSignal - AbortSignal for cancellation
+      * @returns EventEmitter that emits TLLMEvent events (Data, Content, Error, Abort, End, etc.)
+      */
+     protected streamRequest({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<EventEmitter>;
      protected imageGenRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse>;
      protected imageEditRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse>;
      formatToolsConfig({ type, toolDefinitions, toolChoice, modelInfo }: {
@@ -39,7 +59,7 @@ export declare class OpenAIConnector extends LLMConnector {
      role: TLLMMessageRole;
      content?: string | {
          text: string;
-     }[] | Array<import("@anthropic-ai/sdk/resources/messages").TextBlockParam | import("@anthropic-ai/sdk/resources/messages").ImageBlockParam | import("@anthropic-ai/sdk/resources/messages").ToolUseBlockParam | import("@anthropic-ai/sdk/resources/messages").ToolResultBlockParam>;
+     }[] | Array<import("@anthropic-ai/sdk/resources").TextBlockParam | import("@anthropic-ai/sdk/resources").ImageBlockParam | import("@anthropic-ai/sdk/resources").ToolUseBlockParam | import("@anthropic-ai/sdk/resources").ToolResultBlockParam>;
      parts?: {
          text?: string;
          functionCall?: {
package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.d.ts
@@ -17,8 +17,8 @@ export declare class ChatCompletionsApiInterface extends OpenAIApiInterface {
      private validImageMimeTypes;
      private validDocumentMimeTypes;
      constructor(context: ILLMRequestContext, deps: HandlerDependencies);
-     createRequest(body: OpenAI.ChatCompletionCreateParams, context: ILLMRequestContext): Promise<OpenAI.ChatCompletion>;
-     createStream(body: OpenAI.ChatCompletionCreateParams, context: ILLMRequestContext): Promise<AsyncIterable<OpenAI.ChatCompletionChunk>>;
+     createRequest(body: OpenAI.ChatCompletionCreateParams, context: ILLMRequestContext, abortSignal?: AbortSignal): Promise<OpenAI.ChatCompletion>;
+     createStream(body: OpenAI.ChatCompletionCreateParams, context: ILLMRequestContext, abortSignal?: AbortSignal): Promise<AsyncIterable<OpenAI.ChatCompletionChunk>>;
      handleStream(stream: AsyncIterable<OpenAI.ChatCompletionChunk>, context: ILLMRequestContext): EventEmitter;
      prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.ChatCompletionCreateParams>;
      /**
package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.d.ts
@@ -27,13 +27,13 @@ export declare abstract class OpenAIApiInterface {
       * @param body - The request body prepared for this API
       * @param context - The request context
       */
-     abstract createRequest(body: any, context: ILLMRequestContext): Promise<any>;
+     abstract createRequest(body: any, context: ILLMRequestContext, abortSignal?: AbortSignal): Promise<any>;
      /**
       * Create a stream for this API interface
       * @param body - The request body prepared for this API
       * @param context - The request context
       */
-     abstract createStream(body: any, context: ILLMRequestContext): Promise<any>;
+     abstract createStream(body: any, context: ILLMRequestContext, abortSignal?: AbortSignal): Promise<any>;
      /**
       * Handle the stream response from this API interface
       * @param stream - The stream returned from createStream
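The new optional `abortSignal` parameter threads cancellation from the caller down to the provider SDK. A hedged sketch of that wiring (the import paths are illustrative; these types live under `dist/types` in the package and root re-export is an assumption):

```typescript
import type OpenAI from 'openai';
import type { ChatCompletionsApiInterface, ILLMRequestContext } from '@smythos/sre';

// Abort if the stream hasn't been created within `timeoutMs`.
async function streamWithTimeout(
    api: ChatCompletionsApiInterface,
    body: OpenAI.ChatCompletionCreateParams,
    context: ILLMRequestContext,
    timeoutMs: number
) {
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeoutMs);
    try {
        // `abortSignal` is the new third parameter added in this release.
        return await api.createStream(body, context, controller.signal);
    } finally {
        clearTimeout(timer);
    }
}
```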
package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.d.ts
@@ -18,8 +18,8 @@ export declare class ResponsesApiInterface extends OpenAIApiInterface {
      private validImageMimeTypes;
      private validDocumentMimeTypes;
      constructor(context: ILLMRequestContext, deps: HandlerDependencies);
-     createRequest(body: OpenAI.Responses.ResponseCreateParams, context: ILLMRequestContext): Promise<OpenAI.Responses.Response>;
-     createStream(body: OpenAI.Responses.ResponseCreateParams, context: ILLMRequestContext): Promise<Stream<OpenAI.Responses.ResponseStreamEvent>>;
+     createRequest(body: OpenAI.Responses.ResponseCreateParams, context: ILLMRequestContext, abortSignal?: AbortSignal): Promise<OpenAI.Responses.Response>;
+     createStream(body: OpenAI.Responses.ResponseCreateParams, context: ILLMRequestContext, abortSignal?: AbortSignal): Promise<Stream<OpenAI.Responses.ResponseStreamEvent>>;
      handleStream(stream: Stream<OpenAI.Responses.ResponseStreamEvent>, context: ILLMRequestContext): EventEmitter;
      /**
       * Process the responses API stream format
package/dist/types/subsystems/LLMManager/LLM.service/connectors/xAI.class.d.ts
@@ -61,8 +61,8 @@ type TUsage = {
  export declare class xAIConnector extends LLMConnector {
      name: string;
      private getClient;
-     protected request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
-     protected streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter>;
+     protected request({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<TLLMChatResponse>;
+     protected streamRequest({ acRequest, body, context, abortSignal }: ILLMRequestFuncParams): Promise<EventEmitter>;
      protected reqBodyAdapter(params: TLLMParams): Promise<ChatCompletionParams>;
      protected reportUsage(usage: TUsage, metadata: {
          modelEntryName: string;
@@ -99,7 +99,7 @@ export declare class xAIConnector extends LLMConnector {
      role: TLLMMessageRole;
      content?: string | {
          text: string;
-     }[] | Array<import("@anthropic-ai/sdk/resources/messages").TextBlockParam | import("@anthropic-ai/sdk/resources/messages").ImageBlockParam | import("@anthropic-ai/sdk/resources/messages").ToolUseBlockParam | import("@anthropic-ai/sdk/resources/messages").ToolResultBlockParam>;
+     }[] | Array<import("@anthropic-ai/sdk/resources").TextBlockParam | import("@anthropic-ai/sdk/resources").ImageBlockParam | import("@anthropic-ai/sdk/resources").ToolUseBlockParam | import("@anthropic-ai/sdk/resources").ToolResultBlockParam>;
      parts?: {
          text?: string;
          functionCall?: {
package/dist/types/subsystems/MemoryManager/LLMContext.d.ts
@@ -5,6 +5,8 @@ export declare class LLMContext {
      private _systemPrompt;
      private _llmContextStore;
      private _llmCache;
+     /** Resolves when the context store has finished loading (if any). Safe to call before using addUserMessage, getContextWindow, or other context operations. */
+     private _readyPromise;
      get systemPrompt(): string;
      set systemPrompt(systemPrompt: string);
      get llmCache(): LLMCache;
@@ -17,9 +19,14 @@ export declare class LLMContext {
       * @param source a messages[] object, or smyth file system uri (smythfs://...)
       */
      constructor(llmInference: any, _systemPrompt?: string, llmContextStore?: ILLMContextStore);
+     /**
+      * Returns a promise that resolves when the context is ready (store loaded if present).
+      * Call before pushing or reading messages to avoid race conditions.
+      */
+     ready(): Promise<void>;
      private push;
-     addUserMessage(content: string, message_id: string, metadata?: any): void;
-     addAssistantMessage(content: string, message_id: string, metadata?: any): void;
-     addToolMessage(messageBlock: any, toolsData: any, message_id: string, metadata?: any): void;
+     addUserMessage(content: string, message_id: string, metadata?: any): Promise<void>;
+     addAssistantMessage(content: string, message_id: string, metadata?: any): Promise<void>;
+     addToolMessage(messageBlock: any, toolsData: any, message_id: string, metadata?: any): Promise<void>;
      getContextWindow(maxTokens: number, maxOutputTokens?: number): Promise<any[]>;
  }
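The `add*` methods now return promises, and `ready()` gates on the store load. A usage sketch (hedged: `llmInference` and `contextStore` are assumed constructed elsewhere, and the constructor arguments are illustrative):

```typescript
const ctx = new LLMContext(llmInference, 'You are a helpful assistant.', contextStore);
await ctx.ready();                            // wait for the store to load, avoiding races
await ctx.addUserMessage('Hello!', 'msg-1');  // add* methods are now awaitable
const window = await ctx.getContextWindow(8192);
```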
package/dist/types/subsystems/ObservabilityManager/Telemetry.service/connectors/OTel/OTel.class.d.ts
@@ -29,6 +29,14 @@ export type OTelLogConfig = {
       * These will be replaced with '[REDACTED]' in logs
       */
      redactFields?: string[];
+     /**
+      * Enable automatic redaction of sensitive data in logs and traces.
+      * When true (or omitted), sensitive data such as passwords, tokens,
+      * API keys, and JWT tokens are automatically replaced with '[REDACTED]'.
+      * Set to false to disable all automatic redaction.
+      * Default: true
+      */
+     enableRedaction?: boolean;
  };
  export declare class OTel extends TelemetryConnector {
      protected _settings: OTelLogConfig;
@@ -47,6 +55,21 @@ export declare class OTel extends TelemetryConnector {
       * Redact sensitive fields from an object
       */
      private redactSensitiveData;
+     /**
+      * Redact sensitive patterns from a string value.
+      * Skips redaction when enableRedaction is explicitly set to false.
+      */
+     private redactString;
+     /**
+      * Redact sensitive data from any data type (objects, arrays, strings).
+      * Skips redaction when enableRedaction is explicitly set to false.
+      */
+     private redactObject;
+     /**
+      * Redact sensitive HTTP headers.
+      * Skips redaction when enableRedaction is explicitly set to false.
+      */
+     private redactRequestHeaders;
      /**
       * Safely format output for logging with size limits and redaction
       */
@@ -55,5 +78,6 @@ export declare class OTel extends TelemetryConnector {
      protected log(acRequest: AccessRequest, logData: AgentCallLog, callId?: string): Promise<any>;
      protected logTask(acRequest: AccessRequest, tasks: number, isUsingTestDomain: boolean): Promise<void>;
      private prepareComponentData;
+     private prepareContext;
      protected setupHooks(): Promise<void>;
  }
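A minimal config sketch showing the two redaction knobs from the diff above (hedged: `Partial` is used because any exporter/endpoint fields the full `OTelLogConfig` type may require are omitted here, and the field names passed to `redactFields` are examples):

```typescript
import type { OTelLogConfig } from '@smythos/sre';

const otelSettings: Partial<OTelLogConfig> = {
    redactFields: ['ssn', 'card_number'], // explicit field names to force-redact
    enableRedaction: true,                // the default; set false to disable all automatic redaction
};
```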
package/dist/types/subsystems/ObservabilityManager/Telemetry.service/connectors/OTel/OTel.redaction.helper.d.ts
@@ -0,0 +1,49 @@
+ /**
+  * OTel Redaction Helper
+  *
+  * Provides sensitive data redaction functions for OpenTelemetry logs and traces.
+  * Adapted from Log.helper.ts redactLogMessage() function with enhancements for OTel.
+  *
+  * Key features:
+  * - JSON-aware redaction: handles "key":"value" patterns
+  * - Escaped JSON redaction: handles \"key\":\"value\" patterns in embedded strings
+  * - Truncated value support: handles preview strings without closing quotes
+  * - Key-based redaction: redacts values based on sensitive key names
+  * - JWT token redaction: full token redaction
+  * - Header redaction: full redaction for auth headers
+  */
+ /**
+  * Sensitive words for content-based redaction
+  * Used to identify sensitive data in both key names and value content
+  */
+ export declare const SENSITIVE_WORDS: string[];
+ /**
+  * Sensitive header names that should have their values fully redacted
+  */
+ export declare const SENSITIVE_HEADERS: string[];
+ /**
+  * Redact sensitive data from a string value
+  * Applies multiple regex patterns to catch sensitive data in various formats.
+  *
+  * @param value - The string to redact
+  * @returns The redacted string with sensitive data replaced by [REDACTED]
+  */
+ export declare function redactSensitiveString(value: string): string;
+ /**
+  * Redact sensitive HTTP headers
+  * Full value redaction for known auth headers, string redaction for others.
+  *
+  * @param headers - The headers object or string to redact
+  * @returns The redacted headers with sensitive values replaced
+  */
+ export declare function redactHeaders(headers: Record<string, unknown> | string | undefined | null): Record<string, unknown> | string | undefined | null;
+ /**
+  * Universal data redaction - handles any data type
+  * Recursively applies redaction to strings within objects/arrays.
+  * Also checks key names against sensitive words and redacts entire values.
+  * Preserves the input type structure for OTel compatibility.
+  *
+  * @param data - The data to redact (any type)
+  * @returns The redacted data with the same type structure
+  */
+ export declare function redactData<T>(data: T): T;
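A usage sketch of the exported helpers (hedged: the outputs are indicative only, since the exact matching is governed by SENSITIVE_WORDS and the internal regex patterns):

```typescript
import { redactData, redactSensitiveString } from '@smythos/sre';

// Key-based redaction: values under sensitive key names are replaced wholesale.
const safe = redactData({ user: 'ada', password: 'hunter2', meta: { apiKey: 'sk-123' } });
// e.g. { user: 'ada', password: '[REDACTED]', meta: { apiKey: '[REDACTED]' } }

// JSON-aware string redaction: "key":"value" patterns inside strings are caught too.
const line = redactSensitiveString('{"token":"abc123","q":"hello"}');
// e.g. '{"token":"[REDACTED]","q":"hello"}'
```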
package/dist/types/types/LLM.types.d.ts
@@ -167,6 +167,8 @@ export type TLLMPreparedParams = TLLMParams & {
          imageEditing?: boolean;
      };
      toolsInfo?: TToolsInfo;
+     outputs?: any[];
+     structuredOutputs?: any[];
  };
  export type TLLMConnectorParams = Omit<TLLMParams, 'model'> & {
      model: string | TLLMModel | TCustomLLMModel;
@@ -455,6 +457,8 @@ export declare enum TLLMEvent {
      Thinking = "thinking",
      /** End of the response */
      End = "end",
+     /** Request aborted */
+     Abort = "abort",
      /** Error */
      Error = "error",
      /** Tool information : emitted by the LLM determines the next tool call */
@@ -486,10 +490,35 @@ export interface ILLMRequestFuncParams<TBody = any> {
      acRequest: AccessRequest;
      body: TBody;
      context: ILLMRequestContext;
+     abortSignal?: AbortSignal;
+ }
+ /**
+  * Standardized finish reasons for LLM responses across all providers.
+  *
+  * This enum normalizes provider-specific finish reasons (e.g., 'end_turn' from Anthropic,
+  * 'max_tokens' from Google AI) into a consistent set of values.
+  */
+ export declare enum TLLMFinishReason {
+     /** Response completed normally (reached natural stopping point or stop sequence) */
+     Stop = "stop",
+     /** Response was truncated due to maximum token limit or context window */
+     Length = "length",
+     /** Response was truncated due to context window limit */
+     ContextWindowLength = "context_window_length",
+     /** Response was filtered by content moderation policies */
+     ContentFilter = "content_filter",
+     /** Response ended because the model called a tool/function */
+     ToolCalls = "tool_calls",
+     /** Request was aborted by user or system */
+     Abort = "abort",
+     /** Request ended due to an error */
+     Error = "error",
+     /** Unknown or unmapped finish reason from provider */
+     Unknown = "unknown"
  }
  export type TLLMChatResponse = {
      content: string;
-     finishReason: string;
+     finishReason: TLLMFinishReason;
      thinkingContent?: string;
      usage?: any;
      useTool?: boolean;
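A hedged sketch of the kind of lookup table a normalizer like LLMHelper.normalizeFinishReason might use. Only the 'end_turn', 'tool_use', and 'SAFETY' mappings are confirmed by the LLM.helper examples earlier in this diff; the OpenAI values are the provider's standard finish reasons, and the remainder are assumptions:

```typescript
const FINISH_REASON_MAP: Record<string, TLLMFinishReason> = {
    stop: TLLMFinishReason.Stop,                     // OpenAI
    end_turn: TLLMFinishReason.Stop,                 // Anthropic (confirmed above)
    tool_calls: TLLMFinishReason.ToolCalls,          // OpenAI
    tool_use: TLLMFinishReason.ToolCalls,            // Anthropic (confirmed above)
    length: TLLMFinishReason.Length,                 // OpenAI
    max_tokens: TLLMFinishReason.Length,             // Anthropic / Google AI (assumed)
    content_filter: TLLMFinishReason.ContentFilter,  // OpenAI
    safety: TLLMFinishReason.ContentFilter,          // Google AI 'SAFETY' (confirmed above)
};

const normalize = (reason?: string | null): TLLMFinishReason =>
    reason ? FINISH_REASON_MAP[reason.toLowerCase()] ?? TLLMFinishReason.Unknown
           : TLLMFinishReason.Unknown;
```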