praisonai 1.4.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/agent/audio.d.ts +190 -0
  2. package/dist/agent/audio.js +302 -0
  3. package/dist/agent/index.d.ts +2 -0
  4. package/dist/agent/index.js +5 -1
  5. package/dist/index.d.ts +10 -4
  6. package/dist/index.js +50 -8
  7. package/dist/knowledge/chonkie-adapter.d.ts +99 -0
  8. package/dist/knowledge/chonkie-adapter.js +268 -0
  9. package/dist/knowledge/index.d.ts +5 -0
  10. package/dist/knowledge/index.js +29 -1
  11. package/dist/knowledge/query-engine.d.ts +136 -0
  12. package/dist/knowledge/query-engine.js +214 -0
  13. package/dist/knowledge/rag-pipeline.d.ts +192 -0
  14. package/dist/knowledge/rag-pipeline.js +283 -0
  15. package/dist/knowledge/readers.d.ts +129 -0
  16. package/dist/knowledge/readers.js +393 -0
  17. package/dist/mcp/index.d.ts +188 -0
  18. package/dist/mcp/index.js +373 -0
  19. package/dist/mcp/server.d.ts +211 -0
  20. package/dist/mcp/server.js +412 -0
  21. package/dist/memory/docs-manager.d.ts +165 -0
  22. package/dist/memory/docs-manager.js +294 -0
  23. package/dist/memory/hooks.d.ts +154 -0
  24. package/dist/memory/hooks.js +228 -0
  25. package/dist/memory/rules-manager.d.ts +182 -0
  26. package/dist/memory/rules-manager.js +244 -0
  27. package/dist/tools/index.d.ts +1 -0
  28. package/dist/tools/index.js +7 -1
  29. package/dist/tools/subagent.d.ts +131 -0
  30. package/dist/tools/subagent.js +185 -0
  31. package/dist/workflows/index.d.ts +76 -1
  32. package/dist/workflows/index.js +125 -6
  33. package/dist/workflows/loop.d.ts +111 -0
  34. package/dist/workflows/loop.js +274 -0
  35. package/dist/workflows/repeat.d.ts +115 -0
  36. package/dist/workflows/repeat.js +144 -0
  37. package/package.json +1 -1
package/dist/agent/audio.d.ts ADDED
@@ -0,0 +1,190 @@
+ /**
+ * AudioAgent - Speech synthesis and transcription agent
+ *
+ * Wraps AI SDK's generateSpeech and transcribe functions for
+ * text-to-speech and speech-to-text capabilities.
+ *
+ * Requires AI SDK: npm install ai @ai-sdk/openai
+ *
+ * @example Text-to-Speech
+ * ```typescript
+ * import { AudioAgent } from 'praisonai';
+ *
+ * const agent = new AudioAgent({
+ * provider: 'openai',
+ * voice: 'alloy'
+ * });
+ *
+ * const audio = await agent.speak('Hello, world!');
+ * // Returns audio buffer
+ * ```
+ *
+ * @example Speech-to-Text
+ * ```typescript
+ * const agent = new AudioAgent({ provider: 'openai' });
+ *
+ * const text = await agent.transcribe('./audio.mp3');
+ * console.log(text); // "Hello, world!"
+ * ```
+ */
+ /**
+ * Supported audio providers
+ */
+ export type AudioProvider = 'openai' | 'elevenlabs' | 'google' | 'deepgram' | 'groq';
+ /**
+ * Voice options by provider
+ */
+ export type OpenAIVoice = 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
+ export type ElevenLabsVoice = string;
+ /**
+ * Audio format options
+ */
+ export type AudioFormat = 'mp3' | 'opus' | 'aac' | 'flac' | 'wav' | 'pcm';
+ /**
+ * Configuration for AudioAgent
+ */
+ export interface AudioAgentConfig {
+ /** Name of the agent */
+ name?: string;
+ /** Audio provider (default: 'openai') */
+ provider?: AudioProvider;
+ /** Voice to use for TTS */
+ voice?: string;
+ /** TTS model to use (provider-specific) */
+ model?: string;
+ /** Audio output format */
+ format?: AudioFormat;
+ /** Speed multiplier for TTS (0.25 to 4.0) */
+ speed?: number;
+ /** Language for transcription */
+ language?: string;
+ /** Enable verbose logging */
+ verbose?: boolean;
+ }
+ /**
+ * Options for speak method
+ */
+ export interface SpeakOptions {
+ /** Override voice for this call */
+ voice?: string;
+ /** Override model for this call */
+ model?: string;
+ /** Override format for this call */
+ format?: AudioFormat;
+ /** Override speed for this call */
+ speed?: number;
+ }
+ /**
+ * Options for transcribe method
+ */
+ export interface TranscribeOptions {
+ /** Language hint for transcription */
+ language?: string;
+ /** Include word-level timestamps */
+ timestamps?: boolean;
+ /** Return detailed segments */
+ segments?: boolean;
+ }
+ /**
+ * Result from speak method
+ */
+ export interface SpeakResult {
+ /** Audio data as Buffer or ArrayBuffer */
+ audio: Buffer | ArrayBuffer;
+ /** Duration in seconds (if available) */
+ duration?: number;
+ /** Audio format */
+ format: string;
+ }
+ /**
+ * Result from transcribe method
+ */
+ export interface TranscribeResult {
+ /** Transcribed text */
+ text: string;
+ /** Detected language */
+ language?: string;
+ /** Duration in seconds */
+ duration?: number;
+ /** Word-level timestamps (if requested) */
+ words?: Array<{
+ word: string;
+ start: number;
+ end: number;
+ }>;
+ /** Segments (if requested) */
+ segments?: Array<{
+ text: string;
+ start: number;
+ end: number;
+ }>;
+ }
+ /**
+ * AudioAgent - Speech synthesis and transcription
+ */
+ export declare class AudioAgent {
+ readonly id: string;
+ readonly name: string;
+ private config;
+ constructor(config?: AudioAgentConfig);
+ /**
+ * Get default TTS model for provider
+ */
+ private getDefaultModel;
+ /**
+ * Generate speech from text (Text-to-Speech)
+ *
+ * @param text - Text to convert to speech
+ * @param options - Override options for this call
+ * @returns Audio data with metadata
+ *
+ * @example
+ * ```typescript
+ * const result = await agent.speak('Hello, world!');
+ * fs.writeFileSync('output.mp3', result.audio);
+ * ```
+ */
+ speak(text: string, options?: SpeakOptions): Promise<SpeakResult>;
+ /**
+ * Get provider-specific speech model
+ */
+ private getSpeechModel;
+ /**
+ * Transcribe audio to text (Speech-to-Text)
+ *
+ * @param audioInput - Audio file path, URL, or Buffer
+ * @param options - Transcription options
+ * @returns Transcribed text with metadata
+ *
+ * @example From file
+ * ```typescript
+ * const result = await agent.transcribe('./audio.mp3');
+ * console.log(result.text);
+ * ```
+ *
+ * @example From Buffer
+ * ```typescript
+ * const audioBuffer = fs.readFileSync('./audio.mp3');
+ * const result = await agent.transcribe(audioBuffer);
+ * ```
+ */
+ transcribe(audioInput: string | Buffer | ArrayBuffer, options?: TranscribeOptions): Promise<TranscribeResult>;
+ /**
+ * Prepare audio input for transcription
+ */
+ private prepareAudioInput;
+ /**
+ * Get provider-specific transcription model
+ */
+ private getTranscriptionModel;
+ /**
+ * Chat method for agent-like interface
+ * Determines whether to speak or transcribe based on input
+ */
+ chat(input: string): Promise<string>;
+ }
+ /**
+ * Factory function to create AudioAgent
+ */
+ export declare function createAudioAgent(config?: AudioAgentConfig): AudioAgent;
+ export default AudioAgent;
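
The declarations above define the whole public surface of the new agent: config, per-call options, and result shapes for both directions. A minimal end-to-end sketch of consuming that surface (not from the package itself; the file name and round trip are illustrative, and it assumes the `ai` and `@ai-sdk/openai` peer dependencies plus an OpenAI API key are available):

```typescript
import { writeFileSync } from 'node:fs';
import { AudioAgent, createAudioAgent } from 'praisonai';

async function demo() {
  // Text-to-speech with the documented defaults (provider 'openai', voice 'alloy', mp3)
  const tts = new AudioAgent({ provider: 'openai', voice: 'alloy', format: 'mp3' });
  const spoken = await tts.speak('Hello from praisonai 1.5.0', { speed: 1.0 });

  // SpeakResult.audio is Buffer | ArrayBuffer, so narrow before writing to disk
  const data = Buffer.isBuffer(spoken.audio) ? spoken.audio : Buffer.from(spoken.audio);
  writeFileSync('hello.mp3', data);

  // Speech-to-text from the file we just wrote, with optional word timestamps
  const stt = createAudioAgent({ provider: 'openai', language: 'en' });
  const transcript = await stt.transcribe('./hello.mp3', { timestamps: true });
  console.log(transcript.text, transcript.words?.length ?? 0, 'words');
}

demo().catch(console.error);
```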
package/dist/agent/audio.js ADDED
@@ -0,0 +1,302 @@
+ "use strict";
+ /**
+ * AudioAgent - Speech synthesis and transcription agent
+ *
+ * Wraps AI SDK's generateSpeech and transcribe functions for
+ * text-to-speech and speech-to-text capabilities.
+ *
+ * Requires AI SDK: npm install ai @ai-sdk/openai
+ *
+ * @example Text-to-Speech
+ * ```typescript
+ * import { AudioAgent } from 'praisonai';
+ *
+ * const agent = new AudioAgent({
+ * provider: 'openai',
+ * voice: 'alloy'
+ * });
+ *
+ * const audio = await agent.speak('Hello, world!');
+ * // Returns audio buffer
+ * ```
+ *
+ * @example Speech-to-Text
+ * ```typescript
+ * const agent = new AudioAgent({ provider: 'openai' });
+ *
+ * const text = await agent.transcribe('./audio.mp3');
+ * console.log(text); // "Hello, world!"
+ * ```
+ */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function (o) {
+ var ar = [];
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ })();
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.AudioAgent = void 0;
+ exports.createAudioAgent = createAudioAgent;
+ const crypto_1 = require("crypto");
+ /**
+ * AudioAgent - Speech synthesis and transcription
+ */
+ class AudioAgent {
+ constructor(config = {}) {
+ this.id = (0, crypto_1.randomUUID)();
+ this.name = config.name ?? `AudioAgent_${(0, crypto_1.randomUUID)().slice(0, 8)}`;
+ this.config = {
+ provider: config.provider ?? 'openai',
+ voice: config.voice ?? 'alloy',
+ model: config.model ?? this.getDefaultModel(config.provider ?? 'openai'),
+ format: config.format ?? 'mp3',
+ speed: config.speed ?? 1.0,
+ language: config.language ?? 'en',
+ verbose: config.verbose ?? false,
+ };
+ }
+ /**
+ * Get default TTS model for provider
+ */
+ getDefaultModel(provider) {
+ switch (provider) {
+ case 'openai':
+ return 'tts-1';
+ case 'elevenlabs':
+ return 'eleven_multilingual_v2';
+ case 'google':
+ return 'text-to-speech';
+ case 'deepgram':
+ return 'aura-asteria-en';
+ case 'groq':
+ return 'whisper-large-v3';
+ default:
+ return 'tts-1';
+ }
+ }
+ /**
+ * Generate speech from text (Text-to-Speech)
+ *
+ * @param text - Text to convert to speech
+ * @param options - Override options for this call
+ * @returns Audio data with metadata
+ *
+ * @example
+ * ```typescript
+ * const result = await agent.speak('Hello, world!');
+ * fs.writeFileSync('output.mp3', result.audio);
+ * ```
+ */
+ async speak(text, options) {
+ const voice = options?.voice ?? this.config.voice;
+ const model = options?.model ?? this.config.model;
+ const speed = options?.speed ?? this.config.speed;
+ if (this.config.verbose) {
+ console.log(`[AudioAgent] Speaking with ${this.config.provider}/${model}, voice: ${voice}`);
+ }
+ try {
+ // Lazy import AI SDK
+ const { experimental_generateSpeech: generateSpeech } = await Promise.resolve().then(() => __importStar(require('ai')));
+ // Get provider-specific speech model
+ const speechModel = await this.getSpeechModel(model, voice);
+ const result = await generateSpeech({
+ model: speechModel,
+ text,
+ voice,
+ // Note: speed is provider-specific, may not be supported by all
+ });
+ // Handle both Buffer and audio file object types
+ const audioData = result.audio instanceof Buffer
+ ? result.audio
+ : (result.audio.arrayBuffer
+ ? await result.audio.arrayBuffer()
+ : result.audio);
+ return {
+ audio: audioData,
+ format: this.config.format,
+ duration: result.duration, // If available
+ };
+ }
+ catch (error) {
+ // Check for common issues
+ if (error.message?.includes('Cannot find module')) {
+ throw new Error(`AI SDK not installed. Run: npm install ai @ai-sdk/${this.config.provider}`);
+ }
+ throw error;
+ }
+ }
+ /**
+ * Get provider-specific speech model
+ */
+ async getSpeechModel(model, voice) {
+ switch (this.config.provider) {
+ case 'openai': {
+ const { openai } = await Promise.resolve().then(() => __importStar(require('@ai-sdk/openai')));
+ return openai.speech(model);
+ }
+ case 'elevenlabs': {
+ // @ts-ignore - optional dependency
+ const elevenlabsModule = await Promise.resolve().then(() => __importStar(require('@ai-sdk/elevenlabs'))).catch(() => null);
+ if (!elevenlabsModule)
+ throw new Error('Install @ai-sdk/elevenlabs for ElevenLabs support');
+ return elevenlabsModule.elevenlabs.speech(model);
+ }
+ case 'google': {
+ const { google } = await Promise.resolve().then(() => __importStar(require('@ai-sdk/google')));
+ return google.speech?.(model) ?? google(model);
+ }
+ default:
+ throw new Error(`Provider ${this.config.provider} not supported for TTS`);
+ }
+ }
+ /**
+ * Transcribe audio to text (Speech-to-Text)
+ *
+ * @param audioInput - Audio file path, URL, or Buffer
+ * @param options - Transcription options
+ * @returns Transcribed text with metadata
+ *
+ * @example From file
+ * ```typescript
+ * const result = await agent.transcribe('./audio.mp3');
+ * console.log(result.text);
+ * ```
+ *
+ * @example From Buffer
+ * ```typescript
+ * const audioBuffer = fs.readFileSync('./audio.mp3');
+ * const result = await agent.transcribe(audioBuffer);
+ * ```
+ */
+ async transcribe(audioInput, options) {
+ const language = options?.language ?? this.config.language;
+ if (this.config.verbose) {
+ console.log(`[AudioAgent] Transcribing with ${this.config.provider}`);
+ }
+ try {
+ // Lazy import AI SDK
+ const { experimental_transcribe: transcribe } = await Promise.resolve().then(() => __importStar(require('ai')));
+ // Convert input to appropriate format
+ const audio = await this.prepareAudioInput(audioInput);
+ // Get provider-specific transcription model
+ const transcriptionModel = await this.getTranscriptionModel();
+ const result = await transcribe({
+ model: transcriptionModel,
+ audio,
+ // language, // If supported by provider
+ });
+ return {
+ text: result.text,
+ language: result.language,
+ duration: result.duration,
+ words: options?.timestamps ? result.words : undefined,
+ segments: options?.segments ? result.segments : undefined,
+ };
+ }
+ catch (error) {
+ if (error.message?.includes('Cannot find module')) {
+ throw new Error(`AI SDK not installed. Run: npm install ai @ai-sdk/${this.config.provider}`);
+ }
+ throw error;
+ }
+ }
+ /**
+ * Prepare audio input for transcription
+ */
+ async prepareAudioInput(input) {
+ if (typeof input === 'string') {
+ // Check if it's a URL
+ if (input.startsWith('http://') || input.startsWith('https://')) {
+ return { type: 'url', url: input };
+ }
+ // Assume it's a file path - load with fs
+ const fs = await Promise.resolve().then(() => __importStar(require('fs'))).catch(() => null);
+ if (!fs) {
+ throw new Error('File loading requires Node.js fs module');
+ }
+ const buffer = fs.readFileSync(input);
+ return { type: 'buffer', data: buffer };
+ }
+ // Already a buffer
+ return { type: 'buffer', data: input };
+ }
+ /**
+ * Get provider-specific transcription model
+ */
+ async getTranscriptionModel() {
+ switch (this.config.provider) {
+ case 'openai': {
+ const { openai } = await Promise.resolve().then(() => __importStar(require('@ai-sdk/openai')));
+ return openai.transcription('whisper-1');
+ }
+ case 'groq': {
+ // @ts-ignore - optional dependency
+ const groqModule = await Promise.resolve().then(() => __importStar(require('@ai-sdk/groq'))).catch(() => null);
+ if (!groqModule)
+ throw new Error('Install @ai-sdk/groq for Groq support');
+ return groqModule.groq.transcription?.('whisper-large-v3') ?? groqModule.groq('whisper-large-v3');
+ }
+ case 'deepgram': {
+ // @ts-ignore - optional dependency
+ const deepgramModule = await Promise.resolve().then(() => __importStar(require('@ai-sdk/deepgram'))).catch(() => null);
+ if (!deepgramModule)
+ throw new Error('Install @ai-sdk/deepgram for Deepgram support');
+ return deepgramModule.deepgram.transcription?.('nova-2') ?? deepgramModule.deepgram('nova-2');
+ }
+ default:
+ throw new Error(`Provider ${this.config.provider} not supported for transcription`);
+ }
+ }
+ /**
+ * Chat method for agent-like interface
+ * Determines whether to speak or transcribe based on input
+ */
+ async chat(input) {
+ // If input looks like a file path, transcribe it
+ if (input.endsWith('.mp3') || input.endsWith('.wav') ||
+ input.endsWith('.m4a') || input.endsWith('.ogg') ||
+ input.startsWith('http')) {
+ const result = await this.transcribe(input);
+ return result.text;
+ }
+ // Otherwise, speak the text and return info
+ const result = await this.speak(input);
+ return `[Audio generated: ${result.format}, ${result.audio.byteLength} bytes]`;
+ }
+ }
+ exports.AudioAgent = AudioAgent;
+ /**
+ * Factory function to create AudioAgent
+ */
+ function createAudioAgent(config) {
+ return new AudioAgent(config);
+ }
+ // Default export
+ exports.default = AudioAgent;
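
For callers that go through the generic `chat()` entry point, the compiled code above routes purely by input shape: `.mp3`/`.wav`/`.m4a`/`.ogg` suffixes and `http` prefixes are transcribed, anything else is synthesized, and the AI SDK is only imported lazily at call time. A hedged sketch of how that surfaces to a caller (the audio file path is illustrative):

```typescript
import { AudioAgent } from 'praisonai';

async function main() {
  const agent = new AudioAgent({ provider: 'openai', verbose: true });

  // Inputs ending in .mp3/.wav/.m4a/.ogg, or starting with 'http', are transcribed
  const transcript = await agent.chat('./meeting.mp3');
  console.log(transcript);

  // Any other string is synthesized; chat() returns a summary string such as
  // "[Audio generated: mp3, <byte count> bytes]" rather than the raw audio
  const note = await agent.chat('Welcome to the 1.5.0 release.');
  console.log(note);
}

// If the AI SDK peer dependencies are missing, speak()/transcribe() reject with
// "AI SDK not installed. Run: npm install ai @ai-sdk/<provider>"
main().catch(console.error);
```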
package/dist/agent/index.d.ts CHANGED
@@ -9,6 +9,8 @@
  */
  export { Agent, PraisonAIAgents, Agents } from './simple';
  export type { SimpleAgentConfig, PraisonAIAgentsConfig } from './simple';
+ export { AudioAgent, createAudioAgent } from './audio';
+ export type { AudioAgentConfig, SpeakOptions, TranscribeOptions, SpeakResult, TranscribeResult, AudioProvider } from './audio';
  export { Router, RouterAgent, createRouter, routeConditions } from './router';
  export type { RouterConfig, RouteConfig, RouteContext, SimpleRouterConfig, SimpleRouteConfig } from './router';
  export { Task } from './types';
package/dist/agent/index.js CHANGED
@@ -9,13 +9,17 @@
  * - Workflow: Step-based workflow execution (from workflows module)
  */
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.Task = exports.routeConditions = exports.createRouter = exports.RouterAgent = exports.Router = exports.Agents = exports.PraisonAIAgents = exports.Agent = void 0;
+ exports.Task = exports.routeConditions = exports.createRouter = exports.RouterAgent = exports.Router = exports.createAudioAgent = exports.AudioAgent = exports.Agents = exports.PraisonAIAgents = exports.Agent = void 0;
  exports.setTaskMode = setTaskMode;
  // Core exports - the main API surface
  var simple_1 = require("./simple");
  Object.defineProperty(exports, "Agent", { enumerable: true, get: function () { return simple_1.Agent; } });
  Object.defineProperty(exports, "PraisonAIAgents", { enumerable: true, get: function () { return simple_1.PraisonAIAgents; } });
  Object.defineProperty(exports, "Agents", { enumerable: true, get: function () { return simple_1.Agents; } });
+ // AudioAgent - Speech synthesis and transcription
+ var audio_1 = require("./audio");
+ Object.defineProperty(exports, "AudioAgent", { enumerable: true, get: function () { return audio_1.AudioAgent; } });
+ Object.defineProperty(exports, "createAudioAgent", { enumerable: true, get: function () { return audio_1.createAudioAgent; } });
  // Router exports
  var router_1 = require("./router");
  Object.defineProperty(exports, "Router", { enumerable: true, get: function () { return router_1.Router; } });
package/dist/index.d.ts CHANGED
@@ -38,11 +38,11 @@
  */
  export { Agent, Agents, PraisonAIAgents, Router } from './agent';
  export type { SimpleAgentConfig, PraisonAIAgentsConfig, SimpleRouterConfig, SimpleRouteConfig } from './agent';
- export { Workflow, parallel, route, loop, repeat } from './workflows';
- export type { WorkflowStep, WorkflowContext, StepResult } from './workflows';
+ export { Workflow, parallel, route, loop, repeat, Loop, loopPattern, Repeat, repeatPattern, WorkflowStep, } from './workflows';
+ export type { WorkflowContext, StepResult, WorkflowStepConfig, LoopConfig, LoopResult, RepeatConfig, RepeatResult, RepeatContext, StepContextConfig, StepOutputConfig, StepExecutionConfig, StepRoutingConfig } from './workflows';
  export { db, createDbAdapter, getDefaultDbAdapter, setDefaultDbAdapter } from './db';
  export type { DbAdapter, DbConfig, DbMessage, DbRun, DbTrace } from './db';
- export { BaseTool, ToolResult, ToolValidationError, validateTool, createTool, FunctionTool, tool, ToolRegistry, getRegistry, registerTool, getTool, type ToolConfig, type ToolContext, type ToolParameters } from './tools';
+ export { BaseTool, ToolResult, ToolValidationError, validateTool, createTool, FunctionTool, tool, ToolRegistry, getRegistry, registerTool, getTool, SubagentTool, createSubagentTool, createSubagentTools, createDelegator, type ToolConfig, type ToolContext, type ToolParameters, type SubagentToolConfig, type DelegatorConfig } from './tools';
  export * from './tools/arxivTools';
  export * from './tools/mcpSse';
  export { tools, registerBuiltinTools } from './tools/tools';
@@ -53,6 +53,7 @@ export { createLoggingMiddleware, createTimeoutMiddleware, createRedactionMiddle
  export { codeExecution, tavilySearch, tavilyExtract, tavilyCrawl, exaSearch, perplexitySearch, parallelSearch, firecrawlScrape, firecrawlCrawl, superagentGuard, superagentRedact, superagentVerify, valyuWebSearch, valyuFinanceSearch, valyuPaperSearch, valyuBioSearch, valyuPatentSearch, valyuSecSearch, valyuEconomicsSearch, valyuCompanyResearch, bedrockCodeInterpreter, bedrockBrowserNavigate, bedrockBrowserClick, bedrockBrowserFill, airweaveSearch, codeMode, registerCustomTool, createCustomTool, registerNpmTool, registerLocalTool } from './tools/builtins';
  export * from './session';
  export * from './knowledge';
+ export { MCPClient, createMCPClient, getMCPTools, MCPServer, createMCPServer, type MCPClientConfig, type MCPSession, type MCPTransportType, type MCPServerConfig, type MCPServerTool } from './mcp';
  export * from './llm';
  export * from './process';
  export * from './guardrails';
@@ -66,9 +67,14 @@ export { Memory, createMemory } from './memory/memory';
  export type { MemoryEntry, MemoryConfig } from './memory/memory';
  export { FileMemory, createFileMemory, type FileMemoryConfig, type FileMemoryEntry } from './memory/file-memory';
  export { AutoMemory, createAutoMemory, createLLMSummarizer, DEFAULT_POLICIES, type AutoMemoryConfig, type AutoMemoryPolicy, type AutoMemoryContext, type VectorStoreAdapter as AutoMemoryVectorStore, type KnowledgeBaseAdapter as AutoMemoryKnowledgeBase } from './memory/auto-memory';
+ export { MemoryHooks, createMemoryHooks, createLoggingHooks, createValidationHooks, createEncryptionHooks, type MemoryHooksConfig, type BeforeStoreHook, type AfterStoreHook, type BeforeRetrieveHook, type AfterRetrieveHook, type BeforeDeleteHook, type AfterDeleteHook, type BeforeSearchHook, type AfterSearchHook } from './memory/hooks';
+ export { RulesManager, createRulesManager, createSafetyRules, type Rule, type RuleAction, type RulePriority, type RuleContext, type RuleResult, type RulesEvaluation, type RulesManagerConfig } from './memory/rules-manager';
+ export { DocsManager, createDocsManager, type Doc, type DocChunk, type DocSearchResult, type DocsManagerConfig } from './memory/docs-manager';
  export { TelemetryCollector, AgentTelemetry, getTelemetry, enableTelemetry, disableTelemetry, cleanupTelemetry, createAgentTelemetry, type TelemetryEvent, type TelemetryConfig, type AgentStats } from './telemetry';
  export { AutoAgents, createAutoAgents, type AgentConfig, type TaskConfig, type TeamStructure, type AutoAgentsConfig } from './auto';
  export { ImageAgent, createImageAgent, type ImageAgentConfig, type ImageGenerationConfig, type ImageAnalysisConfig } from './agent/image';
+ export { AudioAgent, createAudioAgent } from './agent/audio';
+ export type { AudioAgentConfig, SpeakOptions as AudioSpeakOptions, TranscribeOptions as AudioTranscribeOptions, SpeakResult as AudioSpeakResult, TranscribeResult as AudioTranscribeResult, AudioProvider } from './agent/audio';
  export { DeepResearchAgent, createDeepResearchAgent, type DeepResearchConfig, type ResearchResponse, type Citation, type ReasoningStep } from './agent/research';
  export { QueryRewriterAgent, createQueryRewriterAgent, type QueryRewriterConfig, type RewriteResult, type RewriteStrategy } from './agent/query-rewriter';
  export { PromptExpanderAgent, createPromptExpanderAgent, type PromptExpanderConfig, type ExpandResult, type ExpandStrategy } from './agent/prompt-expander';
@@ -89,7 +95,7 @@ export { createProvider, getDefaultProvider, parseModelString, isProviderAvailab
  export { type SpanKind, type SpanStatus, type SpanData, type SpanEvent, type TraceData, type TraceContext, type SpanContext, type ObservabilityAdapter, type AttributionContext, type ProviderMetadata, type ObservabilityToolConfig, type ObservabilityToolName, type ObservabilityToolInfo, OBSERVABILITY_TOOLS, getObservabilityToolInfo, listObservabilityTools, hasObservabilityToolEnvVar, NoopObservabilityAdapter, noopAdapter, MemoryObservabilityAdapter, createMemoryAdapter, ConsoleObservabilityAdapter, createConsoleAdapter, createObservabilityAdapter, clearAdapterCache, setObservabilityAdapter, getObservabilityAdapter, resetObservabilityAdapter, trace, } from './observability';
  export { AISDK_PROVIDERS, PROVIDER_ALIASES, COMMUNITY_PROVIDERS, ADAPTERS, type ProviderInfo, type ProviderModalities, type CommunityProvider, type AdapterInfo, } from './llm/providers/ai-sdk/types';
  export { SlashCommandHandler, createSlashCommandHandler, registerCommand, parseSlashCommand, executeSlashCommand, isSlashCommand, type SlashCommand, type SlashCommandContext, type SlashCommandResult, CostTracker, createCostTracker, estimateTokens, formatCost, MODEL_PRICING, type ModelPricing, type TokenUsage as CostTokenUsage, type RequestStats, type SessionStats, InteractiveTUI, createInteractiveTUI, StatusDisplay, createStatusDisplay, HistoryManager, createHistoryManager, type TUIConfig, type TUIState, RepoMap, createRepoMap, getRepoTree, DEFAULT_IGNORE_PATTERNS, type RepoMapConfig, type FileInfo, type SymbolInfo, type RepoMapResult, GitManager, createGitManager, DiffViewer, createDiffViewer, type GitConfig, type GitStatus, type GitCommit, type GitDiff, type GitDiffFile, SandboxExecutor, createSandboxExecutor, sandboxExec, CommandValidator, DEFAULT_BLOCKED_COMMANDS, DEFAULT_BLOCKED_PATHS, type SandboxMode, type SandboxConfig, type ExecutionResult, AutonomyManager, createAutonomyManager, cliApprovalPrompt, MODE_POLICIES, type AutonomyMode, type ActionType, type ApprovalPolicy, type AutonomyConfig, type ActionRequest, type ActionDecision, Scheduler, createScheduler, cronExpressions, type ScheduleConfig, type ScheduledTask, type SchedulerStats, JobQueue, createJobQueue, MemoryJobStorage, FileJobStorage, createFileJobStorage, type Job, type JobStatus, type JobPriority, type JobQueueConfig, type JobStorageAdapter, type JobHandler, type JobContext, CheckpointManager, createCheckpointManager, MemoryCheckpointStorage, FileCheckpointStorage, createFileCheckpointStorage, type CheckpointData, type CheckpointConfig, type CheckpointStorage, FlowDisplay, createFlowDisplay, renderWorkflow, type FlowNode, type FlowGraph, type FlowDisplayConfig, BaseExternalAgent, ClaudeCodeAgent, GeminiCliAgent, CodexCliAgent, AiderAgent, GenericExternalAgent, getExternalAgentRegistry, createExternalAgent, externalAgentAsTool, type ExternalAgentConfig, type ExternalAgentResult, N8NIntegration, createN8NIntegration, triggerN8NWebhook, type N8NConfig, type N8NWebhookPayload, type N8NWorkflow, type N8NWorkflowNode, FastContext, createFastContext, getQuickContext, type FastContextConfig, type ContextSource, type FastContextResult } from './cli/features';
- export { generateText as aiGenerateText, streamText as aiStreamText, type GenerateTextOptions as AIGenerateTextOptions, type GenerateTextResult as AIGenerateTextResult, type StreamTextOptions as AIStreamTextOptions, type StreamTextResult as AIStreamTextResult, type TextStreamPart, generateObject as aiGenerateObject, streamObject as aiStreamObject, type GenerateObjectOptions as AIGenerateObjectOptions, type GenerateObjectResult as AIGenerateObjectResult, type StreamObjectOptions as AIStreamObjectOptions, type StreamObjectResult as AIStreamObjectResult, generateImage as aiGenerateImage, type GenerateImageOptions as AIGenerateImageOptions, type GenerateImageResult as AIGenerateImageResult, embed as aiEmbed, embedMany as aiEmbedMany, type EmbedOptions as AIEmbedOptions, type EmbedResult as AIEmbedResult, type EmbedManyResult as AIEmbedManyResult, defineTool, createToolSet, functionToTool, type ToolDefinition as AIToolDefinition, type ToolExecuteFunction, type ToolInput, type ToolOutput, createModel, getModel, parseModel, MODEL_ALIASES, listModelAliases, hasModelAlias, resolveModelAlias, type ModelConfig, type ModelId, createCachingMiddleware, createLoggingMiddleware as createAILoggingMiddleware, wrapModel, applyMiddleware, clearCache as clearAICache, getCacheStats as getAICacheStats, type Middleware as AIMiddleware, type MiddlewareConfig as AIMiddlewareConfig, type MiddlewareRequest, type MiddlewareResponse, createImagePart, createFilePart, createPdfPart, createTextPart, createMultimodalMessage, toMessageContent, base64ToUint8Array, uint8ArrayToBase64, isUrl, isDataUrl, type InputPart, type ImagePart as AIImagePart, type FilePart as AIFilePart, type PdfPart, type TextPart as AITextPart, createMCP, getMCPClient, closeMCPClient, closeAllMCPClients, mcpToolsToAITools, type MCPConfig, type MCPClient, type MCPTool, type MCPResource, type MCPPrompt, createHttpHandler, createExpressHandler, createHonoHandler, createFastifyHandler, createNestHandler, type ServerHandler, type ServerHandlerConfig, createRouteHandler, createPagesHandler, type RouteHandlerConfig, type UseChatConfig, createAgentLoop, AgentLoop, stopAfterSteps, stopWhenNoToolCalls, stopWhen, type AgentLoopConfig, type AgentStep as AIAgentStep, type AgentLoopResult, type StopCondition, convertToModelMessages, convertToUIMessages, validateUIMessages, safeValidateUIMessages, createTextMessage, createSystemMessage, hasPendingApprovals, getToolsNeedingApproval, createApprovalResponse, toUIMessageStreamResponse, pipeUIMessageStreamToResponse, type UIMessage, type UIMessagePart, type TextUIPart, type ReasoningUIPart, type ToolUIPart, type FileUIPart, type DataUIPart, type ModelMessage as AIModelMessage, type UIMessageStreamOptions, ApprovalManager, getApprovalManager, setApprovalManager, withApproval, ToolApprovalDeniedError, ToolApprovalTimeoutError, DANGEROUS_PATTERNS, isDangerous, createDangerousPatternChecker, type ToolApprovalConfig, type ToolApprovalRequest, type ToolApprovalResponse, type ApprovalState, type ApprovalHandler, generateSpeech, transcribe, SPEECH_MODELS, TRANSCRIPTION_MODELS, type GenerateSpeechOptions, type GenerateSpeechResult, type TranscribeOptions, type TranscribeResult, type TranscriptionSegment, enableDevTools, disableDevTools, isDevToolsEnabled, getDevToolsState, getDevToolsUrl, createDevToolsMiddleware, autoEnableDevTools, type DevToolsConfig, type DevToolsState, configureTelemetry, getTelemetrySettings, enableAITelemetry, disableAITelemetry, isTelemetryEnabled, initOpenTelemetry, getTracer, createAISpan, withSpan, 
createTelemetryMiddleware, recordEvent, getEvents, clearEvents, createTelemetrySettings, type TelemetrySettings as AITelemetrySettings, type Tracer as AITracer, type Span as AISpan, type SpanOptions as AISpanOptions, type SpanKind as AISpanKind, type SpanStatus as AISpanStatus, type TelemetryEvent as AITelemetryEvent, type OAuthClientProvider, } from './ai';
+ export { generateText as aiGenerateText, streamText as aiStreamText, type GenerateTextOptions as AIGenerateTextOptions, type GenerateTextResult as AIGenerateTextResult, type StreamTextOptions as AIStreamTextOptions, type StreamTextResult as AIStreamTextResult, type TextStreamPart, generateObject as aiGenerateObject, streamObject as aiStreamObject, type GenerateObjectOptions as AIGenerateObjectOptions, type GenerateObjectResult as AIGenerateObjectResult, type StreamObjectOptions as AIStreamObjectOptions, type StreamObjectResult as AIStreamObjectResult, generateImage as aiGenerateImage, type GenerateImageOptions as AIGenerateImageOptions, type GenerateImageResult as AIGenerateImageResult, embed as aiEmbed, embedMany as aiEmbedMany, type EmbedOptions as AIEmbedOptions, type EmbedResult as AIEmbedResult, type EmbedManyResult as AIEmbedManyResult, defineTool, createToolSet, functionToTool, type ToolDefinition as AIToolDefinition, type ToolExecuteFunction, type ToolInput, type ToolOutput, createModel, getModel, parseModel, MODEL_ALIASES, listModelAliases, hasModelAlias, resolveModelAlias, type ModelConfig, type ModelId, createCachingMiddleware, createLoggingMiddleware as createAILoggingMiddleware, wrapModel, applyMiddleware, clearCache as clearAICache, getCacheStats as getAICacheStats, type Middleware as AIMiddleware, type MiddlewareConfig as AIMiddlewareConfig, type MiddlewareRequest, type MiddlewareResponse, createImagePart, createFilePart, createPdfPart, createTextPart, createMultimodalMessage, toMessageContent, base64ToUint8Array, uint8ArrayToBase64, isUrl, isDataUrl, type InputPart, type ImagePart as AIImagePart, type FilePart as AIFilePart, type PdfPart, type TextPart as AITextPart, createMCP, getMCPClient, closeMCPClient, closeAllMCPClients, mcpToolsToAITools, type MCPConfig, type MCPClient as MCPClientType, type MCPTool, type MCPResource, type MCPPrompt, createHttpHandler, createExpressHandler, createHonoHandler, createFastifyHandler, createNestHandler, type ServerHandler, type ServerHandlerConfig, createRouteHandler, createPagesHandler, type RouteHandlerConfig, type UseChatConfig, createAgentLoop, AgentLoop, stopAfterSteps, stopWhenNoToolCalls, stopWhen, type AgentLoopConfig, type AgentStep as AIAgentStep, type AgentLoopResult, type StopCondition, convertToModelMessages, convertToUIMessages, validateUIMessages, safeValidateUIMessages, createTextMessage, createSystemMessage, hasPendingApprovals, getToolsNeedingApproval, createApprovalResponse, toUIMessageStreamResponse, pipeUIMessageStreamToResponse, type UIMessage, type UIMessagePart, type TextUIPart, type ReasoningUIPart, type ToolUIPart, type FileUIPart, type DataUIPart, type ModelMessage as AIModelMessage, type UIMessageStreamOptions, ApprovalManager, getApprovalManager, setApprovalManager, withApproval, ToolApprovalDeniedError, ToolApprovalTimeoutError, DANGEROUS_PATTERNS, isDangerous, createDangerousPatternChecker, type ToolApprovalConfig, type ToolApprovalRequest, type ToolApprovalResponse, type ApprovalState, type ApprovalHandler, generateSpeech, transcribe, SPEECH_MODELS, TRANSCRIPTION_MODELS, type GenerateSpeechOptions, type GenerateSpeechResult, type TranscribeOptions, type TranscribeResult, type TranscriptionSegment, enableDevTools, disableDevTools, isDevToolsEnabled, getDevToolsState, getDevToolsUrl, createDevToolsMiddleware, autoEnableDevTools, type DevToolsConfig, type DevToolsState, configureTelemetry, getTelemetrySettings, enableAITelemetry, disableAITelemetry, isTelemetryEnabled, initOpenTelemetry, getTracer, 
createAISpan, withSpan, createTelemetryMiddleware, recordEvent, getEvents, clearEvents, createTelemetrySettings, type TelemetrySettings as AITelemetrySettings, type Tracer as AITracer, type Span as AISpan, type SpanOptions as AISpanOptions, type SpanKind as AISpanKind, type SpanStatus as AISpanStatus, type TelemetryEvent as AITelemetryEvent, type OAuthClientProvider, } from './ai';
  export { createSlackBot, SlackBot, verifySlackSignature, parseSlackMessage, type SlackConfig, type SlackMessage, type SlackResponse, type SlackEventHandler, } from './integrations/slack';
  export { createNLPostgres, NLPostgresClient, createPostgresTool, type PostgresConfig as NLPostgresConfig, type TableSchema, type ColumnSchema, type QueryResult, type NLQueryResult, } from './integrations/postgres';
  export { createComputerUse, ComputerUseClient, createComputerUseAgent, createCLIApprovalPrompt, type ComputerUseConfig, type ComputerUseTools, type ComputerAction, type ScreenshotResult, } from './integrations/computer-use';
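
One detail worth noting from the root `index.d.ts` change above: the AudioAgent option and result types are re-exported under `Audio*` aliases, presumably to avoid colliding with the AI-layer `TranscribeOptions`/`TranscribeResult` that the root index also exports. A sketch of importing through the package root using those aliases (assumed usage, not taken from the package docs):

```typescript
import {
  createAudioAgent,
  type AudioSpeakOptions,
  type AudioTranscribeOptions,
  type AudioTranscribeResult,
} from 'praisonai';

const agent = createAudioAgent({ provider: 'openai' });

// The prefixed aliases map 1:1 onto SpeakOptions/TranscribeOptions from agent/audio
const speakOpts: AudioSpeakOptions = { voice: 'nova', speed: 1.25 };
const transcribeOpts: AudioTranscribeOptions = { language: 'en', segments: true };

export async function narrateAndTranscribe(text: string, file: string): Promise<AudioTranscribeResult> {
  await agent.speak(text, speakOpts);            // synthesis (result handling omitted)
  return agent.transcribe(file, transcribeOpts); // transcription typed via the root aliases
}
```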