@intelliweave/embedded 2.0.72-beta.8 → 2.1.73

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -65,6 +65,8 @@ declare class TokenWindowGroup<DataType> {
65
65
  recalculateTokens(): void;
66
66
  /** Add an item to the group */
67
67
  add(item: string | TokenWindowGroupItemParams<DataType>): TokenWindowGroupItem<DataType>;
68
+ /** Manually remove an item */
69
+ remove(itemId: string): boolean;
68
70
  /** Get all items as a string */
69
71
  getAllAsString(): string;
70
72
  /** Get all items. Doesn't return disabled items. */
@@ -181,6 +183,17 @@ declare class MCPKnowledgeClient {
181
183
  searchToolName?: string;
182
184
  /** Keep search function available for the AI to use. */
183
185
  searchToolVisible?: boolean;
186
+ /** Use the IntelliWeave proxy */
187
+ proxy?: {
188
+ /** If true, will send requests via the IntelliWeave MCP proxy */
189
+ enabled?: boolean;
190
+ /** The URL of the proxy server, defaults to the standard IntelliWeave proxy */
191
+ url?: string;
192
+ /** IntelliWeave API key */
193
+ apiKey?: string;
194
+ };
195
+ /** Pass extra headers to the MCP server */
196
+ headers?: Record<string, string>;
184
197
  };
185
198
  /** Constructor */
186
199
  constructor(config: MCPKnowledgeClient['config']);
@@ -191,16 +204,10 @@ declare class MCPKnowledgeClient {
191
204
  method: string;
192
205
  params?: {
193
206
  [x: string]: unknown;
194
- task?: {
195
- [x: string]: unknown;
196
- ttl?: number | null | undefined;
197
- pollInterval?: number | undefined;
198
- } | undefined;
199
207
  _meta?: {
200
208
  [x: string]: unknown;
201
209
  progressToken?: string | number | undefined;
202
210
  "io.modelcontextprotocol/related-task"?: {
203
- [x: string]: unknown;
204
211
  taskId: string;
205
212
  } | undefined;
206
213
  } | undefined;
@@ -211,8 +218,8 @@ declare class MCPKnowledgeClient {
211
218
  [x: string]: unknown;
212
219
  _meta?: {
213
220
  [x: string]: unknown;
221
+ progressToken?: string | number | undefined;
214
222
  "io.modelcontextprotocol/related-task"?: {
215
- [x: string]: unknown;
216
223
  taskId: string;
217
224
  } | undefined;
218
225
  } | undefined;
@@ -221,8 +228,8 @@ declare class MCPKnowledgeClient {
221
228
  [x: string]: unknown;
222
229
  _meta?: {
223
230
  [x: string]: unknown;
231
+ progressToken?: string | number | undefined;
224
232
  "io.modelcontextprotocol/related-task"?: {
225
- [x: string]: unknown;
226
233
  taskId: string;
227
234
  } | undefined;
228
235
  } | undefined;
@@ -231,16 +238,10 @@ declare class MCPKnowledgeClient {
231
238
  method: string;
232
239
  params?: {
233
240
  [x: string]: unknown;
234
- task?: {
235
- [x: string]: unknown;
236
- ttl?: number | null | undefined;
237
- pollInterval?: number | undefined;
238
- } | undefined;
239
241
  _meta?: {
240
242
  [x: string]: unknown;
241
243
  progressToken?: string | number | undefined;
242
244
  "io.modelcontextprotocol/related-task"?: {
243
- [x: string]: unknown;
244
245
  taskId: string;
245
246
  } | undefined;
246
247
  } | undefined;
@@ -251,8 +252,8 @@ declare class MCPKnowledgeClient {
251
252
  [x: string]: unknown;
252
253
  _meta?: {
253
254
  [x: string]: unknown;
255
+ progressToken?: string | number | undefined;
254
256
  "io.modelcontextprotocol/related-task"?: {
255
- [x: string]: unknown;
256
257
  taskId: string;
257
258
  } | undefined;
258
259
  } | undefined;
@@ -261,8 +262,8 @@ declare class MCPKnowledgeClient {
261
262
  [x: string]: unknown;
262
263
  _meta?: {
263
264
  [x: string]: unknown;
265
+ progressToken?: string | number | undefined;
264
266
  "io.modelcontextprotocol/related-task"?: {
265
- [x: string]: unknown;
266
267
  taskId: string;
267
268
  } | undefined;
268
269
  } | undefined;
@@ -467,6 +468,8 @@ declare class KnowledgeBase {
467
468
  allowWindowSources: boolean;
468
469
  /** If true, allows using knowledge specified in the global configuration object */
469
470
  allowGlobalConfigSources: boolean;
471
+ /** If true, allows the AI to search the knowledge base. If false, essentially disables RAG lookup. */
472
+ allowRagSearch: boolean;
470
473
  /** Constructor */
471
474
  constructor(ai: IntelliWeave);
472
475
  /** Ensures the internal knowledge is set correctly */
@@ -495,7 +498,7 @@ declare class KnowledgeBase {
495
498
  /** Create and register an external knowledge base source from a URL */
496
499
  registerSourceFromURL(url: string, id?: string): void;
497
500
  /** Clone this instance */
498
- clone(): KnowledgeBase;
501
+ clone(newIW: IntelliWeave): KnowledgeBase;
499
502
  /** Registers an MCP server as a knowledge base source */
500
503
  registerMCPSource(config: MCPKnowledgeClient['config']): MCPKnowledgeClient;
501
504
  }
@@ -825,6 +828,8 @@ interface SubAgentConfig {
825
828
  usageInstructions?: string;
826
829
  /** If true, will remove all Persona knowledge entries */
827
830
  clearExistingKnowledge?: boolean;
831
+ /** Disable RAG search for subagents. If true, only KB entries with isContext=true will be used. */
832
+ disableRagSearch?: boolean;
828
833
  /** Extra knowledge base sources for the sub-agent */
829
834
  knowledge?: KnowledgeFetcher;
830
835
  /** Optional extra configuration for the subagent instance */
@@ -990,6 +995,8 @@ declare class IntelliWeave extends EventTarget {
990
995
  private _lastSystemMsg;
991
996
  /** Get the system message prefix, before the KB entries are added */
992
997
  getContextPrefix(): Promise<string>;
998
+ /** KB items added in the last run */
999
+ private lastKBItems;
993
1000
  /** Get system message to send to the AI */
994
1001
  onBeforeMessageProcessing(): Promise<void>;
995
1002
  /** @private Process incoming message(s) from the AI. Can be used to respond to encoded actions in the text response. */
@@ -1205,7 +1212,7 @@ declare class IntelliWeaveTranscriptionNode extends VoiceChunkOutputNode {
1205
1212
  static debugExportWav: boolean;
1206
1213
  /** Server address for transcription */
1207
1214
  apiAddress: string;
1208
- /** OpenAI API key */
1215
+ /** IntelliWeave API key */
1209
1216
  apiKey: string;
1210
1217
  /** WebSocket connection */
1211
1218
  private ws?;
@@ -1225,6 +1232,32 @@ declare class IntelliWeaveTranscriptionNode extends VoiceChunkOutputNode {
1225
1232
  onSocketClose(): void;
1226
1233
  }
1227
1234
 
1235
+ /**
1236
+ * This AudioNode uses ElevenLabs to transcribe spoken audio to text.
1237
+ *
1238
+ * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text.
1239
+ */
1240
+ declare class ElevenLabsTranscriptionNode extends VoiceChunkOutputNode {
1241
+ /** ElevenLabs API key */
1242
+ apiKey: string;
1243
+ /** ElevenLabs stream connection */
1244
+ private connection?;
1245
+ /** True if currently transcribing */
1246
+ isTranscribing: boolean;
1247
+ /** WebSocket shutdown timer */
1248
+ private shutdownTimer?;
1249
+ /** Constructor */
1250
+ constructor(audioContext: AudioContext, apiKey: string);
1251
+ /** Called when a voice chunk is received */
1252
+ onVoiceChunk(buffer: Float32Array): Promise<void>;
1253
+ /** Start reading the stream */
1254
+ private startReading;
1255
+ /** Called when the voice recording ends */
1256
+ onVoiceEnd(buffers: Float32Array[]): Promise<void>;
1257
+ /** Called when a transcription is ready */
1258
+ onVoiceTranscription(text: string): void;
1259
+ }
1260
+
1228
1261
  /**
1229
1262
  * Handles speech recognition from the microphone
1230
1263
  *
@@ -1250,7 +1283,7 @@ declare class WebWeaverSpeechRecognition extends EventTarget {
1250
1283
  /** Returns true if speech recognition is supported by this persona and browser */
1251
1284
  get isSupported(): boolean;
1252
1285
  /** Currently active voice detection node */
1253
- voiceDetection?: IntelliWeaveTranscriptionNode | OpenAITranscriptionNode;
1286
+ voiceDetection?: IntelliWeaveTranscriptionNode | OpenAITranscriptionNode | ElevenLabsTranscriptionNode;
1254
1287
  /** Constructor */
1255
1288
  constructor(ai: IntelliWeave);
1256
1289
  private _skipEvents;
@@ -1345,6 +1378,29 @@ interface ElevenLabsTextToSpeechStream {
1345
1378
  }
1346
1379
  /** This creates a two-way connection to ElevenLabs that allows for realtime text upload and realtime speech download. */
1347
1380
  declare function createElevenLabsTextToSpeechStream(apiKey: string, voiceID: string, format?: string): ElevenLabsTextToSpeechStream;
1381
+ /** Return value of createElevenLabsSpeechToTextStream() */
1382
+ interface ElevenLabsSpeechToTextStream {
1383
+ /** Transcribed text output stream */
1384
+ stream: ReadableStream<string>;
1385
+ /** Function to send audio to be processed */
1386
+ sendAudio: (audio: ArrayBuffer) => void;
1387
+ /** Function to signal speech has stopped. Only necessary if commitStrategy is 'manual' */
1388
+ commit: () => void;
1389
+ /** Function to close the connection */
1390
+ close: () => void;
1391
+ /** True if this connection is closed */
1392
+ isClosed?: boolean;
1393
+ }
1394
+ /** This creates a two-way connection to ElevenLabs that allows for realtime speech upload and text download. */
1395
+ declare function createElevenLabsSpeechToTextStream(
1396
+ /** ElevenLabs API key */
1397
+ apiKey: string,
1398
+ /** The format of the audio data you're passing in */
1399
+ inputAudioFormat?: string,
1400
+ /** Input audio sample rate */
1401
+ sampleRate?: number,
1402
+ /** Should ElevenLabs detect speech end, or will you call commit() manually */
1403
+ commitStrategy?: 'manual' | 'vad'): ElevenLabsSpeechToTextStream;
1348
1404
 
1349
1405
  /**
1350
1406
  * This is a utility class for dealing with the ONNX runtime and model loading.
@@ -2206,4 +2262,4 @@ declare function useIntelliWeave(): IntelliWeave | undefined;
2206
2262
  /** React hook to add an external KB search hook. This can provide static KB entries, or perform an async search and return dynamic entries. */
2207
2263
  declare function useIntelliWeaveKnowledge(query: KnowledgeBaseSource['query'], dependencies?: any[]): void;
2208
2264
 
2209
- export { AILogic, AnthropicChat, AudioSystem, type BaseStatisticsEvent, type BufferType, BufferedWebSocket, type BuiltInActionFlags, ChatBase, type ChatBaseConfig, type ChatBaseToolConfig, ChatGPT, type ElevenLabsTextToSpeechStream, FixedBufferStream, type InputStatisticsEvent, IntelliWeave, type IntelliWeaveGlobalConfig, type IntelliWeaveInstructConfig, IntelliWeaveMessageParser, type IntelliWeaveParameterDefinition, IntelliWeaveProvider, IntelliWeaveTranscriptionNode, KnowledgeBase, type KnowledgeBaseActionParameterSchema, type KnowledgeBaseItem, type KnowledgeBaseItemAttachment, type KnowledgeBaseSearchEvent, type KnowledgeBaseSource, type KnowledgeBaseWebhookActionResponse, type KnowledgeBaseWebhookRequest, type KnowledgeBaseWebhookSearchResponse, type KnowledgeFetcher, Logging, MCPKnowledgeClient, type MessageReceiveEvent, type MessageSendEvent, ONNXModel, type ONNXTensors, OpenAITranscriptionNode, PCMPlayerNode, PCMReceiverNode, Resampler, type SessionStartEvent, type StatisticsEvent, type StatisticsEventType, type SubAgentConfig, SubAgents, type SupportedArrayBuffers, TokenWindow, TokenWindowGroup, type TokenWindowGroupItem, type TokenWindowGroupItemParams, type TokenWindowGroupItemSection, TokenWindowGroupItemSectionType, type ToolCallEvent, type UICloseEvent, type UIOpenEvent, VoiceChunkOutputNode, VoiceDetectionNode, type VoiceEndEvent, type VoiceStartEvent, type VoiceSubmitEvent, WebWeaverEmbed, type WebWeaverGPTConfig, WebWeaverSpeechOutput, WebWeaverSpeechRecognition, WebWeaverUI, audioToWav, convertParamsToJSONSchema, createElevenLabsTextToSpeechStream, floatTo16BitPCM, floatTo64BitPCM, getDefaultUserID, int16ToFloat32BitPCM, intelliweaveConfig, intelliweaveGlobalThis, sseEvents, track, trimWhitespaceInText, useIntelliWeave, useIntelliWeaveKnowledge };
2265
+ export { AILogic, AnthropicChat, AudioSystem, type BaseStatisticsEvent, type BufferType, BufferedWebSocket, type BuiltInActionFlags, ChatBase, type ChatBaseConfig, type ChatBaseToolConfig, ChatGPT, type ElevenLabsSpeechToTextStream, type ElevenLabsTextToSpeechStream, ElevenLabsTranscriptionNode, FixedBufferStream, type InputStatisticsEvent, IntelliWeave, type IntelliWeaveGlobalConfig, type IntelliWeaveInstructConfig, IntelliWeaveMessageParser, type IntelliWeaveParameterDefinition, IntelliWeaveProvider, IntelliWeaveTranscriptionNode, KnowledgeBase, type KnowledgeBaseActionParameterSchema, type KnowledgeBaseItem, type KnowledgeBaseItemAttachment, type KnowledgeBaseSearchEvent, type KnowledgeBaseSource, type KnowledgeBaseWebhookActionResponse, type KnowledgeBaseWebhookRequest, type KnowledgeBaseWebhookSearchResponse, type KnowledgeFetcher, Logging, MCPKnowledgeClient, type MessageReceiveEvent, type MessageSendEvent, ONNXModel, type ONNXTensors, OpenAITranscriptionNode, PCMPlayerNode, PCMReceiverNode, Resampler, type SessionStartEvent, type StatisticsEvent, type StatisticsEventType, type SubAgentConfig, SubAgents, type SupportedArrayBuffers, TokenWindow, TokenWindowGroup, type TokenWindowGroupItem, type TokenWindowGroupItemParams, type TokenWindowGroupItemSection, TokenWindowGroupItemSectionType, type ToolCallEvent, type UICloseEvent, type UIOpenEvent, VoiceChunkOutputNode, VoiceDetectionNode, type VoiceEndEvent, type VoiceStartEvent, type VoiceSubmitEvent, WebWeaverEmbed, type WebWeaverGPTConfig, WebWeaverSpeechOutput, WebWeaverSpeechRecognition, WebWeaverUI, audioToWav, convertParamsToJSONSchema, createElevenLabsSpeechToTextStream, createElevenLabsTextToSpeechStream, floatTo16BitPCM, floatTo64BitPCM, getDefaultUserID, int16ToFloat32BitPCM, intelliweaveConfig, intelliweaveGlobalThis, sseEvents, track, trimWhitespaceInText, useIntelliWeave, useIntelliWeaveKnowledge };