@intelliweave/embedded 2.0.72-beta.9 → 2.1.73

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -65,6 +65,8 @@ declare class TokenWindowGroup<DataType> {
65
65
  recalculateTokens(): void;
66
66
  /** Add an item to the group */
67
67
  add(item: string | TokenWindowGroupItemParams<DataType>): TokenWindowGroupItem<DataType>;
68
+ /** Manually remove an item */
69
+ remove(itemId: string): boolean;
68
70
  /** Get all items as a string */
69
71
  getAllAsString(): string;
70
72
  /** Get all items. Doesn't return disabled items. */
@@ -466,6 +468,8 @@ declare class KnowledgeBase {
466
468
  allowWindowSources: boolean;
467
469
  /** If true, allows using knowledge specified in the global configuration object */
468
470
  allowGlobalConfigSources: boolean;
471
+ /** If true, allows the AI to search the knowledge base. If false, essentially disables RAG lookup. */
472
+ allowRagSearch: boolean;
469
473
  /** Constructor */
470
474
  constructor(ai: IntelliWeave);
471
475
  /** Ensures the internal knowledge is set correctly */
@@ -494,7 +498,7 @@ declare class KnowledgeBase {
494
498
  /** Create and register an external knowledge base source from a URL */
495
499
  registerSourceFromURL(url: string, id?: string): void;
496
500
  /** Clone this instance */
497
- clone(): KnowledgeBase;
501
+ clone(newIW: IntelliWeave): KnowledgeBase;
498
502
  /** Registers an MCP server as a knowledge base source */
499
503
  registerMCPSource(config: MCPKnowledgeClient['config']): MCPKnowledgeClient;
500
504
  }
@@ -824,6 +828,8 @@ interface SubAgentConfig {
824
828
  usageInstructions?: string;
825
829
  /** If true, will remove all Persona knowledge entries */
826
830
  clearExistingKnowledge?: boolean;
831
+ /** Disable RAG search for subagents. If true, only KB entries with isContext=true will be used. */
832
+ disableRagSearch?: boolean;
827
833
  /** Extra knowledge base sources for the sub-agent */
828
834
  knowledge?: KnowledgeFetcher;
829
835
  /** Optional extra configuration for the subagent instance */
@@ -989,6 +995,8 @@ declare class IntelliWeave extends EventTarget {
989
995
  private _lastSystemMsg;
990
996
  /** Get the system message prefix, before the KB entries are added */
991
997
  getContextPrefix(): Promise<string>;
998
+ /** KB items added in the last run */
999
+ private lastKBItems;
992
1000
  /** Get system message to send to the AI */
993
1001
  onBeforeMessageProcessing(): Promise<void>;
994
1002
  /** @private Process incoming message(s) from the AI. Can be used to respond to encoded actions in the text response. */
@@ -1204,7 +1212,7 @@ declare class IntelliWeaveTranscriptionNode extends VoiceChunkOutputNode {
1204
1212
  static debugExportWav: boolean;
1205
1213
  /** Server address for transcription */
1206
1214
  apiAddress: string;
1207
- /** OpenAI API key */
1215
+ /** IntelliWeave API key */
1208
1216
  apiKey: string;
1209
1217
  /** WebSocket connection */
1210
1218
  private ws?;
@@ -1224,6 +1232,32 @@ declare class IntelliWeaveTranscriptionNode extends VoiceChunkOutputNode {
1224
1232
  onSocketClose(): void;
1225
1233
  }
1226
1234
 
1235
+ /**
1236
+ * This AudioNode uses ElevenLabs to transcribe spoken speech to text.
1237
+ *
1238
+ * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text.
1239
+ */
1240
+ declare class ElevenLabsTranscriptionNode extends VoiceChunkOutputNode {
1241
+ /** ElevenLabs API key */
1242
+ apiKey: string;
1243
+ /** ElevenLabs stream connection */
1244
+ private connection?;
1245
+ /** True if currently transcribing */
1246
+ isTranscribing: boolean;
1247
+ /** WebSocket shutdown timer */
1248
+ private shutdownTimer?;
1249
+ /** Constructor */
1250
+ constructor(audioContext: AudioContext, apiKey: string);
1251
+ /** Called when a voice chunk is received */
1252
+ onVoiceChunk(buffer: Float32Array): Promise<void>;
1253
+ /** Start reading the stream */
1254
+ private startReading;
1255
+ /** Called when the voice recording ends */
1256
+ onVoiceEnd(buffers: Float32Array[]): Promise<void>;
1257
+ /** Called when a transcription is ready */
1258
+ onVoiceTranscription(text: string): void;
1259
+ }
1260
+
1227
1261
  /**
1228
1262
  * Handles speech recognition from the microphone
1229
1263
  *
@@ -1249,7 +1283,7 @@ declare class WebWeaverSpeechRecognition extends EventTarget {
1249
1283
  /** Returns true if speech recognition is supported by this persona and browser */
1250
1284
  get isSupported(): boolean;
1251
1285
  /** Currently active voice detection node */
1252
- voiceDetection?: IntelliWeaveTranscriptionNode | OpenAITranscriptionNode;
1286
+ voiceDetection?: IntelliWeaveTranscriptionNode | OpenAITranscriptionNode | ElevenLabsTranscriptionNode;
1253
1287
  /** Constructor */
1254
1288
  constructor(ai: IntelliWeave);
1255
1289
  private _skipEvents;
@@ -1344,6 +1378,29 @@ interface ElevenLabsTextToSpeechStream {
1344
1378
  }
1345
1379
  /** This creates a two-way connection to ElevenLabs that allows for realtime text upload and realtime speech download. */
1346
1380
  declare function createElevenLabsTextToSpeechStream(apiKey: string, voiceID: string, format?: string): ElevenLabsTextToSpeechStream;
1381
+ /** Return value of createElevenLabsSpeechToTextStream() */
1382
+ interface ElevenLabsSpeechToTextStream {
1383
+ /** Transcribed text output stream */
1384
+ stream: ReadableStream<string>;
1385
+ /** Function to send audio to be processed */
1386
+ sendAudio: (audio: ArrayBuffer) => void;
1387
+ /** Function to signal speech has stopped. Only necessary if commitStrategy is 'manual' */
1388
+ commit: () => void;
1389
+ /** Function to close the connection */
1390
+ close: () => void;
1391
+ /** True if this connection is closed */
1392
+ isClosed?: boolean;
1393
+ }
1394
+ /** This creates a two-way connection to ElevenLabs that allows for realtime speech upload and text download. */
1395
+ declare function createElevenLabsSpeechToTextStream(
1396
+ /** ElevenLabs API key */
1397
+ apiKey: string,
1398
+ /** Format the audio data you're passing is in */
1399
+ inputAudioFormat?: string,
1400
+ /** Input audio sample rate */
1401
+ sampleRate?: number,
1402
+ /** Should ElevenLabs detect speech end, or will you call commit() manually */
1403
+ commitStrategy?: 'manual' | 'vad'): ElevenLabsSpeechToTextStream;
1347
1404
 
1348
1405
  /**
1349
1406
  * This is a utility class for dealing with the ONNX runtime and model loading.
@@ -2205,4 +2262,4 @@ declare function useIntelliWeave(): IntelliWeave | undefined;
2205
2262
  /** React hook to add an external KB search hook. This can provide static KB entries, or perform an async search and return dynamic entries. */
2206
2263
  declare function useIntelliWeaveKnowledge(query: KnowledgeBaseSource['query'], dependencies?: any[]): void;
2207
2264
 
2208
- export { AILogic, AnthropicChat, AudioSystem, type BaseStatisticsEvent, type BufferType, BufferedWebSocket, type BuiltInActionFlags, ChatBase, type ChatBaseConfig, type ChatBaseToolConfig, ChatGPT, type ElevenLabsTextToSpeechStream, FixedBufferStream, type InputStatisticsEvent, IntelliWeave, type IntelliWeaveGlobalConfig, type IntelliWeaveInstructConfig, IntelliWeaveMessageParser, type IntelliWeaveParameterDefinition, IntelliWeaveProvider, IntelliWeaveTranscriptionNode, KnowledgeBase, type KnowledgeBaseActionParameterSchema, type KnowledgeBaseItem, type KnowledgeBaseItemAttachment, type KnowledgeBaseSearchEvent, type KnowledgeBaseSource, type KnowledgeBaseWebhookActionResponse, type KnowledgeBaseWebhookRequest, type KnowledgeBaseWebhookSearchResponse, type KnowledgeFetcher, Logging, MCPKnowledgeClient, type MessageReceiveEvent, type MessageSendEvent, ONNXModel, type ONNXTensors, OpenAITranscriptionNode, PCMPlayerNode, PCMReceiverNode, Resampler, type SessionStartEvent, type StatisticsEvent, type StatisticsEventType, type SubAgentConfig, SubAgents, type SupportedArrayBuffers, TokenWindow, TokenWindowGroup, type TokenWindowGroupItem, type TokenWindowGroupItemParams, type TokenWindowGroupItemSection, TokenWindowGroupItemSectionType, type ToolCallEvent, type UICloseEvent, type UIOpenEvent, VoiceChunkOutputNode, VoiceDetectionNode, type VoiceEndEvent, type VoiceStartEvent, type VoiceSubmitEvent, WebWeaverEmbed, type WebWeaverGPTConfig, WebWeaverSpeechOutput, WebWeaverSpeechRecognition, WebWeaverUI, audioToWav, convertParamsToJSONSchema, createElevenLabsTextToSpeechStream, floatTo16BitPCM, floatTo64BitPCM, getDefaultUserID, int16ToFloat32BitPCM, intelliweaveConfig, intelliweaveGlobalThis, sseEvents, track, trimWhitespaceInText, useIntelliWeave, useIntelliWeaveKnowledge };
2265
+ export { AILogic, AnthropicChat, AudioSystem, type BaseStatisticsEvent, type BufferType, BufferedWebSocket, type BuiltInActionFlags, ChatBase, type ChatBaseConfig, type ChatBaseToolConfig, ChatGPT, type ElevenLabsSpeechToTextStream, type ElevenLabsTextToSpeechStream, ElevenLabsTranscriptionNode, FixedBufferStream, type InputStatisticsEvent, IntelliWeave, type IntelliWeaveGlobalConfig, type IntelliWeaveInstructConfig, IntelliWeaveMessageParser, type IntelliWeaveParameterDefinition, IntelliWeaveProvider, IntelliWeaveTranscriptionNode, KnowledgeBase, type KnowledgeBaseActionParameterSchema, type KnowledgeBaseItem, type KnowledgeBaseItemAttachment, type KnowledgeBaseSearchEvent, type KnowledgeBaseSource, type KnowledgeBaseWebhookActionResponse, type KnowledgeBaseWebhookRequest, type KnowledgeBaseWebhookSearchResponse, type KnowledgeFetcher, Logging, MCPKnowledgeClient, type MessageReceiveEvent, type MessageSendEvent, ONNXModel, type ONNXTensors, OpenAITranscriptionNode, PCMPlayerNode, PCMReceiverNode, Resampler, type SessionStartEvent, type StatisticsEvent, type StatisticsEventType, type SubAgentConfig, SubAgents, type SupportedArrayBuffers, TokenWindow, TokenWindowGroup, type TokenWindowGroupItem, type TokenWindowGroupItemParams, type TokenWindowGroupItemSection, TokenWindowGroupItemSectionType, type ToolCallEvent, type UICloseEvent, type UIOpenEvent, VoiceChunkOutputNode, VoiceDetectionNode, type VoiceEndEvent, type VoiceStartEvent, type VoiceSubmitEvent, WebWeaverEmbed, type WebWeaverGPTConfig, WebWeaverSpeechOutput, WebWeaverSpeechRecognition, WebWeaverUI, audioToWav, convertParamsToJSONSchema, createElevenLabsSpeechToTextStream, createElevenLabsTextToSpeechStream, floatTo16BitPCM, floatTo64BitPCM, getDefaultUserID, int16ToFloat32BitPCM, intelliweaveConfig, intelliweaveGlobalThis, sseEvents, track, trimWhitespaceInText, useIntelliWeave, useIntelliWeaveKnowledge };