@intelliweave/embedded 2.0.71 → 2.0.72-beta.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,152 @@
1
1
  import React from 'react';
2
- import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3
2
  import { Optional } from 'utility-types';
3
+ import { Client } from '@modelcontextprotocol/sdk/client/index.js';
4
+
5
+ /**
6
+ * This class helps organize groups of tokenized text along with removing items when the window is full.
7
+ */
8
+ declare class TokenWindow {
9
+ /** Token window size */
10
+ size: number;
11
+ /** Token groups */
12
+ groups: TokenWindowGroup<any>[];
13
+ /** Create a new group */
14
+ createGroup(id: string): TokenWindowGroup<unknown>;
15
+ /** Get a group */
16
+ group<CustomDataType>(id: string): TokenWindowGroup<CustomDataType> | undefined;
17
+ /** Counts tokens in the specified text */
18
+ static countTokensInText(text: string): number;
19
+ /** Calculate current tokens in all groups */
20
+ countTokens(): number;
21
+ /** Remove overflow from all groups. */
22
+ removeOverflow(): void;
23
+ /** Remove one overflow item. Returns null if no items were able to be removed. */
24
+ private removeOneItem;
25
+ }
26
+ /** A token group. */
27
+ declare class TokenWindowGroup<DataType> {
28
+ /** Group ID */
29
+ id: string;
30
+ /** List of items */
31
+ items: TokenWindowGroupItem<DataType>[];
32
+ /**
33
+ * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
34
+ * groups have a weight of 1, each group removes items equally if full. If one has a weight of 2 while the rest are 1,
35
+ * that group will be allowed to keep double the amount of items.
36
+ */
37
+ weight: number;
38
+ /** Current total token count, computed automatically. Don't update this value manually. */
39
+ tokenCount: number;
40
+ /** Group item separator. This text is added in between each item in the token window. */
41
+ separator: string;
42
+ /** Token count padding added to each item. */
43
+ private itemPadding;
44
+ /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
45
+ setItemPadding(padding: number): this;
46
+ /** Sort function */
47
+ private sortFunction;
48
+ /** Set sort function */
49
+ sortBy(sortFunction: (a: TokenWindowGroupItem<DataType>, b: TokenWindowGroupItem<DataType>) => number): this;
50
+ /** Set separator. This text is added in between each item in the token window. */
51
+ setSeparator(separator: string): this;
52
+ /**
53
+ * Set weight. Weight controls how many items from this group should be kept
54
+ * in relation to the entire window. For example if all groups have a weight
55
+ * of 1, each group removes items equally if full. If one has a weight of 2
56
+ * while the rest are 1, that group will be allowed to keep double the
57
+ * amount of items.
58
+ */
59
+ setWeight(weight: number): this;
60
+ /** Recalculate all tokens. Note this may take a while. */
61
+ recalculateTokens(): void;
62
+ /** Add an item to the group */
63
+ add(item: string | TokenWindowGroupItemParams<DataType>): TokenWindowGroupItem<DataType>;
64
+ /** Get all items as a string */
65
+ getAllAsString(): string;
66
+ /** Get all items. Doesn't return disabled items. */
67
+ getAll(): TokenWindowGroupItem<DataType>[];
68
+ /** Remove all items from this group */
69
+ empty(): void;
70
+ }
71
+ /** Token group item section types */
72
+ declare enum TokenWindowGroupItemSectionType {
73
+ /** Text items represent plain text. */
74
+ Text = "text",
75
+ /** Tool call items represent a tool call requested by the AI. */
76
+ ToolCall = "tool_call",
77
+ /** Tool result items represent the result of a tool call. */
78
+ ToolResult = "tool_result",
79
+ /** Thinking section */
80
+ Thinking = "thinking",
81
+ /** Other item types */
82
+ Other = "other"
83
+ }
84
+ /** Token group item */
85
+ interface TokenWindowGroupItem<DataType> {
86
+ /** Each item must have a unique ID. */
87
+ id: string;
88
+ /** True if this item should never be removed */
89
+ cannotRemove?: boolean;
90
+ /** Sorting order. If not specified, uses dateAdded instead. */
91
+ sortOrder: number;
92
+ /** Date this item was added */
93
+ dateAdded: number;
94
+ /** Token count in the content */
95
+ tokenCount: number;
96
+ /** This is the actual item that gets sent to the APIs. It will be in whatever format is required for the associated API. */
97
+ data?: DataType;
98
+ /** If disabled, this item will not be included and will not add to the token count. */
99
+ disabled?: boolean;
100
+ /** Message source, ie was this message created by the user, or by the AI? */
101
+ source: 'user' | 'assistant';
102
+ /**
103
+ * The string content of the item, or a summary of it. This is an autogenerated field, updated when the item is added/updated in the token window group.
104
+ * If `data` is a string, this will be the same as `data`. If `data` is more complex, this will be a text representation of all items in the `sections` array.
105
+ *
106
+ * Note: When the response contains text and tool calls, this will add in a summary of what's happening. For better displaying, use the `sections` array.
107
+ */
108
+ text?: string;
109
+ /** Message sections */
110
+ sections?: TokenWindowGroupItemSection[];
111
+ /** If this message was generated by the AI, this contains the token usage for this message. */
112
+ usage?: {
113
+ /** Number of tokens consumed from the data passed to the AI */
114
+ inputTokens: number;
115
+ /** Number of input tokens that were used in token caching */
116
+ cachedInputTokens: number;
117
+ /** Number of tokens consumed by the AI generating output */
118
+ outputTokens: number;
119
+ /** Total token usage */
120
+ totalTokens: number;
121
+ };
122
+ /** True if this item is still being streamed */
123
+ streamingInProgress?: boolean;
124
+ }
125
+ /** A section of a message returned by the AI */
126
+ interface TokenWindowGroupItemSection {
127
+ /** Section type */
128
+ type: TokenWindowGroupItemSectionType;
129
+ /** Text content when this section represents text or thinking */
130
+ text?: string;
131
+ /** The raw tool name the AI requested to be called. */
132
+ toolName?: string;
133
+ /** The ID of the KB action this tool call maps to, if any */
134
+ toolKbID?: string;
135
+ /** The name of the KB action this tool call maps to, if any */
136
+ toolKbName?: string;
137
+ /** The parameters the AI requested to be sent to the tool. Only available if type == 'tool_call' */
138
+ toolParameters?: any;
139
+ /** Successful response of the tool call. Will be null if toolErrorResponse is set. */
140
+ toolSuccessResponse?: any;
141
+ /** Error response of the tool call. Will be null if toolSuccessResponse is set. */
142
+ toolErrorResponse?: string;
143
+ /** Tool call ID. This can be used to match a tool call request with its result. */
144
+ toolCallInstanceID?: string;
145
+ /** True if this tool call should be hidden in the UI */
146
+ toolCallHiddenInUI?: 'always' | 'after-complete';
147
+ }
148
+ /** Token window group item input, without the autogenerated fields */
149
+ type TokenWindowGroupItemParams<DataType> = Omit<Optional<TokenWindowGroupItem<DataType>, 'id' | 'dateAdded' | 'sortOrder' | 'text' | 'source' | 'sections'>, 'tokenCount'>;
4
150
 
5
151
  /**
6
152
  * Speech output
@@ -12,10 +158,16 @@ import { Optional } from 'utility-types';
12
158
  declare class WebWeaverSpeechOutput extends EventTarget {
13
159
  /** Reference to the AI */
14
160
  private ai?;
161
+ /** Automatically speak output from the AI */
162
+ autoSpeak: boolean;
163
+ /** If enabled, connections will be pre-emptively opened to speed up text-to-speech response times, if possible */
164
+ preemptiveConnection: boolean;
15
165
  /** Constructor */
16
166
  constructor(ai: IntelliWeave);
17
- /** Called when the AI speaks */
18
- onTextOutputFromAI(e: CustomEvent): void;
167
+ /** Message IDs we've processed */
168
+ private processedMessages;
169
+ /** Called when the AI responds */
170
+ onOutputFromAI(e: CustomEvent): void;
19
171
  /** Current player vars */
20
172
  private currentPlayerVolume?;
21
173
  private currentPlayer?;
@@ -27,8 +179,15 @@ declare class WebWeaverSpeechOutput extends EventTarget {
27
179
  private maxVolumeHeard;
28
180
  /** Get current (realtime) audio output volume level, from 0 to 1 */
29
181
  get volumeLevel(): number;
182
+ /** Queued messages to speak next */
183
+ private _queuedText;
30
184
  /** Speak the text */
31
185
  speak(text: string): Promise<void>;
186
+ private _queueActive;
187
+ _runQueue(): Promise<void>;
188
+ /** ElevenLabs connection pre-cache */
189
+ private _elevenLabsPrecachedConnection?;
190
+ private _getElevenLabsConnection;
32
191
  private _speakWithLock;
33
192
  /** True if currently playing audio */
34
193
  get isSpeaking(): boolean;
@@ -171,7 +330,7 @@ declare class IntelliWeaveTranscriptionNode extends VoiceChunkOutputNode {
171
330
  static debugExportWav: boolean;
172
331
  /** Server address for transcription */
173
332
  apiAddress: string;
174
- /** OpenAI API key */
333
+ /** IntelliWeave API key */
175
334
  apiKey: string;
176
335
  /** WebSocket connection */
177
336
  private ws?;
@@ -191,6 +350,32 @@ declare class IntelliWeaveTranscriptionNode extends VoiceChunkOutputNode {
191
350
  onSocketClose(): void;
192
351
  }
193
352
 
353
+ /**
354
+ * This AudioNode uses ElevenLabs to transcribe spoken speech to text.
355
+ *
356
+ * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text.
357
+ */
358
+ declare class ElevenLabsTranscriptionNode extends VoiceChunkOutputNode {
359
+ /** ElevenLabs API key */
360
+ apiKey: string;
361
+ /** ElevenLabs stream connection */
362
+ private connection?;
363
+ /** True if currently transcribing */
364
+ isTranscribing: boolean;
365
+ /** WebSocket shutdown timer */
366
+ private shutdownTimer?;
367
+ /** Constructor */
368
+ constructor(audioContext: AudioContext, apiKey: string);
369
+ /** Called when a voice chunk is received */
370
+ onVoiceChunk(buffer: Float32Array): Promise<void>;
371
+ /** Start reading the stream */
372
+ private startReading;
373
+ /** Called when the voice recording ends */
374
+ onVoiceEnd(buffers: Float32Array[]): Promise<void>;
375
+ /** Called when a transcription is ready */
376
+ onVoiceTranscription(text: string): void;
377
+ }
378
+
194
379
  /**
195
380
  * Handles speech recognition from the microphone
196
381
  *
@@ -216,7 +401,7 @@ declare class WebWeaverSpeechRecognition extends EventTarget {
216
401
  /** Returns true if speech recognition is supported by this persona and browser */
217
402
  get isSupported(): boolean;
218
403
  /** Currently active voice detection node */
219
- voiceDetection?: IntelliWeaveTranscriptionNode | OpenAITranscriptionNode;
404
+ voiceDetection?: IntelliWeaveTranscriptionNode | OpenAITranscriptionNode | ElevenLabsTranscriptionNode;
220
405
  /** Constructor */
221
406
  constructor(ai: IntelliWeave);
222
407
  private _skipEvents;
@@ -349,6 +534,17 @@ declare class MCPKnowledgeClient {
349
534
  searchToolName?: string;
350
535
  /** Keep search function available for the AI to use. */
351
536
  searchToolVisible?: boolean;
537
+ /** Use the IntelliWeave proxy */
538
+ proxy?: {
539
+ /** If true, will send requests via the IntelliWeave MCP proxy */
540
+ enabled?: boolean;
541
+ /** The URL of the proxy server, defaults to the standard IntelliWeave proxy */
542
+ url?: string;
543
+ /** IntelliWeave API key */
544
+ apiKey?: string;
545
+ };
546
+ /** Pass extra headers to the MCP server */
547
+ headers?: Record<string, string>;
352
548
  };
353
549
  /** Constructor */
354
550
  constructor(config: MCPKnowledgeClient['config']);
@@ -359,16 +555,10 @@ declare class MCPKnowledgeClient {
359
555
  method: string;
360
556
  params?: {
361
557
  [x: string]: unknown;
362
- task?: {
363
- [x: string]: unknown;
364
- ttl?: number | null | undefined;
365
- pollInterval?: number | undefined;
366
- } | undefined;
367
558
  _meta?: {
368
559
  [x: string]: unknown;
369
560
  progressToken?: string | number | undefined;
370
561
  "io.modelcontextprotocol/related-task"?: {
371
- [x: string]: unknown;
372
562
  taskId: string;
373
563
  } | undefined;
374
564
  } | undefined;
@@ -379,8 +569,8 @@ declare class MCPKnowledgeClient {
379
569
  [x: string]: unknown;
380
570
  _meta?: {
381
571
  [x: string]: unknown;
572
+ progressToken?: string | number | undefined;
382
573
  "io.modelcontextprotocol/related-task"?: {
383
- [x: string]: unknown;
384
574
  taskId: string;
385
575
  } | undefined;
386
576
  } | undefined;
@@ -389,8 +579,8 @@ declare class MCPKnowledgeClient {
389
579
  [x: string]: unknown;
390
580
  _meta?: {
391
581
  [x: string]: unknown;
582
+ progressToken?: string | number | undefined;
392
583
  "io.modelcontextprotocol/related-task"?: {
393
- [x: string]: unknown;
394
584
  taskId: string;
395
585
  } | undefined;
396
586
  } | undefined;
@@ -399,16 +589,10 @@ declare class MCPKnowledgeClient {
399
589
  method: string;
400
590
  params?: {
401
591
  [x: string]: unknown;
402
- task?: {
403
- [x: string]: unknown;
404
- ttl?: number | null | undefined;
405
- pollInterval?: number | undefined;
406
- } | undefined;
407
592
  _meta?: {
408
593
  [x: string]: unknown;
409
594
  progressToken?: string | number | undefined;
410
595
  "io.modelcontextprotocol/related-task"?: {
411
- [x: string]: unknown;
412
596
  taskId: string;
413
597
  } | undefined;
414
598
  } | undefined;
@@ -419,8 +603,8 @@ declare class MCPKnowledgeClient {
419
603
  [x: string]: unknown;
420
604
  _meta?: {
421
605
  [x: string]: unknown;
606
+ progressToken?: string | number | undefined;
422
607
  "io.modelcontextprotocol/related-task"?: {
423
- [x: string]: unknown;
424
608
  taskId: string;
425
609
  } | undefined;
426
610
  } | undefined;
@@ -429,8 +613,8 @@ declare class MCPKnowledgeClient {
429
613
  [x: string]: unknown;
430
614
  _meta?: {
431
615
  [x: string]: unknown;
616
+ progressToken?: string | number | undefined;
432
617
  "io.modelcontextprotocol/related-task"?: {
433
- [x: string]: unknown;
434
618
  taskId: string;
435
619
  } | undefined;
436
620
  } | undefined;
@@ -450,84 +634,6 @@ declare class MCPKnowledgeClient {
450
634
  private performToolCall;
451
635
  }
452
636
 
453
- /**
454
- * This class helps organize groups of tokenized text along with removing items when the window is full.
455
- */
456
- declare class TokenWindow {
457
- /** Token window size */
458
- size: number;
459
- /** Token groups */
460
- groups: TokenWindowGroup<any>[];
461
- /** Create a new group */
462
- createGroup(id: string): TokenWindowGroup<unknown>;
463
- /** Get a group */
464
- group<CustomDataType>(id: string): TokenWindowGroup<CustomDataType> | undefined;
465
- /** Calculate current tokens in all groups */
466
- countTokens(): number;
467
- /** Remove overflow from all groups. */
468
- removeOverflow(): void;
469
- /** Remove one overflow item. Returns null if no items were able to be removed. */
470
- private removeOneItem;
471
- }
472
- /** A token group. */
473
- declare class TokenWindowGroup<CustomDataType> {
474
- /** Group ID */
475
- id: string;
476
- /** List of items */
477
- items: TokenWindowGroupItem<CustomDataType>[];
478
- /**
479
- * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
480
- * groups have a weight of 1, each group remove items equally if full. If one has a weight of 2 while the rest are 1,
481
- * that group will be allowed to keep double the amount of items.
482
- */
483
- weight: number;
484
- /** Current total token count, computed automatically. Don't update this value manually. */
485
- tokenCount: number;
486
- /** Group item separator */
487
- separator: string;
488
- /** Token count padding added to each item. */
489
- private itemPadding;
490
- /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
491
- setItemPadding(padding: number): this;
492
- /** Sort function */
493
- private sortFunction;
494
- /** Set sort function */
495
- sortBy(sortFunction: (a: TokenWindowGroupItem<CustomDataType>, b: TokenWindowGroupItem<CustomDataType>) => number): this;
496
- /** Set separator */
497
- setSeparator(separator: string): this;
498
- /** Set weight */
499
- setWeight(weight: number): this;
500
- /** Recalculate all tokens. Note this may take a while. */
501
- recalculateTokens(): void;
502
- /** Add an item to the group */
503
- add(item: string | Omit<Optional<TokenWindowGroupItem<CustomDataType>, 'id' | 'dateAdded' | 'sortOrder'>, 'tokenCount'>): TokenWindowGroupItem<CustomDataType>;
504
- /** Get all items as a string */
505
- getAllAsString(): string;
506
- /** Get all items. Doesn't return disabled items. */
507
- getAll(): TokenWindowGroupItem<CustomDataType>[];
508
- /** Remove all items from this group */
509
- empty(): void;
510
- }
511
- /** Token group item */
512
- interface TokenWindowGroupItem<CustomDataType> {
513
- /** Each item must have a unique ID. */
514
- id: string;
515
- /** The string content of the item */
516
- content: string;
517
- /** True if this item should never be removed */
518
- cannotRemove?: boolean;
519
- /** Sorting order. If not specified, uses dateAdded instead. */
520
- sortOrder: number;
521
- /** Date this item was added */
522
- dateAdded: number;
523
- /** Token count in the content */
524
- tokenCount: number;
525
- /** Anything to attach to this item */
526
- customData?: CustomDataType;
527
- /** If disabled, this item will not be included and will not add tot he token count. */
528
- disabled?: boolean;
529
- }
530
-
531
637
  // ==================================================================================================
532
638
  // JSON Schema Draft 07
533
639
  // ==================================================================================================
@@ -715,8 +821,8 @@ interface ChatBaseConfig {
715
821
  maxTokens: number;
716
822
  /** Callback before the AI sends info to the LLM */
717
823
  onBeforeMessageProcessing?: () => void;
718
- /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and be called again with more updates. */
719
- onAIMessage?: (text: string, isChunk: boolean) => void;
824
+ /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */
825
+ onAIMessage?: (output: TokenWindowGroupItemParams<any>[], isPartial: boolean) => void;
720
826
  /** Callback when the AI starts performing an action */
721
827
  onAIToolStart?: (toolName: string, input: any) => void;
722
828
  }
@@ -730,19 +836,17 @@ interface ChatBaseToolConfig {
730
836
  params: JSONSchema7;
731
837
  /** Callback function to process the tool */
732
838
  callback: (params: any) => any;
733
- /** If true, this tool call will be removed from the message history after it is executed. */
734
- removeFromMessageHistory?: boolean;
735
839
  /** If true, this item can be removed if there's not enough context available. */
736
840
  canRemove?: boolean;
737
- /** Misc app context */
738
- [key: string]: any;
841
+ /** Knowledge base item this tool use represents */
842
+ kbItem?: KnowledgeBaseItem;
739
843
  }
740
844
  /**
741
845
  * API for interacting with chat APIs.
742
846
  */
743
847
  declare class ChatBase<
744
848
  /** Format for messages in the token window */
745
- MessageFormat = any,
849
+ DataType = any,
746
850
  /** Optional extended config */
747
851
  ConfigFormat extends ChatBaseConfig = ChatBaseConfig> {
748
852
  /** ID */
@@ -761,27 +865,95 @@ ConfigFormat extends ChatBaseConfig = ChatBaseConfig> {
761
865
  /** Token window management */
762
866
  tokenWindow: TokenWindow;
763
867
  /** Token window group used for the context message */
764
- get contextGroup(): TokenWindowGroup<any>;
868
+ get contextGroup(): TokenWindowGroup<string>;
765
869
  /** Token window group used for tools / actions */
766
870
  get toolGroup(): TokenWindowGroup<ChatBaseToolConfig>;
767
871
  /** Token window group used for messages */
768
- get messageGroup(): TokenWindowGroup<MessageFormat>;
872
+ get messageGroup(): TokenWindowGroup<DataType>;
873
+ /** Get the API base after stripping out exact endpoints, or undefined for the default */
874
+ getBaseURL(): string | undefined;
769
875
  /** Constructor */
770
876
  constructor(config: ConfigFormat);
771
- /** Send a message, and get the response */
772
- sendMessage(message: string): Promise<string>;
877
+ /** Send a message, and get the response as a string. */
878
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<DataType>[]) => void): Promise<TokenWindowGroupItemParams<any>[]>;
773
879
  /** Add a user message to the message history */
774
880
  addUserMessage(message: string): void;
775
881
  /** Add an assistant message to the message history */
776
882
  addAssistantMessage(message: string): void;
883
+ /** Helper to add a plain text item */
884
+ protected addTextMessage(text: string, source: 'user' | 'assistant', data: DataType): void;
777
885
  /** Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
778
- onBeforeIncomingMessage(message: MessageFormat): void;
886
+ onBeforeIncomingMessage(message: DataType): void;
779
887
  /** Reset the conversation */
780
888
  resetConversation(): void;
781
889
  /** Trim message list */
782
890
  trimMessages(): Promise<void>;
783
891
  /** Register a tool. */
784
892
  registerTool(tool: ChatBaseToolConfig): TokenWindowGroupItem<ChatBaseToolConfig>;
893
+ /** Find a tool based on the AI-safe name */
894
+ protected findToolBySafeName(toolSafeName: string): ChatBaseToolConfig | undefined;
895
+ /** Execute the specified tool. Throws an error if the tool is undefined. */
896
+ protected executeTool(tool: ChatBaseToolConfig | undefined, input: any): Promise<string>;
897
+ }
898
+
899
+ /** Parses the response from `IntelliWeave.sendMessage()` or a collection of message items. */
900
+ declare class IntelliWeaveMessageParser {
901
+ /** New messages produced after sendMessage() was called */
902
+ messages: TokenWindowGroupItemParams<unknown>[];
903
+ /** Constructor */
904
+ constructor(items: TokenWindowGroupItemParams<unknown>[]);
905
+ /** Plain text output from the AI */
906
+ text(): string;
907
+ /** Total token usage */
908
+ tokenUsage(): {
909
+ cachedInputTokens: number;
910
+ inputTokens: number;
911
+ outputTokens: number;
912
+ totalTokens: number;
913
+ };
914
+ /** Component sections for display */
915
+ sections(): TokenWindowGroupItemParams<unknown>['sections'];
916
+ /** List all tool calls that took place */
917
+ toolCalls(): TokenWindowGroupItemSection[];
918
+ /** Find the response for a tool call */
919
+ toolResult(toolCallInstanceID: string): TokenWindowGroupItemSection | null;
920
+ }
921
+
922
+ /** Handles subagents. This allows your Persona to use other Personas as tools. */
923
+ declare class SubAgents {
924
+ /** Reference to the main IntelliWeave instance */
925
+ ai: IntelliWeave;
926
+ /** Constructor */
927
+ constructor(ai: IntelliWeave);
928
+ /** Subagents */
929
+ subagents: SubAgentConfig[];
930
+ /** Cached subagents */
931
+ cachedSubagents: Record<string, IntelliWeave>;
932
+ /** Register a sub-agent */
933
+ register(config: SubAgentConfig): void;
934
+ /** Unregister subagent */
935
+ remove(id: string): void;
936
+ /** Run the subagent */
937
+ runQuery(config: SubAgentConfig, query: string): Promise<string>;
938
+ }
939
+ /** Sub-agent config */
940
+ interface SubAgentConfig {
941
+ /** ID of the sub-agent */
942
+ id: string;
943
+ /** API key for the persona. If not specified, uses the same api key as the main agent. */
944
+ apiKey?: string;
945
+ /** Name of the sub-agent */
946
+ name?: string;
947
+ /** Instructions for the main agent to use this sub agent */
948
+ usageInstructions?: string;
949
+ /** If true, will remove all Persona knowledge entries */
950
+ clearExistingKnowledge?: boolean;
951
+ /** Extra knowledge base sources for the sub-agent */
952
+ knowledge?: KnowledgeFetcher;
953
+ /** Optional extra configuration for the subagent instance */
954
+ config?: Partial<WebWeaverGPTConfig>;
955
+ /** Called when the subagent is loaded */
956
+ onAgentLoaded?: (agent: IntelliWeave) => Promise<void> | void;
785
957
  }
786
958
 
787
959
  /** Built-in action flags for the persona */
@@ -821,6 +993,8 @@ interface WebWeaverGPTConfig {
821
993
  textColor?: string;
822
994
  /** Display mode: 'closed' (default - starts minimized) or 'open' (always open) */
823
995
  displayMode?: 'closed' | 'open';
996
+ /** Layout preset: 'widget' (default) or 'fullscreen' */
997
+ layout?: 'widget' | 'fullscreen';
824
998
  /** Positioning mode: 'fixed' (default - floats on page) or 'container' (fills parent container) */
825
999
  positioningMode?: 'fixed' | 'container';
826
1000
  /** Horizontal position: 'left' or 'right' (default: 'right') - only used when positioningMode is 'fixed' */
@@ -831,6 +1005,8 @@ interface WebWeaverGPTConfig {
831
1005
  offsetX?: number;
832
1006
  /** Vertical offset from edge in pixels (default: 20) - only used when positioningMode is 'fixed' */
833
1007
  offsetY?: number;
1008
+ /** Identifier of an external app or service which manages this persona, if any. (eg. "chatterly") */
1009
+ managedBy?: string;
834
1010
  /** Voice information */
835
1011
  voice?: {
836
1012
  /** Provider ID */
@@ -855,6 +1031,10 @@ interface WebWeaverGPTConfig {
855
1031
  mcpServers?: MCPKnowledgeClient['config'][];
856
1032
  /** Built-in action flags that are currently enabled */
857
1033
  flags?: BuiltInActionFlags;
1034
+ /** Allow custom chat provider */
1035
+ onCreateProvider?: (config: ChatBaseConfig) => ChatBase;
1036
+ /** Subagents */
1037
+ subagents?: SubAgentConfig[];
858
1038
  }
859
1039
  /**
860
1040
  * IntelliWeave interface, loads a Persona from the hub and allows you to interact with it. This is the main entry point into the IntelliWeave
@@ -865,7 +1045,7 @@ interface WebWeaverGPTConfig {
865
1045
  * - event `webweaver_loaded` - Fired when the AI is loaded with a new configuration. This is a global event that is fired on the window object.
866
1046
  * - event `webweaver_error` - Fired when an error occurs during loading. This is a global event that is fired on the window object.
867
1047
  * - event `input` - Fired when the user sends a message to the AI.
868
- * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isChunk` is true, the message is incomplete and will be followed by more events.
1048
+ * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isPartial` is true, the message is incomplete and will be followed by more events.
869
1049
  * - event `toolstart` - Fired when the AI starts performing an action.
870
1050
  * - event `tool` - Fired when the AI finishes performing an action.
871
1051
  */
@@ -874,14 +1054,16 @@ declare class IntelliWeave extends EventTarget {
874
1054
  static version: string;
875
1055
  /** Built-in actions version - increment this when adding new actions */
876
1056
  static builtInActionsVersion: string;
877
- /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and be called again with more updates. */
878
- onAIMessage?: (text: string, isChunk: boolean) => void;
1057
+ /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */
1058
+ onAIMessage?: (messages: TokenWindowGroupItemParams<unknown>[], isPartial: boolean) => void;
879
1059
  /** Callback when the AI starts performing an action */
880
1060
  onAIToolStart?: ChatBaseConfig['onAIToolStart'];
881
1061
  /** Current conversation ID */
882
1062
  conversationID: string;
883
1063
  /** Knowledge database interface */
884
1064
  knowledgeBase: KnowledgeBase;
1065
+ /** Subagent interface */
1066
+ subAgents: SubAgents;
885
1067
  /** Last KB search that was performed */
886
1068
  private _lastKBsearch;
887
1069
  /** If set, the next time a request is made this is the KB result items that will be used, once-off. */
@@ -925,7 +1107,7 @@ declare class IntelliWeave extends EventTarget {
925
1107
  /** URL of the IntelliWeave Hub API */
926
1108
  hubAPI: string;
927
1109
  /** Set model and load data from an API key */
928
- load(apiKey: string): Promise<WebWeaverGPTConfig>;
1110
+ load(apiKey: string, config?: Partial<WebWeaverGPTConfig>): Promise<IntelliWeave>;
929
1111
  /** Set the current model */
930
1112
  setModel(id: string): void;
931
1113
  private _lastSystemMsg;
@@ -933,18 +1115,19 @@ declare class IntelliWeave extends EventTarget {
933
1115
  getContextPrefix(): Promise<string>;
934
1116
  /** Get system message to send to the AI */
935
1117
  onBeforeMessageProcessing(): Promise<void>;
936
- /** @private Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
937
- processIncomingMessage(message: string, isChunk?: boolean): void;
1118
+ /** @private Process incoming message(s) from the AI. Can be used to respond to encoded actions in the text response. */
1119
+ processIncomingMessage(messages: TokenWindowGroupItemParams<unknown>[], isPartial?: boolean): void;
938
1120
  /** True if currently processing a message */
939
1121
  isProcessing: boolean;
940
- /** @private Last tracked token count for calculating per-message token usage */
941
- private _lastTrackedTokens;
942
1122
  /** Send a message, and get the response */
943
- sendMessage(message: string): Promise<string | null>;
1123
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<unknown>[]) => void): Promise<IntelliWeaveMessageParser>;
944
1124
  /** @private Called when the AI wants to run a KB action */
945
1125
  toolRunKBAction(kb: KnowledgeBaseItem, input: any): Promise<any>;
946
- /** Submit an analytics event asynchronously */
1126
+ /** Submit an analytics event asynchronously. These events are for use in the Conversation Analytics code. For anonymous statistic analysis, use track() instead. */
1127
+ private activeAnalyticsPromises;
947
1128
  submitAnalyticsEvent(data: any): void;
1129
+ /** Wait for all analytics events to finish */
1130
+ waitForAnalytics(): Promise<void>;
948
1131
  /** Reset the conversation */
949
1132
  resetConversation(): void;
950
1133
  /** Insert a message as if the assistant has written it */
@@ -953,12 +1136,14 @@ declare class IntelliWeave extends EventTarget {
953
1136
  exportState(): {
954
1137
  type: string;
955
1138
  conversationID: string;
956
- messages: any[] | undefined;
1139
+ messages: TokenWindowGroupItem<any>[] | undefined;
957
1140
  };
958
1141
  /** Import conversation state from JSON */
959
1142
  importState(state: any): void;
960
1143
  /** Clone this instance without any message history */
961
1144
  clone(): IntelliWeave;
1145
+ /** Get all messages in the conversation history */
1146
+ get messages(): TokenWindowGroupItem<any>[];
962
1147
  }
963
1148
 
964
1149
  /**
@@ -975,8 +1160,16 @@ declare class KnowledgeBase {
975
1160
  lastResults: KnowledgeBaseItem[];
976
1161
  /** Individual knowledge base entries added manually by the application */
977
1162
  manualEntries: KnowledgeBaseItem[];
1163
+ /** If true, allows using globally defined sources from the browser window events */
1164
+ allowWindowSources: boolean;
1165
+ /** If true, allows using knowledge specified in the global configuration object */
1166
+ allowGlobalConfigSources: boolean;
978
1167
  /** Constructor */
979
1168
  constructor(ai: IntelliWeave);
1169
+ /** Ensures the internal knowledge is set correctly */
1170
+ ensureInternalKnowledge(): void;
1171
+ /** Clears all knowledge back to the default */
1172
+ reset(): void;
980
1173
  /**
981
1174
  * Register a new knowledge base source. You can pass either just a query function, or an ID and a query function.
982
1175
  *
@@ -1040,12 +1233,12 @@ interface KnowledgeBaseItem {
1040
1233
  name: string;
1041
1234
  /** Item tags. Helps with search optimization. */
1042
1235
  tags?: string;
1043
- /** Item content. Can be a function to return a dynamic string. */
1044
- content: string | (() => string);
1236
+ /** Item content */
1237
+ content: string;
1045
1238
  /** If true, this item will always be returned from all search results. */
1046
1239
  isContext?: boolean;
1047
1240
  /** If true, this item will not be visible to the AI. */
1048
- disabled?: boolean;
1241
+ disabled?: boolean | ((ai: IntelliWeave) => boolean);
1049
1242
  /** List of parameters for an action function. Can either use IW's format, or a JSON Schema object. */
1050
1243
  parameters?: KnowledgeBaseActionParameterSchema;
1051
1244
  /**
@@ -1054,8 +1247,27 @@ interface KnowledgeBaseItem {
1054
1247
  * that was performed. If an error is thrown, the AI will respond appropriately to the user.
1055
1248
  */
1056
1249
  action?: (input: any, ai: IntelliWeave) => (any | Promise<any>);
1057
- /** If true, this item will be removed from the AI's message history after it gets called. This is a special case for LLMs that struggle with follow-up function calls and need to use the KB search functino first. */
1058
- removeFromMessageHistory?: boolean;
1250
+ /** If specified, will hide this action from the default UI after the AI finishes running it, or always hide it */
1251
+ hideActionInUI?: 'always' | 'after-complete';
1252
+ /** Attachments such as images, etc */
1253
+ attachments?: KnowledgeBaseItemAttachment[];
1254
+ }
1255
+ /** Knowledge base item attachment, such as an image, file, etc. */
1256
+ interface KnowledgeBaseItemAttachment {
1257
+ /** UUID */
1258
+ uuid: string;
1259
+ /** Attachment mime type */
1260
+ mimeType: string;
1261
+ /** File name */
1262
+ name: string;
1263
+ /** Full URL to access the file. This is required for the AI to be able to see the attachment. */
1264
+ url?: string;
1265
+ /** UNIX timestamp (milliseconds since epoch) when the file was added */
1266
+ dateAdded?: number;
1267
+ /** Internal path to where the file is stored */
1268
+ path?: string;
1269
+ /** File size */
1270
+ size?: number;
1059
1271
  }
1060
1272
  /** Parameter definition used by IntelliWeave */
1061
1273
  interface IntelliWeaveParameterDefinition {
@@ -1105,6 +1317,10 @@ declare const WebWeaverUI: (props: {
1105
1317
  hubAPI?: string;
1106
1318
  /** Display mode: 'closed' (default - starts minimized) or 'open' (always open) */
1107
1319
  displayMode?: "closed" | "open";
1320
+ /** Layout preset: 'widget' (default) or 'fullscreen' */
1321
+ layout?: "widget" | "fullscreen";
1322
+ /** Fullscreen header: 'show' (default) or 'hidden' */
1323
+ header?: "show" | "hidden";
1108
1324
  /** Positioning mode: 'fixed' (default - floats on page) or 'container' (fills parent container) */
1109
1325
  positioningMode?: "fixed" | "container";
1110
1326
  /** Horizontal position: 'left' or 'right' (default: 'right') - only used when positioningMode is 'fixed' */