@intelliweave/embedded 2.0.71 → 2.0.72-beta.2

This diff compares the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
@@ -1,5 +1,149 @@
- import { Client } from '@modelcontextprotocol/sdk/client/index.js';
  import { Optional } from 'utility-types';
+ import { Client } from '@modelcontextprotocol/sdk/client/index.js';
+
+ /**
+ * This class helps organize groups of tokenized text along with removing items when the window is full.
+ */
+ declare class TokenWindow {
+ /** Token window size */
+ size: number;
+ /** Token groups */
+ groups: TokenWindowGroup<any>[];
+ /** Create a new group */
+ createGroup(id: string): TokenWindowGroup<unknown>;
+ /** Get a group */
+ group<CustomDataType>(id: string): TokenWindowGroup<CustomDataType> | undefined;
+ /** Counts tokens in the specified text */
+ static countTokensInText(text: string): number;
+ /** Calculate current tokens in all groups */
+ countTokens(): number;
+ /** Remove overflow from all groups. */
+ removeOverflow(): void;
+ /** Remove one overflow item. Returns null if no items were able to be removed. */
+ private removeOneItem;
+ }
+ /** A token group. */
+ declare class TokenWindowGroup<DataType> {
+ /** Group ID */
+ id: string;
+ /** List of items */
+ items: TokenWindowGroupItem<DataType>[];
+ /**
+ * Weight controls how many items from this group should be kept in relation to the entire window. For example, if all
+ * groups have a weight of 1, each group removes items equally when full. If one has a weight of 2 while the rest are 1,
+ * that group will be allowed to keep double the amount of items.
+ */
+ weight: number;
+ /** Current total token count, computed automatically. Don't update this value manually. */
+ tokenCount: number;
+ /** Group item separator. This text is added in between each item in the token window. */
+ separator: string;
+ /** Token count padding added to each item. */
+ private itemPadding;
+ /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
+ setItemPadding(padding: number): this;
+ /** Sort function */
+ private sortFunction;
+ /** Set sort function */
+ sortBy(sortFunction: (a: TokenWindowGroupItem<DataType>, b: TokenWindowGroupItem<DataType>) => number): this;
+ /** Set separator. This text is added in between each item in the token window. */
+ setSeparator(separator: string): this;
+ /**
+ * Set weight. Weight controls how many items from this group should be kept
+ * in relation to the entire window. For example, if all groups have a weight
+ * of 1, each group removes items equally when full. If one has a weight of 2
+ * while the rest are 1, that group will be allowed to keep double the
+ * amount of items.
+ */
+ setWeight(weight: number): this;
+ /** Recalculate all tokens. Note this may take a while. */
+ recalculateTokens(): void;
+ /** Add an item to the group */
+ add(item: string | TokenWindowGroupItemParams<DataType>): TokenWindowGroupItem<DataType>;
+ /** Get all items as a string */
+ getAllAsString(): string;
+ /** Get all items. Doesn't return disabled items. */
+ getAll(): TokenWindowGroupItem<DataType>[];
+ /** Remove all items from this group */
+ empty(): void;
+ }
+ /** Token group item section types */
+ declare enum TokenWindowGroupItemSectionType {
+ /** Text items represent plain text. */
+ Text = "text",
+ /** Tool call items represent a tool call requested by the AI. */
+ ToolCall = "tool_call",
+ /** Tool result items represent the result of a tool call. */
+ ToolResult = "tool_result",
+ /** Thinking section */
+ Thinking = "thinking",
+ /** Other item types */
+ Other = "other"
+ }
+ /** Token group item */
+ interface TokenWindowGroupItem<DataType> {
+ /** Each item must have a unique ID. */
+ id: string;
+ /** True if this item should never be removed */
+ cannotRemove?: boolean;
+ /** Sorting order. If not specified, uses dateAdded instead. */
+ sortOrder: number;
+ /** Date this item was added */
+ dateAdded: number;
+ /** Token count in the content */
+ tokenCount: number;
+ /** This is the actual item that gets sent to the APIs. It will be in whatever format is required for the associated API. */
+ data?: DataType;
+ /** If disabled, this item will not be included and will not add to the token count. */
+ disabled?: boolean;
+ /** Message source, i.e. was this message created by the user, or by the AI? */
+ source: 'user' | 'assistant';
+ /**
+ * The string content of the item, or a summary of it. This is an autogenerated field, updated when the item is added/updated in the token window group.
+ * If `data` is a string, this will be the same as `data`. If `data` is more complex, this will be a text representation of all items in the `sections` array.
+ *
+ * Note: When the response contains text and tool calls, this will add in a summary of what's happening. For better display, use the `sections` array.
+ */
+ text?: string;
+ /** Message sections */
+ sections?: TokenWindowGroupItemSection[];
+ /** If this message was generated by the AI, this contains the token usage for this message. */
+ usage?: {
+ /** Number of tokens consumed from the data passed to the AI */
+ inputTokens: number;
+ /** Number of input tokens that were used in token caching */
+ cachedInputTokens: number;
+ /** Number of tokens consumed by the AI generating output */
+ outputTokens: number;
+ /** Total token usage */
+ totalTokens: number;
+ };
+ }
+ /** A section of a message returned by the AI */
+ interface TokenWindowGroupItemSection {
+ /** Section type */
+ type: TokenWindowGroupItemSectionType;
+ /** Text content when this section represents text or thinking */
+ text?: string;
+ /** The raw tool name the AI requested to be called. */
+ toolName?: string;
+ /** The ID of the KB action this tool call maps to, if any */
+ toolKbID?: string;
+ /** The name of the KB action this tool call maps to, if any */
+ toolKbName?: string;
+ /** The parameters the AI requested to be sent to the tool. Only available if type == 'tool_call' */
+ toolParameters?: any;
+ /** Successful response of the tool call. Will be null if toolErrorResponse is set. */
+ toolSuccessResponse?: any;
+ /** Error response of the tool call. Will be null if toolSuccessResponse is set. */
+ toolErrorResponse?: string;
+ /** Tool call ID. This can be used to match a tool call request with its result. */
+ toolCallInstanceID?: string;
+ /** True if this tool call should be hidden in the UI */
+ toolCallHiddenInUI?: 'always' | 'after-complete';
+ }
+ /** Token window group item input, without the autogenerated fields */
+ type TokenWindowGroupItemParams<DataType> = Omit<Optional<TokenWindowGroupItem<DataType>, 'id' | 'dateAdded' | 'sortOrder' | 'text' | 'source' | 'sections'>, 'tokenCount'>;

  /**
  * Allows an MCP server to be used as a knowledge source for IntelliWeave.
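The TokenWindow declarations added above replace the private copy that previously lived further down the file (see the removed block later in this diff). A minimal usage sketch based only on these declarations follows; the TokenWindow constructor and the package's export names are not shown in the diff, so creating an instance with `new TokenWindow()`, assigning `size` directly, and omitting imports are assumptions.

const tokenWindow = new TokenWindow();   // constructor signature assumed
tokenWindow.size = 8000;

const history = tokenWindow.createGroup('messages')
  .setWeight(2)          // keep roughly twice as many items as a weight-1 group
  .setSeparator('\n\n')
  .setItemPadding(4);    // extra tokens per item for host-added wrapping

history.add('Hello there');                                      // plain string form
history.add({ data: 'How can I help?', source: 'assistant' });   // TokenWindowGroupItemParams form

console.log(TokenWindow.countTokensInText('Hello there'));
console.log(tokenWindow.countTokens());
tokenWindow.removeOverflow();   // trims lower-priority items once the window is full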
@@ -377,12 +521,12 @@ interface KnowledgeBaseItem {
  name: string;
  /** Item tags. Helps with search optimization. */
  tags?: string;
- /** Item content. Can be a function to return a dynamic string. */
- content: string | (() => string);
+ /** Item content */
+ content: string;
  /** If true, this item will always be returned from all search results. */
  isContext?: boolean;
  /** If true, this item will not be visible to the AI. */
- disabled?: boolean;
+ disabled?: boolean | ((ai: IntelliWeave) => boolean);
  /** List of parameters for an action function. Can either use IW's format, or a JSON Schema object. */
  parameters?: KnowledgeBaseActionParameterSchema;
  /**
@@ -391,8 +535,8 @@ interface KnowledgeBaseItem {
  * that was performed. If an error is thrown, the AI will respond appropriately to the user.
  */
  action?: (input: any, ai: IntelliWeave) => (any | Promise<any>);
- /** If true, this item will be removed from the AI's message history after it gets called. This is a special case for LLMs that struggle with follow-up function calls and need to use the KB search functino first. */
- removeFromMessageHistory?: boolean;
+ /** If specified, will hide this action from the default UI after the AI finishes running it, or always hide it */
+ hideActionInUI?: 'always' | 'after-complete';
  }
  /** Parameter definition used by IntelliWeave */
  interface IntelliWeaveParameterDefinition {
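Taken together, the two hunks above change KnowledgeBaseItem in three ways: `content` is now a plain string, `disabled` can be a runtime predicate, and `hideActionInUI` replaces the removed `removeFromMessageHistory` flag. A hedged sketch of an item using the new fields (how the item is registered with IntelliWeave, and the type's export name, are not part of this diff):

const refundAction: KnowledgeBaseItem = {
  name: 'Issue refund',
  tags: 'billing refund payment',
  content: 'Issues a refund for the most recent order.',   // must now be a plain string
  disabled: () => new Date().getUTCHours() < 9,             // predicate form; the IntelliWeave instance is also passed if needed
  action: async (input) => ({ ok: true, orderID: input.orderID }),
  hideActionInUI: 'after-complete',                         // replaces removeFromMessageHistory
};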
@@ -721,84 +865,6 @@ interface IntelliWeaveInstructConfig {
  callback?: (txt: string) => void;
  }

- /**
- * This class helps organize groups of tokenized text along with removing items when the window is full.
- */
- declare class TokenWindow {
- /** Token window size */
- size: number;
- /** Token groups */
- groups: TokenWindowGroup<any>[];
- /** Create a new group */
- createGroup(id: string): TokenWindowGroup<unknown>;
- /** Get a group */
- group<CustomDataType>(id: string): TokenWindowGroup<CustomDataType> | undefined;
- /** Calculate current tokens in all groups */
- countTokens(): number;
- /** Remove overflow from all groups. */
- removeOverflow(): void;
- /** Remove one overflow item. Returns null if no items were able to be removed. */
- private removeOneItem;
- }
- /** A token group. */
- declare class TokenWindowGroup<CustomDataType> {
- /** Group ID */
- id: string;
- /** List of items */
- items: TokenWindowGroupItem<CustomDataType>[];
- /**
- * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
- * groups have a weight of 1, each group remove items equally if full. If one has a weight of 2 while the rest are 1,
- * that group will be allowed to keep double the amount of items.
- */
- weight: number;
- /** Current total token count, computed automatically. Don't update this value manually. */
- tokenCount: number;
- /** Group item separator */
- separator: string;
- /** Token count padding added to each item. */
- private itemPadding;
- /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
- setItemPadding(padding: number): this;
- /** Sort function */
- private sortFunction;
- /** Set sort function */
- sortBy(sortFunction: (a: TokenWindowGroupItem<CustomDataType>, b: TokenWindowGroupItem<CustomDataType>) => number): this;
- /** Set separator */
- setSeparator(separator: string): this;
- /** Set weight */
- setWeight(weight: number): this;
- /** Recalculate all tokens. Note this may take a while. */
- recalculateTokens(): void;
- /** Add an item to the group */
- add(item: string | Omit<Optional<TokenWindowGroupItem<CustomDataType>, 'id' | 'dateAdded' | 'sortOrder'>, 'tokenCount'>): TokenWindowGroupItem<CustomDataType>;
- /** Get all items as a string */
- getAllAsString(): string;
- /** Get all items. Doesn't return disabled items. */
- getAll(): TokenWindowGroupItem<CustomDataType>[];
- /** Remove all items from this group */
- empty(): void;
- }
- /** Token group item */
- interface TokenWindowGroupItem<CustomDataType> {
- /** Each item must have a unique ID. */
- id: string;
- /** The string content of the item */
- content: string;
- /** True if this item should never be removed */
- cannotRemove?: boolean;
- /** Sorting order. If not specified, uses dateAdded instead. */
- sortOrder: number;
- /** Date this item was added */
- dateAdded: number;
- /** Token count in the content */
- tokenCount: number;
- /** Anything to attach to this item */
- customData?: CustomDataType;
- /** If disabled, this item will not be included and will not add tot he token count. */
- disabled?: boolean;
- }
-
  /** Chat config options */
  interface ChatBaseConfig {
  /** API key */
@@ -819,8 +885,8 @@ interface ChatBaseConfig {
  maxTokens: number;
  /** Callback before the AI sends info to the LLM */
  onBeforeMessageProcessing?: () => void;
- /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and be called again with more updates. */
- onAIMessage?: (text: string, isChunk: boolean) => void;
+ /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */
+ onAIMessage?: (output: TokenWindowGroupItemParams<any>[], isPartial: boolean) => void;
  /** Callback when the AI starts performing an action */
  onAIToolStart?: (toolName: string, input: any) => void;
  }
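This appears to be a breaking change for anyone supplying onAIMessage through ChatBaseConfig: the callback now receives structured items instead of a raw string, and the streaming flag is renamed from isChunk to isPartial. A minimal sketch, assuming only the fields declared above:

const config: Partial<ChatBaseConfig> = {
  onAIMessage: (output, isPartial) => {
    const text = output.map((item) => item.text ?? '').join('\n');
    console.log(isPartial ? '[partial]' : '[final]', text);
  },
};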
@@ -834,19 +900,17 @@ interface ChatBaseToolConfig {
  params: JSONSchema7;
  /** Callback function to process the tool */
  callback: (params: any) => any;
- /** If true, this tool call will be removed from the message history after it is executed. */
- removeFromMessageHistory?: boolean;
  /** If true, this item can be removed if there's not enough context available. */
  canRemove?: boolean;
- /** Misc app context */
- [key: string]: any;
+ /** Knowledge base item this tool use represents */
+ kbItem?: KnowledgeBaseItem;
  }
  /**
  * API for interacting with chat APIs.
  */
  declare class ChatBase<
  /** Format for messages in the token window */
- MessageFormat = any,
+ DataType = any,
  /** Optional extended config */
  ConfigFormat extends ChatBaseConfig = ChatBaseConfig> {
  /** ID */
@@ -865,27 +929,58 @@ ConfigFormat extends ChatBaseConfig = ChatBaseConfig> {
  /** Token window management */
  tokenWindow: TokenWindow;
  /** Token window group used for the context message */
- get contextGroup(): TokenWindowGroup<any>;
+ get contextGroup(): TokenWindowGroup<string>;
  /** Token window group used for tools / actions */
  get toolGroup(): TokenWindowGroup<ChatBaseToolConfig>;
  /** Token window group used for messages */
- get messageGroup(): TokenWindowGroup<MessageFormat>;
+ get messageGroup(): TokenWindowGroup<DataType>;
+ /** Get the API base after stripping out exact endpoints, or undefined for the default */
+ getBaseURL(): string | undefined;
  /** Constructor */
  constructor(config: ConfigFormat);
- /** Send a message, and get the response */
- sendMessage(message: string): Promise<string>;
+ /** Send a message, and get the response as a list of message items. */
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<DataType>[]) => void): Promise<TokenWindowGroupItemParams<any>[]>;
  /** Add a user message to the message history */
  addUserMessage(message: string): void;
  /** Add an assistant message to the message history */
  addAssistantMessage(message: string): void;
+ /** Helper to add a plain text item */
+ protected addTextMessage(text: string, source: 'user' | 'assistant', data: DataType): void;
  /** Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
- onBeforeIncomingMessage(message: MessageFormat): void;
+ onBeforeIncomingMessage(message: DataType): void;
  /** Reset the conversation */
  resetConversation(): void;
  /** Trim message list */
  trimMessages(): Promise<void>;
  /** Register a tool. */
  registerTool(tool: ChatBaseToolConfig): TokenWindowGroupItem<ChatBaseToolConfig>;
+ /** Find a tool based on the AI-safe name */
+ protected findToolBySafeName(toolSafeName: string): ChatBaseToolConfig | undefined;
+ /** Execute the specified tool. Throws an error if the tool is undefined. */
+ protected executeTool(tool: ChatBaseToolConfig | undefined, input: any): Promise<string>;
+ }
+
+ /** Parses the response from `IntelliWeave.sendMessage()` or a collection of message items. */
+ declare class IntelliWeaveMessageParser {
+ /** New messages produced after sendMessage() was called */
+ messages: TokenWindowGroupItemParams<unknown>[];
+ /** Constructor */
+ constructor(items: TokenWindowGroupItemParams<unknown>[]);
+ /** Plain text output from the AI */
+ text(): string;
+ /** Total token usage */
+ tokenUsage(): {
+ cachedInputTokens: number;
+ inputTokens: number;
+ outputTokens: number;
+ totalTokens: number;
+ };
+ /** Component sections for display */
+ sections(): TokenWindowGroupItemParams<unknown>['sections'];
+ /** List all tool calls that took place */
+ toolCalls(): TokenWindowGroupItemSection[];
+ /** Find the response for a tool call */
+ toolResult(toolCallInstanceID: string): TokenWindowGroupItemSection | null;
  }

  /** Built-in action flags for the persona */
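ChatBase.sendMessage() now takes an optional onPartial callback and resolves to an array of message items rather than a string, and the new IntelliWeaveMessageParser wraps such an array for convenient reading. A sketch, assuming `chat` is a concrete ChatBase implementation configured elsewhere (not shown in this diff):

const items = await chat.sendMessage('What is the weather in Oslo?');
const parsed = new IntelliWeaveMessageParser(items);

console.log(parsed.text());                    // plain text reply
console.log(parsed.tokenUsage().totalTokens);  // aggregated usage

for (const call of parsed.toolCalls()) {
  // toolCallInstanceID links a tool call to its result.
  const result = call.toolCallInstanceID ? parsed.toolResult(call.toolCallInstanceID) : null;
  console.log(call.toolName, result?.toolSuccessResponse ?? result?.toolErrorResponse);
}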
@@ -925,6 +1020,8 @@ interface WebWeaverGPTConfig {
  textColor?: string;
  /** Display mode: 'closed' (default - starts minimized) or 'open' (always open) */
  displayMode?: 'closed' | 'open';
+ /** Layout preset: 'widget' (default) or 'fullscreen' */
+ layout?: 'widget' | 'fullscreen';
  /** Positioning mode: 'fixed' (default - floats on page) or 'container' (fills parent container) */
  positioningMode?: 'fixed' | 'container';
  /** Horizontal position: 'left' or 'right' (default: 'right') - only used when positioningMode is 'fixed' */
@@ -959,6 +1056,8 @@ interface WebWeaverGPTConfig {
  mcpServers?: MCPKnowledgeClient['config'][];
  /** Built-in action flags that are currently enabled */
  flags?: BuiltInActionFlags;
+ /** Allow custom chat provider */
+ onCreateProvider?: (config: ChatBaseConfig) => ChatBase;
  }
  /**
  * IntelliWeave interface, loads a Persona from the hub and allows you to interact with it. This is the main entry point into the IntelliWeave
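WebWeaverGPTConfig gains a layout preset and an onCreateProvider hook for supplying a custom chat backend. A hedged configuration sketch; OpenAICompatibleChat is a hypothetical ChatBase subclass standing in for whatever provider you implement:

const config: Partial<WebWeaverGPTConfig> = {
  layout: 'fullscreen',
  displayMode: 'open',
  onCreateProvider: (chatConfig) => new OpenAICompatibleChat(chatConfig),  // hypothetical subclass
};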
@@ -969,7 +1068,7 @@ interface WebWeaverGPTConfig {
  * - event `webweaver_loaded` - Fired when the AI is loaded with a new configuration. This is a global event that is fired on the window object.
  * - event `webweaver_error` - Fired when an error occurs during loading. This is a global event that is fired on the window object.
  * - event `input` - Fired when the user sends a message to the AI.
- * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isChunk` is true, the message is incomplete and will be followed by more events.
+ * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isPartial` is true, the message is incomplete and will be followed by more events.
  * - event `toolstart` - Fired when the AI starts performing an action.
  * - event `tool` - Fired when the AI finishes performing an action.
  */
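Code that listens for the output event and reads the old isChunk flag should switch to isPartial. A minimal sketch, assuming `ai` is an IntelliWeave instance (the class extends EventTarget); only detail.isPartial is documented in this diff, so no other detail fields are assumed:

ai.addEventListener('output', (event) => {
  const detail = (event as CustomEvent).detail;
  if (!detail.isPartial) {
    console.log('final AI message received');
  }
});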
@@ -978,8 +1077,8 @@ declare class IntelliWeave extends EventTarget {
  static version: string;
  /** Built-in actions version - increment this when adding new actions */
  static builtInActionsVersion: string;
- /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and be called again with more updates. */
- onAIMessage?: (text: string, isChunk: boolean) => void;
+ /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */
+ onAIMessage?: (messages: TokenWindowGroupItemParams<unknown>[], isPartial: boolean) => void;
  /** Callback when the AI starts performing an action */
  onAIToolStart?: ChatBaseConfig['onAIToolStart'];
  /** Current conversation ID */
@@ -1029,7 +1128,7 @@ declare class IntelliWeave extends EventTarget {
  /** URL of the IntelliWeave Hub API */
  hubAPI: string;
  /** Set model and load data from an API key */
- load(apiKey: string): Promise<WebWeaverGPTConfig>;
+ load(apiKey: string, config?: Partial<WebWeaverGPTConfig>): Promise<IntelliWeave>;
  /** Set the current model */
  setModel(id: string): void;
  private _lastSystemMsg;
@@ -1037,17 +1136,15 @@ declare class IntelliWeave extends EventTarget {
  getContextPrefix(): Promise<string>;
  /** Get system message to send to the AI */
  onBeforeMessageProcessing(): Promise<void>;
- /** @private Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
- processIncomingMessage(message: string, isChunk?: boolean): void;
+ /** @private Process incoming message(s) from the AI. Can be used to respond to encoded actions in the text response. */
+ processIncomingMessage(messages: TokenWindowGroupItemParams<unknown>[], isPartial?: boolean): void;
  /** True if currently processing a message */
  isProcessing: boolean;
- /** @private Last tracked token count for calculating per-message token usage */
- private _lastTrackedTokens;
  /** Send a message, and get the response */
- sendMessage(message: string): Promise<string | null>;
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<unknown>[]) => void): Promise<IntelliWeaveMessageParser>;
  /** @private Called when the AI wants to run a KB action */
  toolRunKBAction(kb: KnowledgeBaseItem, input: any): Promise<any>;
- /** Submit an analytics event asynchronously */
+ /** Submit an analytics event asynchronously. These events are for use in the Conversation Analytics code. For anonymous statistical analysis, use track() instead. */
  submitAnalyticsEvent(data: any): void;
  /** Reset the conversation */
  resetConversation(): void;
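At the IntelliWeave level, load() now accepts config overrides and resolves to the instance itself, and sendMessage() resolves to an IntelliWeaveMessageParser instead of a string. A sketch, assuming `ai` is an existing IntelliWeave instance (construction is not shown in this diff) and using a placeholder API key:

const loaded = await ai.load('YOUR_API_KEY', { layout: 'widget' });

const reply = await loaded.sendMessage('Summarize my last order', (items) => {
  // Invoked with the partial items while the response streams in.
  console.log('received', items.length, 'item(s) so far');
});

console.log(reply.text());
console.log(reply.tokenUsage());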
@@ -1057,12 +1154,14 @@ declare class IntelliWeave extends EventTarget {
  exportState(): {
  type: string;
  conversationID: string;
- messages: any[] | undefined;
+ messages: TokenWindowGroupItem<any>[] | undefined;
  };
  /** Import conversation state from JSON */
  importState(state: any): void;
  /** Clone this instance without any message history */
  clone(): IntelliWeave;
+ /** Get all messages in the conversation history */
+ get messages(): TokenWindowGroupItem<any>[];
  }

  /**
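The typed exportState() result and the new messages getter make it easier to persist and inspect a conversation. A sketch of a save/restore round trip, again assuming `ai` is an IntelliWeave instance (the storage key is illustrative):

const saved = ai.exportState();
localStorage.setItem('iw-conversation', JSON.stringify(saved));

// Later, on a fresh instance:
const raw = localStorage.getItem('iw-conversation');
if (raw) {
  ai.importState(JSON.parse(raw));
}
console.log(ai.messages.length);   // full history via the new getter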
@@ -1147,11 +1246,18 @@ declare class WebWeaverEmbed extends BaseComponent {
  private _lastBackground?;
  private _lastTextColor?;
  private _lastDisplayMode?;
+ private _lastLayout?;
+ private _lastPersonaName?;
+ private _lastHeaderLogo?;
  private _lastPositioningMode?;
  private _lastPositionX?;
  private _lastPositionY?;
  private _lastOffsetX?;
  private _lastOffsetY?;
+ /** Apply persona-based color variants as CSS variables */
+ private applyPersonaColorVariants;
+ /** Parse a color string to RGB (supports hex and rgb/rgba) */
+ private parseColorToRGB;
  /** Apply UI styles from config and attributes, prioritizing attributes */
  private applyConfigStylesAndAttributes;
  /** Called on update */
@@ -1175,9 +1281,11 @@ declare class WebWeaverEmbed extends BaseComponent {
  /** Process input text from the user */
  processInput(inputText: string): Promise<void>;
  /** Called when the AI responds with some text */
- onAIMessage(msg: string): Promise<void>;
- /** Called when the AI starts running a tool */
- onAIToolStart(toolName: string, args: any): void;
+ onAIMessage(messages: TokenWindowGroupItemParams<unknown>[], isPartial: boolean): Promise<void>;
+ /** Updates a text element */
+ updateTextElement(elementID: string, text: string): void;
+ /** Updates an info block element */
+ updateInfoElement(elementID: string, text: string, iconType: string): void;
  /** Called when a suggestion button is clicked */
  onSuggestionClick(e: Event, suggestion: string): void;
  /** Called when an LLM model is selected */