@intelliweave/embedded 2.0.72-beta.1 → 2.0.72-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,150 @@
1
1
  import React from 'react';
2
- import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3
2
  import { Optional } from 'utility-types';
3
+ import { Client } from '@modelcontextprotocol/sdk/client/index.js';
4
+
5
+ /**
6
+ * This class helps organize groups of tokenized text along with removing items when the window is full.
7
+ */
8
+ declare class TokenWindow {
9
+ /** Token window size */
10
+ size: number;
11
+ /** Token groups */
12
+ groups: TokenWindowGroup<any>[];
13
+ /** Create a new group */
14
+ createGroup(id: string): TokenWindowGroup<unknown>;
15
+ /** Get a group */
16
+ group<CustomDataType>(id: string): TokenWindowGroup<CustomDataType> | undefined;
17
+ /** Counts tokens in the specified text */
18
+ static countTokensInText(text: string): number;
19
+ /** Calculate current tokens in all groups */
20
+ countTokens(): number;
21
+ /** Remove overflow from all groups. */
22
+ removeOverflow(): void;
23
+ /** Remove one overflow item. Returns null if no items were able to be removed. */
24
+ private removeOneItem;
25
+ }
26
+ /** A token group. */
27
+ declare class TokenWindowGroup<DataType> {
28
+ /** Group ID */
29
+ id: string;
30
+ /** List of items */
31
+ items: TokenWindowGroupItem<DataType>[];
32
+ /**
33
+ * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
34
+ * groups have a weight of 1, each group removes items equally when full. If one has a weight of 2 while the rest are 1,
35
+ * that group will be allowed to keep double the amount of items.
36
+ */
37
+ weight: number;
38
+ /** Current total token count, computed automatically. Don't update this value manually. */
39
+ tokenCount: number;
40
+ /** Group item separator. This text is added in between each item in the token window. */
41
+ separator: string;
42
+ /** Token count padding added to each item. */
43
+ private itemPadding;
44
+ /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
45
+ setItemPadding(padding: number): this;
46
+ /** Sort function */
47
+ private sortFunction;
48
+ /** Set sort function */
49
+ sortBy(sortFunction: (a: TokenWindowGroupItem<DataType>, b: TokenWindowGroupItem<DataType>) => number): this;
50
+ /** Set separator. This text is added in between each item in the token window. */
51
+ setSeparator(separator: string): this;
52
+ /**
53
+ * Set weight. Weight controls how many items from this group should be kept
54
+ * in relation to the entire window. For example if all groups have a weight
55
+ * of 1, each group removes items equally when full. If one has a weight of 2
56
+ * while the rest are 1, that group will be allowed to keep double the
57
+ * amount of items.
58
+ */
59
+ setWeight(weight: number): this;
60
+ /** Recalculate all tokens. Note this may take a while. */
61
+ recalculateTokens(): void;
62
+ /** Add an item to the group */
63
+ add(item: string | TokenWindowGroupItemParams<DataType>): TokenWindowGroupItem<DataType>;
64
+ /** Get all items as a string */
65
+ getAllAsString(): string;
66
+ /** Get all items. Doesn't return disabled items. */
67
+ getAll(): TokenWindowGroupItem<DataType>[];
68
+ /** Remove all items from this group */
69
+ empty(): void;
70
+ }
71
+ /** Token group item section types */
72
+ declare enum TokenWindowGroupItemSectionType {
73
+ /** Text items represent plain text. */
74
+ Text = "text",
75
+ /** Tool call items represent a tool call requested by the AI. */
76
+ ToolCall = "tool_call",
77
+ /** Tool result items represent the result of a tool call. */
78
+ ToolResult = "tool_result",
79
+ /** Thinking section */
80
+ Thinking = "thinking",
81
+ /** Other item types */
82
+ Other = "other"
83
+ }
84
+ /** Token group item */
85
+ interface TokenWindowGroupItem<DataType> {
86
+ /** Each item must have a unique ID. */
87
+ id: string;
88
+ /** True if this item should never be removed */
89
+ cannotRemove?: boolean;
90
+ /** Sorting order. If not specified, uses dateAdded instead. */
91
+ sortOrder: number;
92
+ /** Date this item was added */
93
+ dateAdded: number;
94
+ /** Token count in the content */
95
+ tokenCount: number;
96
+ /** This is the actual item that gets sent to the APIs. It will be in whatever format is required for the associated API. */
97
+ data?: DataType;
98
+ /** If disabled, this item will not be included and will not add to the token count. */
99
+ disabled?: boolean;
100
+ /** Message source, ie was this message created by the user, or by the AI? */
101
+ source: 'user' | 'assistant';
102
+ /**
103
+ * The string content of the item, or a summary of it. This is an autogenerated field, updated when the item is added/updated in the token window group.
104
+ * If `data` is a string, this will be the same as `data`. If `data` is more complex, this will be a text representation of all items in the `sections` array.
105
+ *
106
+ * Note: When the response contains text and tool calls, this will add in a summary of what's happening. For better displaying, use the `sections` array.
107
+ */
108
+ text?: string;
109
+ /** Message sections */
110
+ sections?: TokenWindowGroupItemSection[];
111
+ /** If this message was generated by the AI, this contains the token usage for this message. */
112
+ usage?: {
113
+ /** Number of tokens consumed from the data passed to the AI */
114
+ inputTokens: number;
115
+ /** Number of input tokens that were used in token caching */
116
+ cachedInputTokens: number;
117
+ /** Number of tokens consumed by the AI generating output */
118
+ outputTokens: number;
119
+ /** Total token usage */
120
+ totalTokens: number;
121
+ };
122
+ }
123
+ /** A section of a message returned by the AI */
124
+ interface TokenWindowGroupItemSection {
125
+ /** Section type */
126
+ type: TokenWindowGroupItemSectionType;
127
+ /** Text content when this section represents text or thinking */
128
+ text?: string;
129
+ /** The raw tool name the AI requested to be called. */
130
+ toolName?: string;
131
+ /** The ID of the KB action this tool call maps to, if any */
132
+ toolKbID?: string;
133
+ /** The name of the KB action this tool call maps to, if any */
134
+ toolKbName?: string;
135
+ /** The parameters the AI requested to be sent to the tool. Only available if type == 'tool_call' */
136
+ toolParameters?: any;
137
+ /** Successful response of the tool call. Will be null if toolErrorResponse is set. */
138
+ toolSuccessResponse?: any;
139
+ /** Error response of the tool call. Will be null if toolSuccessResponse is set. */
140
+ toolErrorResponse?: string;
141
+ /** Tool call ID. This can be used to match a tool call request with its result. */
142
+ toolCallInstanceID?: string;
143
+ /** True if this tool call should be hidden in the UI */
144
+ toolCallHiddenInUI?: 'always' | 'after-complete';
145
+ }
146
+ /** Token window group item input, without the autogenerated fields */
147
+ type TokenWindowGroupItemParams<DataType> = Omit<Optional<TokenWindowGroupItem<DataType>, 'id' | 'dateAdded' | 'sortOrder' | 'text' | 'source' | 'sections'>, 'tokenCount'>;
4
148
 
5
149
  /**
6
150
  * Speech output
@@ -450,84 +594,6 @@ declare class MCPKnowledgeClient {
450
594
  private performToolCall;
451
595
  }
452
596
 
453
- /**
454
- * This class helps organize groups of tokenized text along with removing items when the window is full.
455
- */
456
- declare class TokenWindow {
457
- /** Token window size */
458
- size: number;
459
- /** Token groups */
460
- groups: TokenWindowGroup<any>[];
461
- /** Create a new group */
462
- createGroup(id: string): TokenWindowGroup<unknown>;
463
- /** Get a group */
464
- group<CustomDataType>(id: string): TokenWindowGroup<CustomDataType> | undefined;
465
- /** Calculate current tokens in all groups */
466
- countTokens(): number;
467
- /** Remove overflow from all groups. */
468
- removeOverflow(): void;
469
- /** Remove one overflow item. Returns null if no items were able to be removed. */
470
- private removeOneItem;
471
- }
472
- /** A token group. */
473
- declare class TokenWindowGroup<CustomDataType> {
474
- /** Group ID */
475
- id: string;
476
- /** List of items */
477
- items: TokenWindowGroupItem<CustomDataType>[];
478
- /**
479
- * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
480
- * groups have a weight of 1, each group remove items equally if full. If one has a weight of 2 while the rest are 1,
481
- * that group will be allowed to keep double the amount of items.
482
- */
483
- weight: number;
484
- /** Current total token count, computed automatically. Don't update this value manually. */
485
- tokenCount: number;
486
- /** Group item separator */
487
- separator: string;
488
- /** Token count padding added to each item. */
489
- private itemPadding;
490
- /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
491
- setItemPadding(padding: number): this;
492
- /** Sort function */
493
- private sortFunction;
494
- /** Set sort function */
495
- sortBy(sortFunction: (a: TokenWindowGroupItem<CustomDataType>, b: TokenWindowGroupItem<CustomDataType>) => number): this;
496
- /** Set separator */
497
- setSeparator(separator: string): this;
498
- /** Set weight */
499
- setWeight(weight: number): this;
500
- /** Recalculate all tokens. Note this may take a while. */
501
- recalculateTokens(): void;
502
- /** Add an item to the group */
503
- add(item: string | Omit<Optional<TokenWindowGroupItem<CustomDataType>, 'id' | 'dateAdded' | 'sortOrder'>, 'tokenCount'>): TokenWindowGroupItem<CustomDataType>;
504
- /** Get all items as a string */
505
- getAllAsString(): string;
506
- /** Get all items. Doesn't return disabled items. */
507
- getAll(): TokenWindowGroupItem<CustomDataType>[];
508
- /** Remove all items from this group */
509
- empty(): void;
510
- }
511
- /** Token group item */
512
- interface TokenWindowGroupItem<CustomDataType> {
513
- /** Each item must have a unique ID. */
514
- id: string;
515
- /** The string content of the item */
516
- content: string;
517
- /** True if this item should never be removed */
518
- cannotRemove?: boolean;
519
- /** Sorting order. If not specified, uses dateAdded instead. */
520
- sortOrder: number;
521
- /** Date this item was added */
522
- dateAdded: number;
523
- /** Token count in the content */
524
- tokenCount: number;
525
- /** Anything to attach to this item */
526
- customData?: CustomDataType;
527
- /** If disabled, this item will not be included and will not add tot he token count. */
528
- disabled?: boolean;
529
- }
530
-
531
597
  // ==================================================================================================
532
598
  // JSON Schema Draft 07
533
599
  // ==================================================================================================
@@ -715,8 +781,8 @@ interface ChatBaseConfig {
715
781
  maxTokens: number;
716
782
  /** Callback before the AI sends info to the LLM */
717
783
  onBeforeMessageProcessing?: () => void;
718
- /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and be called again with more updates. */
719
- onAIMessage?: (text: string, isChunk: boolean) => void;
784
+ /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */
785
+ onAIMessage?: (output: TokenWindowGroupItemParams<any>[], isPartial: boolean) => void;
720
786
  /** Callback when the AI starts performing an action */
721
787
  onAIToolStart?: (toolName: string, input: any) => void;
722
788
  }
@@ -730,19 +796,17 @@ interface ChatBaseToolConfig {
730
796
  params: JSONSchema7;
731
797
  /** Callback function to process the tool */
732
798
  callback: (params: any) => any;
733
- /** If true, this tool call will be removed from the message history after it is executed. */
734
- removeFromMessageHistory?: boolean;
735
799
  /** If true, this item can be removed if there's not enough context available. */
736
800
  canRemove?: boolean;
737
- /** Misc app context */
738
- [key: string]: any;
801
+ /** Knowledge base item this tool use represents */
802
+ kbItem?: KnowledgeBaseItem;
739
803
  }
740
804
  /**
741
805
  * API for interacting with chat APIs.
742
806
  */
743
807
  declare class ChatBase<
744
808
  /** Format for messages in the token window */
745
- MessageFormat = any,
809
+ DataType = any,
746
810
  /** Optional extended config */
747
811
  ConfigFormat extends ChatBaseConfig = ChatBaseConfig> {
748
812
  /** ID */
@@ -761,27 +825,58 @@ ConfigFormat extends ChatBaseConfig = ChatBaseConfig> {
761
825
  /** Token window management */
762
826
  tokenWindow: TokenWindow;
763
827
  /** Token window group used for the context message */
764
- get contextGroup(): TokenWindowGroup<any>;
828
+ get contextGroup(): TokenWindowGroup<string>;
765
829
  /** Token window group used for tools / actions */
766
830
  get toolGroup(): TokenWindowGroup<ChatBaseToolConfig>;
767
831
  /** Token window group used for messages */
768
- get messageGroup(): TokenWindowGroup<MessageFormat>;
832
+ get messageGroup(): TokenWindowGroup<DataType>;
833
+ /** Get the API base after stripping out exact endpoints, or undefined for the default */
834
+ getBaseURL(): string | undefined;
769
835
  /** Constructor */
770
836
  constructor(config: ConfigFormat);
771
- /** Send a message, and get the response */
772
- sendMessage(message: string): Promise<string>;
837
+ /** Send a message, and get the response as an array of message items. */
838
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<DataType>[]) => void): Promise<TokenWindowGroupItemParams<any>[]>;
773
839
  /** Add a user message to the message history */
774
840
  addUserMessage(message: string): void;
775
841
  /** Add an assistant message to the message history */
776
842
  addAssistantMessage(message: string): void;
843
+ /** Helper to add a plain text item */
844
+ protected addTextMessage(text: string, source: 'user' | 'assistant', data: DataType): void;
777
845
  /** Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
778
- onBeforeIncomingMessage(message: MessageFormat): void;
846
+ onBeforeIncomingMessage(message: DataType): void;
779
847
  /** Reset the conversation */
780
848
  resetConversation(): void;
781
849
  /** Trim message list */
782
850
  trimMessages(): Promise<void>;
783
851
  /** Register a tool. */
784
852
  registerTool(tool: ChatBaseToolConfig): TokenWindowGroupItem<ChatBaseToolConfig>;
853
+ /** Find a tool based on the AI-safe name */
854
+ protected findToolBySafeName(toolSafeName: string): ChatBaseToolConfig | undefined;
855
+ /** Execute the specified tool. Throws an error if the tool is undefined. */
856
+ protected executeTool(tool: ChatBaseToolConfig | undefined, input: any): Promise<string>;
857
+ }
858
+
859
+ /** Parses the response from `IntelliWeave.sendMessage()` or a collection of message items. */
860
+ declare class IntelliWeaveMessageParser {
861
+ /** New messages produced after sendMessage() was called */
862
+ messages: TokenWindowGroupItemParams<unknown>[];
863
+ /** Constructor */
864
+ constructor(items: TokenWindowGroupItemParams<unknown>[]);
865
+ /** Plain text output from the AI */
866
+ text(): string;
867
+ /** Total token usage */
868
+ tokenUsage(): {
869
+ cachedInputTokens: number;
870
+ inputTokens: number;
871
+ outputTokens: number;
872
+ totalTokens: number;
873
+ };
874
+ /** Component sections for display */
875
+ sections(): TokenWindowGroupItemParams<unknown>['sections'];
876
+ /** List all tool calls that took place */
877
+ toolCalls(): TokenWindowGroupItemSection[];
878
+ /** Find the response for a tool call */
879
+ toolResult(toolCallInstanceID: string): TokenWindowGroupItemSection | null;
785
880
  }
786
881
 
787
882
  /** Built-in action flags for the persona */
@@ -857,6 +952,8 @@ interface WebWeaverGPTConfig {
857
952
  mcpServers?: MCPKnowledgeClient['config'][];
858
953
  /** Built-in action flags that are currently enabled */
859
954
  flags?: BuiltInActionFlags;
955
+ /** Allow custom chat provider */
956
+ onCreateProvider?: (config: ChatBaseConfig) => ChatBase;
860
957
  }
861
958
  /**
862
959
  * IntelliWeave interface, loads a Persona from the hub and allows you to interact with it. This is the main entry point into the IntelliWeave
@@ -867,7 +964,7 @@ interface WebWeaverGPTConfig {
867
964
  * - event `webweaver_loaded` - Fired when the AI is loaded with a new configuration. This is a global event that is fired on the window object.
868
965
  * - event `webweaver_error` - Fired when an error occurs during loading. This is a global event that is fired on the window object.
869
966
  * - event `input` - Fired when the user sends a message to the AI.
870
- * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isChunk` is true, the message is incomplete and will be followed by more events.
967
+ * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isPartial` is true, the message is incomplete and will be followed by more events.
871
968
  * - event `toolstart` - Fired when the AI starts performing an action.
872
969
  * - event `tool` - Fired when the AI finishes performing an action.
873
970
  */
@@ -876,8 +973,8 @@ declare class IntelliWeave extends EventTarget {
876
973
  static version: string;
877
974
  /** Built-in actions version - increment this when adding new actions */
878
975
  static builtInActionsVersion: string;
879
- /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and be called again with more updates. */
880
- onAIMessage?: (text: string, isChunk: boolean) => void;
976
+ /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */
977
+ onAIMessage?: (messages: TokenWindowGroupItemParams<unknown>[], isPartial: boolean) => void;
881
978
  /** Callback when the AI starts performing an action */
882
979
  onAIToolStart?: ChatBaseConfig['onAIToolStart'];
883
980
  /** Current conversation ID */
@@ -927,7 +1024,7 @@ declare class IntelliWeave extends EventTarget {
927
1024
  /** URL of the IntelliWeave Hub API */
928
1025
  hubAPI: string;
929
1026
  /** Set model and load data from an API key */
930
- load(apiKey: string): Promise<WebWeaverGPTConfig>;
1027
+ load(apiKey: string, config?: Partial<WebWeaverGPTConfig>): Promise<IntelliWeave>;
931
1028
  /** Set the current model */
932
1029
  setModel(id: string): void;
933
1030
  private _lastSystemMsg;
@@ -935,17 +1032,15 @@ declare class IntelliWeave extends EventTarget {
935
1032
  getContextPrefix(): Promise<string>;
936
1033
  /** Get system message to send to the AI */
937
1034
  onBeforeMessageProcessing(): Promise<void>;
938
- /** @private Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
939
- processIncomingMessage(message: string, isChunk?: boolean): void;
1035
+ /** @private Process incoming message(s) from the AI. Can be used to respond to encoded actions in the text response. */
1036
+ processIncomingMessage(messages: TokenWindowGroupItemParams<unknown>[], isPartial?: boolean): void;
940
1037
  /** True if currently processing a message */
941
1038
  isProcessing: boolean;
942
- /** @private Last tracked token count for calculating per-message token usage */
943
- private _lastTrackedTokens;
944
1039
  /** Send a message, and get the response */
945
- sendMessage(message: string): Promise<string | null>;
1040
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<unknown>[]) => void): Promise<IntelliWeaveMessageParser>;
946
1041
  /** @private Called when the AI wants to run a KB action */
947
1042
  toolRunKBAction(kb: KnowledgeBaseItem, input: any): Promise<any>;
948
- /** Submit an analytics event asynchronously */
1043
+ /** Submit an analytics event asynchronously. These events are for use in the Conversation Analytics code. For anonymous statistic analysis, use track() instead. */
949
1044
  submitAnalyticsEvent(data: any): void;
950
1045
  /** Reset the conversation */
951
1046
  resetConversation(): void;
@@ -955,12 +1050,14 @@ declare class IntelliWeave extends EventTarget {
955
1050
  exportState(): {
956
1051
  type: string;
957
1052
  conversationID: string;
958
- messages: any[] | undefined;
1053
+ messages: TokenWindowGroupItem<any>[] | undefined;
959
1054
  };
960
1055
  /** Import conversation state from JSON */
961
1056
  importState(state: any): void;
962
1057
  /** Clone this instance without any message history */
963
1058
  clone(): IntelliWeave;
1059
+ /** Get all messages in the conversation history */
1060
+ get messages(): TokenWindowGroupItem<any>[];
964
1061
  }
965
1062
 
966
1063
  /**
@@ -1042,8 +1139,8 @@ interface KnowledgeBaseItem {
1042
1139
  name: string;
1043
1140
  /** Item tags. Helps with search optimization. */
1044
1141
  tags?: string;
1045
- /** Item content. Can be a function to return a dynamic string. */
1046
- content: string | (() => string);
1142
+ /** Item content */
1143
+ content: string;
1047
1144
  /** If true, this item will always be returned from all search results. */
1048
1145
  isContext?: boolean;
1049
1146
  /** If true, this item will not be visible to the AI. */
@@ -1056,8 +1153,8 @@ interface KnowledgeBaseItem {
1056
1153
  * that was performed. If an error is thrown, the AI will respond appropriately to the user.
1057
1154
  */
1058
1155
  action?: (input: any, ai: IntelliWeave) => (any | Promise<any>);
1059
- /** If true, this item will be removed from the AI's message history after it gets called. This is a special case for LLMs that struggle with follow-up function calls and need to use the KB search functino first. */
1060
- removeFromMessageHistory?: boolean;
1156
+ /** If specified, will hide this action from the default UI after the AI finishes running it, or always hide it */
1157
+ hideActionInUI?: 'always' | 'after-complete';
1061
1158
  }
1062
1159
  /** Parameter definition used by IntelliWeave */
1063
1160
  interface IntelliWeaveParameterDefinition {