@intelliweave/embedded 2.0.72-beta.1 → 2.0.72-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,155 @@
1
- import { Client } from '@modelcontextprotocol/sdk/client/index.js';
2
1
  import { Optional } from 'utility-types';
2
+ import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3
3
  import * as onnxruntime_web from 'onnxruntime-web';
4
4
  import { InferenceSession, Tensor } from 'onnxruntime-web';
5
+ import OpenAI from 'openai';
5
6
  import Anthropic from '@anthropic-ai/sdk';
6
7
  import React from 'react';
7
8
 
9
+ /**
10
+ * This class helps organize groups of tokenized text along with removing items when the window is full.
11
+ */
12
+ declare class TokenWindow {
13
+ /** Token window size */
14
+ size: number;
15
+ /** Token groups */
16
+ groups: TokenWindowGroup<any>[];
17
+ /** Create a new group */
18
+ createGroup(id: string): TokenWindowGroup<unknown>;
19
+ /** Get a group */
20
+ group<CustomDataType>(id: string): TokenWindowGroup<CustomDataType> | undefined;
21
+ /** Counts tokens in the specified text */
22
+ static countTokensInText(text: string): number;
23
+ /** Calculate current tokens in all groups */
24
+ countTokens(): number;
25
+ /** Remove overflow from all groups. */
26
+ removeOverflow(): void;
27
+ /** Remove one overflow item. Returns null if no items were able to be removed. */
28
+ private removeOneItem;
29
+ }
30
+ /** A token group. */
31
+ declare class TokenWindowGroup<DataType> {
32
+ /** Group ID */
33
+ id: string;
34
+ /** List of items */
35
+ items: TokenWindowGroupItem<DataType>[];
36
+ /**
37
+ * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
38
+ * groups have a weight of 1, each group removes items equally if full. If one has a weight of 2 while the rest are 1,
39
+ * that group will be allowed to keep double the amount of items.
40
+ */
41
+ weight: number;
42
+ /** Current total token count, computed automatically. Don't update this value manually. */
43
+ tokenCount: number;
44
+ /** Group item separator. This text is added in between each item in the token window. */
45
+ separator: string;
46
+ /** Token count padding added to each item. */
47
+ private itemPadding;
48
+ /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
49
+ setItemPadding(padding: number): this;
50
+ /** Sort function */
51
+ private sortFunction;
52
+ /** Set sort function */
53
+ sortBy(sortFunction: (a: TokenWindowGroupItem<DataType>, b: TokenWindowGroupItem<DataType>) => number): this;
54
+ /** Set separator. This text is added in between each item in the token window. */
55
+ setSeparator(separator: string): this;
56
+ /**
57
+ * Set weight. Weight controls how many items from this group should be kept
58
+ * in relation to the entire window. For example if all groups have a weight
59
+ * of 1, each group removes items equally if full. If one has a weight of 2
60
+ * while the rest are 1, that group will be allowed to keep double the
61
+ * amount of items.
62
+ */
63
+ setWeight(weight: number): this;
64
+ /** Recalculate all tokens. Note this may take a while. */
65
+ recalculateTokens(): void;
66
+ /** Add an item to the group */
67
+ add(item: string | TokenWindowGroupItemParams<DataType>): TokenWindowGroupItem<DataType>;
68
+ /** Get all items as a string */
69
+ getAllAsString(): string;
70
+ /** Get all items. Doesn't return disabled items. */
71
+ getAll(): TokenWindowGroupItem<DataType>[];
72
+ /** Remove all items from this group */
73
+ empty(): void;
74
+ }
75
+ /** Token group item section types */
76
+ declare enum TokenWindowGroupItemSectionType {
77
+ /** Text items represent plain text. */
78
+ Text = "text",
79
+ /** Tool call items represent a tool call requested by the AI. */
80
+ ToolCall = "tool_call",
81
+ /** Tool result items represent the result of a tool call. */
82
+ ToolResult = "tool_result",
83
+ /** Thinking section */
84
+ Thinking = "thinking",
85
+ /** Other item types */
86
+ Other = "other"
87
+ }
88
+ /** Token group item */
89
+ interface TokenWindowGroupItem<DataType> {
90
+ /** Each item must have a unique ID. */
91
+ id: string;
92
+ /** True if this item should never be removed */
93
+ cannotRemove?: boolean;
94
+ /** Sorting order. If not specified, uses dateAdded instead. */
95
+ sortOrder: number;
96
+ /** Date this item was added */
97
+ dateAdded: number;
98
+ /** Token count in the content */
99
+ tokenCount: number;
100
+ /** This is the actual item that gets sent to the APIs. It will be in whatever format is required for the associated API. */
101
+ data?: DataType;
102
+ /** If disabled, this item will not be included and will not add to the token count. */
103
+ disabled?: boolean;
104
+ /** Message source, ie was this message created by the user, or by the AI? */
105
+ source: 'user' | 'assistant';
106
+ /**
107
+ * The string content of the item, or a summary of it. This is an autogenerated field, updated when the item is added/updated in the token window group.
108
+ * If `data` is a string, this will be the same as `data`. If `data` is more complex, this will be a text representation of all items in the `sections` array.
109
+ *
110
+ * Note: When the response contains text and tool calls, this will add in a summary of what's happening. For better display, use the `sections` array.
111
+ */
112
+ text?: string;
113
+ /** Message sections */
114
+ sections?: TokenWindowGroupItemSection[];
115
+ /** If this message was generated by the AI, this contains the token usage for this message. */
116
+ usage?: {
117
+ /** Number of tokens consumed from the data passed to the AI */
118
+ inputTokens: number;
119
+ /** Number of input tokens that were used in token caching */
120
+ cachedInputTokens: number;
121
+ /** Number of tokens consumed by the AI generating output */
122
+ outputTokens: number;
123
+ /** Total token usage */
124
+ totalTokens: number;
125
+ };
126
+ }
127
+ /** A section of a message returned by the AI */
128
+ interface TokenWindowGroupItemSection {
129
+ /** Section type */
130
+ type: TokenWindowGroupItemSectionType;
131
+ /** Text content when this section represents text or thinking */
132
+ text?: string;
133
+ /** The raw tool name the AI requested to be called. */
134
+ toolName?: string;
135
+ /** The ID of the KB action this tool call maps to, if any */
136
+ toolKbID?: string;
137
+ /** The name of the KB action this tool call maps to, if any */
138
+ toolKbName?: string;
139
+ /** The parameters the AI requested to be sent to the tool. Only available if type == 'tool_call' */
140
+ toolParameters?: any;
141
+ /** Successful response of the tool call. Will be null if toolErrorResponse is set. */
142
+ toolSuccessResponse?: any;
143
+ /** Error response of the tool call. Will be null if toolSuccessResponse is set. */
144
+ toolErrorResponse?: string;
145
+ /** Tool call ID. This can be used to match a tool call request with its result. */
146
+ toolCallInstanceID?: string;
147
+ /** True if this tool call should be hidden in the UI */
148
+ toolCallHiddenInUI?: 'always' | 'after-complete';
149
+ }
150
+ /** Token window group item input, without the autogenerated fields */
151
+ type TokenWindowGroupItemParams<DataType> = Omit<Optional<TokenWindowGroupItem<DataType>, 'id' | 'dateAdded' | 'sortOrder' | 'text' | 'source' | 'sections'>, 'tokenCount'>;
152
+
8
153
  /**
9
154
  * Allows an MCP server to be used as a knowledge source for IntelliWeave.
10
155
  */
@@ -381,8 +526,8 @@ interface KnowledgeBaseItem {
381
526
  name: string;
382
527
  /** Item tags. Helps with search optimization. */
383
528
  tags?: string;
384
- /** Item content. Can be a function to return a dynamic string. */
385
- content: string | (() => string);
529
+ /** Item content */
530
+ content: string;
386
531
  /** If true, this item will always be returned from all search results. */
387
532
  isContext?: boolean;
388
533
  /** If true, this item will not be visible to the AI. */
@@ -395,8 +540,8 @@ interface KnowledgeBaseItem {
395
540
  * that was performed. If an error is thrown, the AI will respond appropriately to the user.
396
541
  */
397
542
  action?: (input: any, ai: IntelliWeave) => (any | Promise<any>);
398
- /** If true, this item will be removed from the AI's message history after it gets called. This is a special case for LLMs that struggle with follow-up function calls and need to use the KB search function first. */
399
- removeFromMessageHistory?: boolean;
543
+ /** If specified, will hide this action from the default UI after the AI finishes running it, or always hide it */
544
+ hideActionInUI?: 'always' | 'after-complete';
400
545
  }
401
546
  /** Parameter definition used by IntelliWeave */
402
547
  interface IntelliWeaveParameterDefinition {
@@ -504,84 +649,6 @@ interface IntelliWeaveInstructConfig {
504
649
  callback?: (txt: string) => void;
505
650
  }
506
651
 
507
- /**
508
- * This class helps organize groups of tokenized text along with removing items when the window is full.
509
- */
510
- declare class TokenWindow {
511
- /** Token window size */
512
- size: number;
513
- /** Token groups */
514
- groups: TokenWindowGroup<any>[];
515
- /** Create a new group */
516
- createGroup(id: string): TokenWindowGroup<unknown>;
517
- /** Get a group */
518
- group<CustomDataType>(id: string): TokenWindowGroup<CustomDataType> | undefined;
519
- /** Calculate current tokens in all groups */
520
- countTokens(): number;
521
- /** Remove overflow from all groups. */
522
- removeOverflow(): void;
523
- /** Remove one overflow item. Returns null if no items were able to be removed. */
524
- private removeOneItem;
525
- }
526
- /** A token group. */
527
- declare class TokenWindowGroup<CustomDataType> {
528
- /** Group ID */
529
- id: string;
530
- /** List of items */
531
- items: TokenWindowGroupItem<CustomDataType>[];
532
- /**
533
- * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
534
- * groups have a weight of 1, each group remove items equally if full. If one has a weight of 2 while the rest are 1,
535
- * that group will be allowed to keep double the amount of items.
536
- */
537
- weight: number;
538
- /** Current total token count, computed automatically. Don't update this value manually. */
539
- tokenCount: number;
540
- /** Group item separator */
541
- separator: string;
542
- /** Token count padding added to each item. */
543
- private itemPadding;
544
- /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
545
- setItemPadding(padding: number): this;
546
- /** Sort function */
547
- private sortFunction;
548
- /** Set sort function */
549
- sortBy(sortFunction: (a: TokenWindowGroupItem<CustomDataType>, b: TokenWindowGroupItem<CustomDataType>) => number): this;
550
- /** Set separator */
551
- setSeparator(separator: string): this;
552
- /** Set weight */
553
- setWeight(weight: number): this;
554
- /** Recalculate all tokens. Note this may take a while. */
555
- recalculateTokens(): void;
556
- /** Add an item to the group */
557
- add(item: string | Omit<Optional<TokenWindowGroupItem<CustomDataType>, 'id' | 'dateAdded' | 'sortOrder'>, 'tokenCount'>): TokenWindowGroupItem<CustomDataType>;
558
- /** Get all items as a string */
559
- getAllAsString(): string;
560
- /** Get all items. Doesn't return disabled items. */
561
- getAll(): TokenWindowGroupItem<CustomDataType>[];
562
- /** Remove all items from this group */
563
- empty(): void;
564
- }
565
- /** Token group item */
566
- interface TokenWindowGroupItem<CustomDataType> {
567
- /** Each item must have a unique ID. */
568
- id: string;
569
- /** The string content of the item */
570
- content: string;
571
- /** True if this item should never be removed */
572
- cannotRemove?: boolean;
573
- /** Sorting order. If not specified, uses dateAdded instead. */
574
- sortOrder: number;
575
- /** Date this item was added */
576
- dateAdded: number;
577
- /** Token count in the content */
578
- tokenCount: number;
579
- /** Anything to attach to this item */
580
- customData?: CustomDataType;
581
- /** If disabled, this item will not be included and will not add to the token count. */
582
- disabled?: boolean;
583
- }
584
-
585
652
  /** Chat config options */
586
653
  interface ChatBaseConfig {
587
654
  /** API key */
@@ -602,8 +669,8 @@ interface ChatBaseConfig {
602
669
  maxTokens: number;
603
670
  /** Callback before the AI sends info to the LLM */
604
671
  onBeforeMessageProcessing?: () => void;
605
- /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and be called again with more updates. */
606
- onAIMessage?: (text: string, isChunk: boolean) => void;
672
+ /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */
673
+ onAIMessage?: (output: TokenWindowGroupItemParams<any>[], isPartial: boolean) => void;
607
674
  /** Callback when the AI starts performing an action */
608
675
  onAIToolStart?: (toolName: string, input: any) => void;
609
676
  }
@@ -617,19 +684,17 @@ interface ChatBaseToolConfig {
617
684
  params: JSONSchema7;
618
685
  /** Callback function to process the tool */
619
686
  callback: (params: any) => any;
620
- /** If true, this tool call will be removed from the message history after it is executed. */
621
- removeFromMessageHistory?: boolean;
622
687
  /** If true, this item can be removed if there's not enough context available. */
623
688
  canRemove?: boolean;
624
- /** Misc app context */
625
- [key: string]: any;
689
+ /** Knowledge base item this tool use represents */
690
+ kbItem?: KnowledgeBaseItem;
626
691
  }
627
692
  /**
628
693
  * API for interacting with chat APIs.
629
694
  */
630
695
  declare class ChatBase<
631
696
  /** Format for messages in the token window */
632
- MessageFormat = any,
697
+ DataType = any,
633
698
  /** Optional extended config */
634
699
  ConfigFormat extends ChatBaseConfig = ChatBaseConfig> {
635
700
  /** ID */
@@ -648,27 +713,58 @@ ConfigFormat extends ChatBaseConfig = ChatBaseConfig> {
648
713
  /** Token window management */
649
714
  tokenWindow: TokenWindow;
650
715
  /** Token window group used for the context message */
651
- get contextGroup(): TokenWindowGroup<any>;
716
+ get contextGroup(): TokenWindowGroup<string>;
652
717
  /** Token window group used for tools / actions */
653
718
  get toolGroup(): TokenWindowGroup<ChatBaseToolConfig>;
654
719
  /** Token window group used for messages */
655
- get messageGroup(): TokenWindowGroup<MessageFormat>;
720
+ get messageGroup(): TokenWindowGroup<DataType>;
721
+ /** Get the API base after stripping out exact endpoints, or undefined for the default */
722
+ getBaseURL(): string | undefined;
656
723
  /** Constructor */
657
724
  constructor(config: ConfigFormat);
658
- /** Send a message, and get the response */
659
- sendMessage(message: string): Promise<string>;
725
+ /** Send a message, and get the response as an array of message items. */
726
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<DataType>[]) => void): Promise<TokenWindowGroupItemParams<any>[]>;
660
727
  /** Add a user message to the message history */
661
728
  addUserMessage(message: string): void;
662
729
  /** Add an assistant message to the message history */
663
730
  addAssistantMessage(message: string): void;
731
+ /** Helper to add a plain text item */
732
+ protected addTextMessage(text: string, source: 'user' | 'assistant', data: DataType): void;
664
733
  /** Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
665
- onBeforeIncomingMessage(message: MessageFormat): void;
734
+ onBeforeIncomingMessage(message: DataType): void;
666
735
  /** Reset the conversation */
667
736
  resetConversation(): void;
668
737
  /** Trim message list */
669
738
  trimMessages(): Promise<void>;
670
739
  /** Register a tool. */
671
740
  registerTool(tool: ChatBaseToolConfig): TokenWindowGroupItem<ChatBaseToolConfig>;
741
+ /** Find a tool based on the AI-safe name */
742
+ protected findToolBySafeName(toolSafeName: string): ChatBaseToolConfig | undefined;
743
+ /** Execute the specified tool. Throws an error if the tool is undefined. */
744
+ protected executeTool(tool: ChatBaseToolConfig | undefined, input: any): Promise<string>;
745
+ }
746
+
747
+ /** Parses the response from `IntelliWeave.sendMessage()` or a collection of message items. */
748
+ declare class IntelliWeaveMessageParser {
749
+ /** New messages produced after sendMessage() was called */
750
+ messages: TokenWindowGroupItemParams<unknown>[];
751
+ /** Constructor */
752
+ constructor(items: TokenWindowGroupItemParams<unknown>[]);
753
+ /** Plain text output from the AI */
754
+ text(): string;
755
+ /** Total token usage */
756
+ tokenUsage(): {
757
+ cachedInputTokens: number;
758
+ inputTokens: number;
759
+ outputTokens: number;
760
+ totalTokens: number;
761
+ };
762
+ /** Component sections for display */
763
+ sections(): TokenWindowGroupItemParams<unknown>['sections'];
764
+ /** List all tool calls that took place */
765
+ toolCalls(): TokenWindowGroupItemSection[];
766
+ /** Find the response for a tool call */
767
+ toolResult(toolCallInstanceID: string): TokenWindowGroupItemSection | null;
672
768
  }
673
769
 
674
770
  /** Built-in action flags for the persona */
@@ -744,6 +840,8 @@ interface WebWeaverGPTConfig {
744
840
  mcpServers?: MCPKnowledgeClient['config'][];
745
841
  /** Built-in action flags that are currently enabled */
746
842
  flags?: BuiltInActionFlags;
843
+ /** Allow custom chat provider */
844
+ onCreateProvider?: (config: ChatBaseConfig) => ChatBase;
747
845
  }
748
846
  /**
749
847
  * IntelliWeave interface, loads a Persona from the hub and allows you to interact with it. This is the main entry point into the IntelliWeave
@@ -754,7 +852,7 @@ interface WebWeaverGPTConfig {
754
852
  * - event `webweaver_loaded` - Fired when the AI is loaded with a new configuration. This is a global event that is fired on the window object.
755
853
  * - event `webweaver_error` - Fired when an error occurs during loading. This is a global event that is fired on the window object.
756
854
  * - event `input` - Fired when the user sends a message to the AI.
757
- * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isChunk` is true, the message is incomplete and will be followed by more events.
855
+ * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isPartial` is true, the message is incomplete and will be followed by more events.
758
856
  * - event `toolstart` - Fired when the AI starts performing an action.
759
857
  * - event `tool` - Fired when the AI finishes performing an action.
760
858
  */
@@ -763,8 +861,8 @@ declare class IntelliWeave extends EventTarget {
763
861
  static version: string;
764
862
  /** Built-in actions version - increment this when adding new actions */
765
863
  static builtInActionsVersion: string;
766
- /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and be called again with more updates. */
767
- onAIMessage?: (text: string, isChunk: boolean) => void;
864
+ /** Callback when a message from the AI is returned. If isPartial is true, it may be incomplete and be called again with more updates. */
865
+ onAIMessage?: (messages: TokenWindowGroupItemParams<unknown>[], isPartial: boolean) => void;
768
866
  /** Callback when the AI starts performing an action */
769
867
  onAIToolStart?: ChatBaseConfig['onAIToolStart'];
770
868
  /** Current conversation ID */
@@ -814,7 +912,7 @@ declare class IntelliWeave extends EventTarget {
814
912
  /** URL of the IntelliWeave Hub API */
815
913
  hubAPI: string;
816
914
  /** Set model and load data from an API key */
817
- load(apiKey: string): Promise<WebWeaverGPTConfig>;
915
+ load(apiKey: string, config?: Partial<WebWeaverGPTConfig>): Promise<IntelliWeave>;
818
916
  /** Set the current model */
819
917
  setModel(id: string): void;
820
918
  private _lastSystemMsg;
@@ -822,17 +920,15 @@ declare class IntelliWeave extends EventTarget {
822
920
  getContextPrefix(): Promise<string>;
823
921
  /** Get system message to send to the AI */
824
922
  onBeforeMessageProcessing(): Promise<void>;
825
- /** @private Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
826
- processIncomingMessage(message: string, isChunk?: boolean): void;
923
+ /** @private Process incoming message(s) from the AI. Can be used to respond to encoded actions in the text response. */
924
+ processIncomingMessage(messages: TokenWindowGroupItemParams<unknown>[], isPartial?: boolean): void;
827
925
  /** True if currently processing a message */
828
926
  isProcessing: boolean;
829
- /** @private Last tracked token count for calculating per-message token usage */
830
- private _lastTrackedTokens;
831
927
  /** Send a message, and get the response */
832
- sendMessage(message: string): Promise<string | null>;
928
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<unknown>[]) => void): Promise<IntelliWeaveMessageParser>;
833
929
  /** @private Called when the AI wants to run a KB action */
834
930
  toolRunKBAction(kb: KnowledgeBaseItem, input: any): Promise<any>;
835
- /** Submit an analytics event asynchronously */
931
+ /** Submit an analytics event asynchronously. These events are for use in the Conversation Analytics code. For anonymous statistic analysis, use track() instead. */
836
932
  submitAnalyticsEvent(data: any): void;
837
933
  /** Reset the conversation */
838
934
  resetConversation(): void;
@@ -842,12 +938,14 @@ declare class IntelliWeave extends EventTarget {
842
938
  exportState(): {
843
939
  type: string;
844
940
  conversationID: string;
845
- messages: any[] | undefined;
941
+ messages: TokenWindowGroupItem<any>[] | undefined;
846
942
  };
847
943
  /** Import conversation state from JSON */
848
944
  importState(state: any): void;
849
945
  /** Clone this instance without any message history */
850
946
  clone(): IntelliWeave;
947
+ /** Get all messages in the conversation history */
948
+ get messages(): TokenWindowGroupItem<any>[];
851
949
  }
852
950
 
853
951
  /**
@@ -1646,9 +1744,11 @@ declare class WebWeaverEmbed extends BaseComponent {
1646
1744
  /** Process input text from the user */
1647
1745
  processInput(inputText: string): Promise<void>;
1648
1746
  /** Called when the AI responds with some text */
1649
- onAIMessage(msg: string): Promise<void>;
1650
- /** Called when the AI starts running a tool */
1651
- onAIToolStart(toolName: string, args: any): void;
1747
+ onAIMessage(messages: TokenWindowGroupItemParams<unknown>[], isPartial: boolean): Promise<void>;
1748
+ /** Updates a text element */
1749
+ updateTextElement(elementID: string, text: string): void;
1750
+ /** Updates an info block element */
1751
+ updateInfoElement(elementID: string, text: string, iconType: string): void;
1652
1752
  /** Called when a suggestion button is clicked */
1653
1753
  onSuggestionClick(e: Event, suggestion: string): void;
1654
1754
  /** Called when an LLM model is selected */
@@ -1704,55 +1804,44 @@ declare function getDefaultUserID(): string;
1704
1804
  /** Convert an IntelliWeave parameter list to JSON schema. Does not modify if it's already a JSON schema. */
1705
1805
  declare function convertParamsToJSONSchema(params: KnowledgeBaseActionParameterSchema): JSONSchema7;
1706
1806
 
1707
- /** ChatGPT message */
1708
- interface ChatGPTMessage {
1709
- /** Role of the message */
1710
- role: 'user' | 'assistant' | 'system' | 'tool';
1711
- /** Content of the message */
1712
- content: string;
1713
- /** Tool call ID */
1714
- tool_call_id?: string;
1715
- /** Tool calls made by the AI */
1716
- tool_calls?: any[];
1807
+ /** OpenRouter message extensions */
1808
+ interface OpenRouterMessage extends OpenAI.Chat.ChatCompletionAssistantMessageParam {
1809
+ reasoning?: string;
1717
1810
  }
1811
+ /** OpenAI message format */
1812
+ type DataType$1 = OpenRouterMessage | OpenAI.Chat.Completions.ChatCompletionMessageParam;
1718
1813
  /**
1719
- * API for interacting with ChatGPT APIs.
1814
+ * API for interacting with OpenAI-compatible APIs.
1720
1815
  */
1721
- declare class ChatGPT extends ChatBase<ChatGPTMessage> {
1722
- /** Send a message, and get the response */
1723
- sendMessage(message: string): Promise<string>;
1724
- /** Insert a message as if the assistant has written it */
1725
- addAssistantMessage(message: string): void;
1726
- /** Insert a message sent from a user. Note that doing this instead of using `sendMessage()` means you'll need to manually call `await processMessages()` and then `getLatestMessage()` to get the response. */
1816
+ declare class ChatGPT extends ChatBase<DataType$1> {
1817
+ /** Add a user message to the message history */
1727
1818
  addUserMessage(message: string): void;
1728
- /** Get all messages */
1729
- getMessages(): ChatGPTMessage[];
1730
- /** Get latest message */
1731
- getLatestMessage(): ChatGPTMessage | undefined;
1732
- /** @private Process messages in the chat history */
1733
- processMessages(): Promise<void>;
1819
+ /** Add an assistant message to the message history */
1820
+ addAssistantMessage(message: string): void;
1821
+ /** Create the OpenAI client */
1822
+ protected createOpenAIClient(): OpenAI;
1823
+ /** Send a message, and get the response as an array of message items. */
1824
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<DataType$1>[]) => void): Promise<TokenWindowGroupItemParams<any>[]>;
1825
+ /** Parse a message block into our format */
1826
+ protected parseMessageBlock(messageID: string, message: OpenRouterMessage, usage: OpenAI.Completions.CompletionUsage | undefined, isPartial: boolean): TokenWindowGroupItemParams<DataType$1>;
1734
1827
  /** Trim message list */
1735
1828
  trimMessages(): Promise<void>;
1736
- /** @private Send HTTP request to the API and return the response */
1737
- sendRequest(payload: any): Promise<Response>;
1738
- /** @private Send message list to the API and store the response */
1739
- sendToAPI(generatePayloadOnly?: boolean): Promise<any>;
1740
- /** @private Process a tool call request from the AI */
1741
- processToolCall(toolCall: any): Promise<void>;
1742
1829
  }
1743
1830
 
1831
+ /** Anthropic message format */
1832
+ type DataType = Anthropic.Messages.MessageParam;
1744
1833
  /**
1745
1834
  * API for interacting with Anthropic APIs.
1746
1835
  */
1747
- declare class AnthropicChat extends ChatBase<Anthropic.Messages.MessageParam> {
1836
+ declare class AnthropicChat extends ChatBase<DataType> {
1748
1837
  /** Add a user message to the message history */
1749
1838
  addUserMessage(message: string): void;
1750
1839
  /** Add an assistant message to the message history */
1751
1840
  addAssistantMessage(message: string): void;
1752
- /** Send a message, and get the response */
1753
- sendMessage(message: string): Promise<string>;
1754
- /** Calls the specified tool and returns a tool_result block */
1755
- protected performToolCall(toolUse: Anthropic.Messages.ToolUseBlockParam): Promise<Anthropic.Messages.ToolResultBlockParam>;
1841
+ /** Send a message, and get the response as an array of message items. */
1842
+ sendMessage(message: string, onPartial?: (items: TokenWindowGroupItemParams<DataType>[]) => void): Promise<TokenWindowGroupItemParams<any>[]>;
1843
+ /** Parse a message block into our format */
1844
+ protected parseMessageBlock(message: Anthropic.Messages.Message): TokenWindowGroupItemParams<DataType>;
1756
1845
  /** Trim message list */
1757
1846
  trimMessages(): Promise<void>;
1758
1847
  }
@@ -1933,57 +2022,6 @@ interface VoiceSubmitEvent extends BaseStatisticsEvent {
1933
2022
  /** Union type of all specific event types */
1934
2023
  type StatisticsEvent = SessionStartEvent | MessageSendEvent | MessageReceiveEvent | ToolCallEvent | KnowledgeBaseSearchEvent | UIOpenEvent | UICloseEvent | VoiceStartEvent | VoiceEndEvent | VoiceSubmitEvent;
1935
2024
 
1936
- /**
1937
- * Handles a stream of input/output events with the specified AI model.
1938
- *
1939
- * @event event - Fired when an event is emitted, either by the AI or wrapper code etc.
1940
- */
1941
- declare class IntelliWeaveStream extends IntelliWeave {
1942
- /** Events that haven't been sent to the AI yet */
1943
- pendingEvents: IntelliWeaveStreamEvent[];
1944
- /** Get the system message prefix, before the KB entries are added */
1945
- getContextPrefix(): Promise<string>;
1946
- /** Get actions to be added to the context */
1947
- /** Post an event to the AI */
1948
- postEvent(event: Partial<IntelliWeaveStreamEvent>): void;
1949
- /** @private Called when the AI wants to run a KB action */
1950
- toolRunKBAction(kb: KnowledgeBaseItem, input: any): Promise<any>;
1951
- /** Output an event to the event listener(s) */
1952
- private emitEvent;
1953
- private _isProcessingEvents;
1954
- /** Process events */
1955
- private processEvents;
1956
- }
1957
- /** Any event */
1958
- interface IntelliWeaveStreamEvent {
1959
- /** Event name */
1960
- eventName: string;
1961
- /** Event date, unix timestamp, number of milliseconds since Jan 1 1970 */
1962
- timestamp: number;
1963
- /** Human-readable event date */
1964
- timestampDate: string;
1965
- /** Direction of this event. Input events go into the AI for processing, output events come out of the AI system. */
1966
- direction: 'input' | 'output';
1967
- /** Assistant hint, added automatically if there's a KB entry with the same ID. */
1968
- assistantHint?: string;
1969
- /** Any extra data */
1970
- [key: string]: any;
1971
- }
1972
- /** Error event schema */
1973
- interface IntelliWeaveStreamErrorEvent extends IntelliWeaveStreamEvent {
1974
- /** Event name */
1975
- eventName: "error";
1976
- /** Error details */
1977
- errorMessage: string;
1978
- }
1979
- /** AI speaking event */
1980
- interface IntelliWeaveStreamTextOutputEvent extends IntelliWeaveStreamEvent {
1981
- /** Event name */
1982
- eventName: "text";
1983
- /** The text spoken */
1984
- text: string;
1985
- }
1986
-
1987
2025
  /**
1988
2026
  * This is used when someone does `import "web-weaver-embedded"` and gives them a React Component (WebWeaverUI)
1989
2027
  * that they can use in their React code.
@@ -2062,4 +2100,4 @@ declare function useIntelliWeave(): IntelliWeave | undefined;
2062
2100
  /** React hook to add an external KB search hook. This can provide static KB entries, or perform an async search and return dynamic entries. */
2063
2101
  declare function useIntelliWeaveKnowledge(query: KnowledgeBaseSource['query'], dependencies?: any[]): void;
2064
2102
 
2065
- export { AILogic, AnthropicChat, AudioSystem, type BaseStatisticsEvent, type BufferType, BufferedWebSocket, type BuiltInActionFlags, ChatBase, type ChatBaseConfig, type ChatBaseToolConfig, ChatGPT, type ChatGPTMessage, FixedBufferStream, type InputStatisticsEvent, IntelliWeave, type IntelliWeaveGlobalConfig, type IntelliWeaveInstructConfig, type IntelliWeaveParameterDefinition, IntelliWeaveProvider, IntelliWeaveStream, type IntelliWeaveStreamErrorEvent, type IntelliWeaveStreamEvent, type IntelliWeaveStreamTextOutputEvent, IntelliWeaveTranscriptionNode, KnowledgeBase, type KnowledgeBaseActionParameterSchema, type KnowledgeBaseItem, type KnowledgeBaseSearchEvent, type KnowledgeBaseSource, type KnowledgeBaseWebhookActionResponse, type KnowledgeBaseWebhookRequest, type KnowledgeBaseWebhookSearchResponse, type KnowledgeFetcher, Logging, MCPKnowledgeClient, type MessageReceiveEvent, type MessageSendEvent, ONNXModel, type ONNXTensors, OpenAITranscriptionNode, PCMPlayerNode, PCMReceiverNode, Resampler, type SessionStartEvent, type StatisticsEvent, type StatisticsEventType, type SupportedArrayBuffers, TokenWindow, TokenWindowGroup, type TokenWindowGroupItem, type ToolCallEvent, type UICloseEvent, type UIOpenEvent, VoiceChunkOutputNode, VoiceDetectionNode, type VoiceEndEvent, type VoiceStartEvent, type VoiceSubmitEvent, WebWeaverEmbed, type WebWeaverGPTConfig, WebWeaverSpeechOutput, WebWeaverSpeechRecognition, WebWeaverUI, audioToWav, convertParamsToJSONSchema, floatTo16BitPCM, floatTo64BitPCM, getDefaultUserID, int16ToFloat32BitPCM, intelliweaveConfig, intelliweaveGlobalThis, sseEvents, track, trimWhitespaceInText, useIntelliWeave, useIntelliWeaveKnowledge };
2103
+ export { AILogic, AnthropicChat, AudioSystem, type BaseStatisticsEvent, type BufferType, BufferedWebSocket, type BuiltInActionFlags, ChatBase, type ChatBaseConfig, type ChatBaseToolConfig, ChatGPT, FixedBufferStream, type InputStatisticsEvent, IntelliWeave, type IntelliWeaveGlobalConfig, type IntelliWeaveInstructConfig, IntelliWeaveMessageParser, type IntelliWeaveParameterDefinition, IntelliWeaveProvider, IntelliWeaveTranscriptionNode, KnowledgeBase, type KnowledgeBaseActionParameterSchema, type KnowledgeBaseItem, type KnowledgeBaseSearchEvent, type KnowledgeBaseSource, type KnowledgeBaseWebhookActionResponse, type KnowledgeBaseWebhookRequest, type KnowledgeBaseWebhookSearchResponse, type KnowledgeFetcher, Logging, MCPKnowledgeClient, type MessageReceiveEvent, type MessageSendEvent, ONNXModel, type ONNXTensors, OpenAITranscriptionNode, PCMPlayerNode, PCMReceiverNode, Resampler, type SessionStartEvent, type StatisticsEvent, type StatisticsEventType, type SupportedArrayBuffers, TokenWindow, TokenWindowGroup, type TokenWindowGroupItem, type TokenWindowGroupItemParams, type TokenWindowGroupItemSection, TokenWindowGroupItemSectionType, type ToolCallEvent, type UICloseEvent, type UIOpenEvent, VoiceChunkOutputNode, VoiceDetectionNode, type VoiceEndEvent, type VoiceStartEvent, type VoiceSubmitEvent, WebWeaverEmbed, type WebWeaverGPTConfig, WebWeaverSpeechOutput, WebWeaverSpeechRecognition, WebWeaverUI, audioToWav, convertParamsToJSONSchema, floatTo16BitPCM, floatTo64BitPCM, getDefaultUserID, int16ToFloat32BitPCM, intelliweaveConfig, intelliweaveGlobalThis, sseEvents, track, trimWhitespaceInText, useIntelliWeave, useIntelliWeaveKnowledge };