@intelliweave/embedded 1.6.55 → 1.7.57

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,86 @@
1
+ import { Optional } from 'utility-types';
1
2
  import * as onnxruntime_web from 'onnxruntime-web';
2
3
  import { InferenceSession, Tensor } from 'onnxruntime-web';
3
4
  import React from 'react';
4
5
 
6
+ /**
7
+ * This class helps organize groups of tokenized text along with removing items when the window is full.
8
+ */
9
+ declare class TokenWindow {
10
+ /** Token window size */
11
+ size: number;
12
+ /** Token groups */
13
+ groups: TokenWindowGroup<any>[];
14
+ /** Create a new group */
15
+ createGroup(id: string): TokenWindowGroup<unknown>;
16
+ /** Get a group */
17
+ group<CustomDataType>(id: string): TokenWindowGroup<CustomDataType> | undefined;
18
+ /** Calculate current tokens in all groups */
19
+ countTokens(): number;
20
+ /** Remove overflow from all groups. */
21
+ removeOverflow(): void;
22
+ /** Remove one overflow item. Returns null if no items were able to be removed. */
23
+ private removeOneItem;
24
+ }
25
+ /** A token group. */
26
+ declare class TokenWindowGroup<CustomDataType> {
27
+ /** Group ID */
28
+ id: string;
29
+ /** List of items */
30
+ items: TokenWindowGroupItem<CustomDataType>[];
31
+ /**
32
+ * Weight controls how many items from this group should be kept in relation to the entire window. For example if all
33
+ * groups have a weight of 1, each group removes items equally if full. If one has a weight of 2 while the rest are 1,
34
+ * that group will be allowed to keep double the amount of items.
35
+ */
36
+ weight: number;
37
+ /** Current total token count, computed automatically. Don't update this value manually. */
38
+ tokenCount: number;
39
+ /** Group item separator */
40
+ separator: string;
41
+ /** Token count padding added to each item. */
42
+ private itemPadding;
43
+ /** Sets the token count padding added to each item. Useful if you don't know exactly what will be added by the LLM host. */
44
+ setItemPadding(padding: number): this;
45
+ /** Sort function */
46
+ private sortFunction;
47
+ /** Set sort function */
48
+ sortBy(sortFunction: (a: TokenWindowGroupItem<CustomDataType>, b: TokenWindowGroupItem<CustomDataType>) => number): this;
49
+ /** Set separator */
50
+ setSeparator(separator: string): this;
51
+ /** Set weight */
52
+ setWeight(weight: number): this;
53
+ /** Recalculate all tokens. Note this may take a while. */
54
+ recalculateTokens(): void;
55
+ /** Add an item to the group */
56
+ add(item: string | Omit<Optional<TokenWindowGroupItem<CustomDataType>, 'id' | 'dateAdded' | 'sortOrder'>, 'tokenCount'>): TokenWindowGroupItem<CustomDataType>;
57
+ /** Get all items as a string */
58
+ getAllAsString(): string;
59
+ /** Get all items. Doesn't return disabled items. */
60
+ getAll(): TokenWindowGroupItem<CustomDataType>[];
61
+ /** Remove all items from this group */
62
+ empty(): void;
63
+ }
64
+ /** Token group item */
65
+ interface TokenWindowGroupItem<CustomDataType> {
66
+ /** Each item must have a unique ID. */
67
+ id: string;
68
+ /** The string content of the item */
69
+ content: string;
70
+ /** True if this item should never be removed */
71
+ cannotRemove?: boolean;
72
+ /** Sorting order. If not specified, uses dateAdded instead. */
73
+ sortOrder: number;
74
+ /** Date this item was added */
75
+ dateAdded: number;
76
+ /** Token count in the content */
77
+ tokenCount: number;
78
+ /** Anything to attach to this item */
79
+ customData?: CustomDataType;
80
+ /** If disabled, this item will not be included and will not add to the token count. */
81
+ disabled?: boolean;
82
+ }
83
+
5
84
  /** ChatGPT config options */
6
85
  interface ChatGPTConfig {
7
86
  /** API key */
@@ -43,9 +122,22 @@ interface ChatGPTToolConfig {
43
122
  callback: (params: any) => any;
44
123
  /** If true, this tool call will be removed from the message history after it is executed. */
45
124
  removeFromMessageHistory?: boolean;
125
+ /** If true, this item can be removed if there's not enough context available. */
126
+ canRemove?: boolean;
46
127
  /** Misc app context */
47
128
  [key: string]: any;
48
129
  }
130
+ /** ChatGPT message */
131
+ interface ChatGPTMessage {
132
+ /** Role of the message */
133
+ role: 'user' | 'assistant' | 'system' | 'tool';
134
+ /** Content of the message */
135
+ content: string;
136
+ /** Tool call ID */
137
+ tool_call_id?: string;
138
+ /** Tool calls made by the AI */
139
+ tool_calls?: any[];
140
+ }
49
141
  /**
50
142
  * API for interacting with ChatGPT APIs.
51
143
  */
@@ -56,22 +148,33 @@ declare class ChatGPT {
56
148
  metadata: any;
57
149
  /** Config */
58
150
  config: ChatGPTConfig;
59
- /** List of messages in the chat history */
60
- messages: any[];
61
- /** List of available tools */
62
- tools: ChatGPTToolConfig[];
63
151
  /** The maximum tool calls in sequence the AI can make before an error is thrown. */
64
152
  maxToolCallsPerMessage: number;
65
- private _hasRemovedToolCallHistorySinceLastMessage;
66
153
  /** Statistics */
67
154
  stats: {
68
155
  /** Total tokens used this session */
69
156
  tokensUsed: number;
70
157
  };
158
+ /** Token window management */
159
+ tokenWindow: TokenWindow;
160
+ /** Token window group used for the context message */
161
+ get contextGroup(): TokenWindowGroup<any>;
162
+ /** Token window group used for tools / actions */
163
+ get toolGroup(): TokenWindowGroup<ChatGPTToolConfig>;
164
+ /** Token window group used for messages */
165
+ get messageGroup(): TokenWindowGroup<ChatGPTMessage>;
71
166
  /** Constructor */
72
167
  constructor(config: ChatGPTConfig);
73
168
  /** Send a message, and get the response */
74
169
  sendMessage(message: string): Promise<string>;
170
+ /** Insert a message as if the assistant has written it */
171
+ addAssistantMessage(message: string): void;
172
+ /** Insert a message sent from a user. Note that doing this instead of using `sendMessage()` means you'll need to manually call `await processMessages()` and then `getLatestMessage()` to get the response. */
173
+ addUserMessage(message: string): void;
174
+ /** Get all messages */
175
+ getMessages(): ChatGPTMessage[];
176
+ /** Get latest message */
177
+ getLatestMessage(): ChatGPTMessage | undefined;
75
178
  /** @private Process messages in the chat history */
76
179
  processMessages(): Promise<void>;
77
180
  /** Trim message list */
@@ -397,11 +500,11 @@ declare class IntelliWeave extends EventTarget {
397
500
  exportState(): {
398
501
  type: string;
399
502
  conversationID: string;
400
- messages: any[] | undefined;
503
+ messages: (ChatGPTMessage | undefined)[] | undefined;
401
504
  };
402
505
  /** Import conversation state from JSON */
403
506
  importState(state: any): void;
404
- /** Clone this instance */
507
+ /** Clone this instance without any message history */
405
508
  clone(): IntelliWeave;
406
509
  }
407
510
 
@@ -1317,4 +1420,4 @@ declare function useIntelliWeave(): IntelliWeave | undefined;
1317
1420
  /** React hook to add an external KB search hook. This can provide static KB entries, or perform an async search and return dynamic entries. */
1318
1421
  declare function useIntelliWeaveKnowledge(query: KnowledgeBaseSource['query'], dependencies?: any[]): void;
1319
1422
 
1320
- export { AILogic, AudioSystem, type BufferType, BufferedWebSocket, ChatGPT, type ChatGPTConfig, type ChatGPTToolConfig, FixedBufferStream, IntelliWeave, IntelliWeaveProvider, IntelliWeaveStream, type IntelliWeaveStreamErrorEvent, type IntelliWeaveStreamEvent, type IntelliWeaveStreamTextOutputEvent, IntelliWeaveTranscriptionNode, KnowledgeBase, type KnowledgeBaseItem, type KnowledgeBaseSource, type KnowledgeBaseWebhookActionResponse, type KnowledgeBaseWebhookRequest, type KnowledgeBaseWebhookSearchResponse, Logging, ONNXModel, type ONNXTensors, OpenAITranscriptionNode, PCMPlayerNode, PCMReceiverNode, Resampler, type SupportedArrayBuffers, VoiceChunkOutputNode, VoiceDetectionNode, WebWeaverEmbed, type WebWeaverGPTConfig, WebWeaverSpeechOutput, WebWeaverSpeechRecognition, WebWeaverUI, audioToWav, floatTo16BitPCM, floatTo64BitPCM, getDefaultUserID, int16ToFloat32BitPCM, intelliweaveConfig, intelliweaveGlobalThis, sseEvents, trimWhitespaceInText, useIntelliWeave, useIntelliWeaveKnowledge };
1423
+ export { AILogic, AudioSystem, type BufferType, BufferedWebSocket, ChatGPT, type ChatGPTConfig, type ChatGPTMessage, type ChatGPTToolConfig, FixedBufferStream, IntelliWeave, IntelliWeaveProvider, IntelliWeaveStream, type IntelliWeaveStreamErrorEvent, type IntelliWeaveStreamEvent, type IntelliWeaveStreamTextOutputEvent, IntelliWeaveTranscriptionNode, KnowledgeBase, type KnowledgeBaseItem, type KnowledgeBaseSource, type KnowledgeBaseWebhookActionResponse, type KnowledgeBaseWebhookRequest, type KnowledgeBaseWebhookSearchResponse, Logging, ONNXModel, type ONNXTensors, OpenAITranscriptionNode, PCMPlayerNode, PCMReceiverNode, Resampler, type SupportedArrayBuffers, TokenWindow, TokenWindowGroup, type TokenWindowGroupItem, VoiceChunkOutputNode, VoiceDetectionNode, WebWeaverEmbed, type WebWeaverGPTConfig, WebWeaverSpeechOutput, WebWeaverSpeechRecognition, WebWeaverUI, audioToWav, floatTo16BitPCM, floatTo64BitPCM, getDefaultUserID, int16ToFloat32BitPCM, intelliweaveConfig, intelliweaveGlobalThis, sseEvents, trimWhitespaceInText, useIntelliWeave, useIntelliWeaveKnowledge };