@intelliweave/embedded 1.6.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/custom.d.ts +20 -0
- package/dist/component/component.d.ts +709 -0
- package/dist/component/component.js +375 -0
- package/dist/intelliweave-wordpress.zip +0 -0
- package/dist/node/node.d.ts +1109 -0
- package/dist/node/node.js +26 -0
- package/dist/react/react.d.ts +664 -0
- package/dist/react/react.js +375 -0
- package/dist/script-tag/chunk-FSRPMVAS.js +1 -0
- package/dist/script-tag/ort-wasm-simd-threaded.wasm +0 -0
- package/dist/script-tag/ort.bundle.min-5QOPZPPI.js +1834 -0
- package/dist/script-tag/script-tag.d.ts +2 -0
- package/dist/script-tag/script-tag.js +413 -0
- package/dist/webpack/index.d.ts +1306 -0
- package/dist/webpack/index.js +377 -0
- package/package.json +70 -0
@@ -0,0 +1,664 @@
+import React from 'react';
+
+/** ChatGPT config options */
+interface ChatGPTConfig {
+    /** API key */
+    apiKey: string;
+    /** Provider ID */
+    providerID?: string;
+    /** Endpoint URL if using a custom URL */
+    endpoint: string;
+    /** LLM model to use */
+    model: string;
+    /** System message describing to the AI how it should behave. */
+    systemMessage: string;
+    /** User ID used to uniquely identify users in ChatGPT's API */
+    userID: string;
+    /** If true, streams the text responses from the API */
+    stream: boolean;
+    /** Estimated number of tokens to keep when trimming */
+    maxTokens: number;
+    /** Callback before the AI sends info to the LLM */
+    onBeforeMessageProcessing?: () => void;
+    /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and will be called again with more updates. */
+    onAIMessage?: (text: string, isChunk: boolean) => void;
+    /** Callback when the AI starts performing an action */
+    onAIToolStart?: (toolName: string, input: any) => void;
+}
+/** ChatGPT tool config */
+interface ChatGPTToolConfig {
+    /** Name of the tool */
+    name: string;
+    /** Description of the tool */
+    description: string;
+    /** Parameters for the tool */
+    params: {
+        name: string;
+        type: string;
+        description: string;
+    }[];
+    /** Callback function to process the tool */
+    callback: (params: any) => any;
+    /** If true, this tool call will be removed from the message history after it is executed. */
+    removeFromMessageHistory?: boolean;
+    /** Misc app context */
+    [key: string]: any;
+}
+/**
+ * API for interacting with ChatGPT APIs.
+ */
+declare class ChatGPT {
+    /** ID */
+    id: string;
+    /** Metadata */
+    metadata: any;
+    /** Config */
+    config: ChatGPTConfig;
+    /** List of messages in the chat history */
+    messages: any[];
+    /** List of available tools */
+    tools: ChatGPTToolConfig[];
+    /** The maximum number of sequential tool calls the AI can make before an error is thrown. */
+    maxToolCallsPerMessage: number;
+    private _hasRemovedToolCallHistorySinceLastMessage;
+    /** Statistics */
+    stats: {
+        /** Total tokens used this session */
+        tokensUsed: number;
+    };
+    /** Constructor */
+    constructor(config: ChatGPTConfig);
+    /** Send a message, and get the response */
+    sendMessage(message: string): Promise<string>;
+    /** @private Process messages in the chat history */
+    processMessages(): Promise<void>;
+    /** Trim message list */
+    trimMessages(): Promise<void>;
+    /** @private Send message list to the API and store the response */
+    sendToAPI(generatePayloadOnly?: boolean): Promise<any>;
+    /** Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
+    processIncomingMessage(message: string): void;
+    /** Register a tool. */
+    registerTool(tool: ChatGPTToolConfig): void;
+    /** @private Process a tool call request from the AI */
+    processToolCall(toolCall: any): Promise<void>;
+    /** Reset the conversation */
+    resetConversation(): void;
+}
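Driving this wrapper directly might look like the sketch below. The import path is hypothetical: the react.d.ts shown here does not export ChatGPT, so it would have to come from another of the package's entry points.

// Hypothetical import path; ChatGPT is declared above but not exported by the react entry.
import { ChatGPT } from '@intelliweave/embedded/node';

const chat = new ChatGPT({
    apiKey: 'sk-...',                      // LLM provider key (placeholder)
    endpoint: 'https://api.openai.com/v1', // or a custom endpoint URL
    model: 'gpt-4o-mini',
    systemMessage: 'You are a concise assistant.',
    userID: 'user-123',
    stream: false,
    maxTokens: 4096,
});

// Expose a tool the model can call, per ChatGPTToolConfig.
chat.registerTool({
    name: 'get_time',
    description: 'Returns the current time as an ISO string.',
    params: [],
    callback: () => new Date().toISOString(),
});

const reply = await chat.sendMessage('What time is it?');
console.log(reply, chat.stats.tokensUsed);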
+
+/**
+ * Speech output
+ *
+ * - event `speechstart` - When the speech starts
+ * - event `speechend` - When the speech ends
+ */
+declare class WebWeaverSpeechOutput extends EventTarget {
+    /** Reference to the AI */
+    private ai?;
+    /** Constructor */
+    constructor(ai: IntelliWeave);
+    /** Called when the AI speaks */
+    onTextOutputFromAI(e: CustomEvent): void;
+    /** Current player vars */
+    private currentPlayerVolume?;
+    private currentPlayer?;
+    /** Speak the text */
+    speak(text: string): Promise<void>;
+    private _speakWithLock;
+    /** True if currently playing audio */
+    get isSpeaking(): boolean;
+    /** Interrupt the previously playing audio */
+    interrupt(): Promise<void>;
+    /** Called when the speech output ends */
+    onSpeechEnd(): void;
+}
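A minimal sketch of using the speech output on its own; this assumes standalone construction is intended (normally AudioSystem owns an instance), and stubs the loaded AI with `declare const`.

declare const ai: IntelliWeave;   // an already-loaded instance (stub for the sketch)

const output = new WebWeaverSpeechOutput(ai);
output.addEventListener('speechend', () => console.log('finished speaking'));

await output.speak('Hello! How can I help?');
if (output.isSpeaking) await output.interrupt();   // cut off any still-playing audio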
+
+/**
+ * An AudioWorklet module that records data from input and sends it to the host.
+ *
+ * - event `data` - Fired when data is available to be read.
+ */
+declare class PCMReceiverNode extends AudioWorkletNode {
+    /** @type {'int16' | 'float32'} The output data format */
+    format: string;
+    /**
+     * Creates a new PCMReceiverNode ready to receive PCM data.
+     *
+     * @param context - The audio context to use.
+     * @param sampleRate - The sample rate of the output data stream.
+     * @param format - The format of the output data stream.
+     * @param bufferSize - The size of the output buffer in elements (Int16Array or Float32Array items, depending on `format`).
+     */
+    constructor(context: AudioContext, sampleRate: number, format: 'int16' | 'int64' | 'float32', bufferSize: number);
+    /** @private Called when a message is received from the worklet */
+    onWorkletMessage(e: MessageEvent): void;
+    /** Called when data is received */
+    onData(buffer: Float32Array | Int16Array | BigInt64Array): void;
+}
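Wiring the node between a microphone and the audio graph could look like this sketch. It assumes the class is reachable from an entry point, that the worklet module it needs is registered internally, and that the `data` event payload rides on `CustomEvent.detail` (the declarations above do not say).

const context = new AudioContext();
const mic = await navigator.mediaDevices.getUserMedia({ audio: true });
const source = context.createMediaStreamSource(mic);

// 16 kHz int16 chunks of 2048 elements (illustrative values).
const receiver = new PCMReceiverNode(context, 16000, 'int16', 2048);
source.connect(receiver);

receiver.addEventListener('data', (e) => {
    const chunk = (e as CustomEvent).detail;   // assumed: the PCM buffer rides on detail
    console.log('received', chunk);
});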
+
+/**
+ * An AudioNode which sends events for when speech is detected
+ *
+ * - event `speechstart` - Fired when speech is detected
+ * - event `speechend` - Fired when speech ends
+ */
+declare class VoiceDetectionNode extends PCMReceiverNode {
+    /** True if voice is currently being detected */
+    isVoiceActive: boolean;
+    /** True if voice is active but may be ending soon */
+    get isVoicePossiblyEnding(): boolean;
+    /** Last date that voice was detected */
+    lastVoiceActiveDate: number;
+    /** Amount of time to wait after voice detection to detect that it has ended */
+    voiceEndTimeout: number;
+    /** Detection sensitivity; if the detection model outputs a number bigger than this, it is considered voice */
+    sensitivity: number;
+    /** Sensitivity threshold to end speaking */
+    sentivityEnd: number;
+    /** VAD model */
+    static vadModelURL: string;
+    /** Loaded VAD model */
+    private vad?;
+    /** Sample rate */
+    get sampleRate(): 8000 | 16000;
+    /** Number of samples */
+    get numberOfSamples(): number;
+    /** Number of sample chunks */
+    get numberOfSampleChunks(): number;
+    /** Output buffer size */
+    get outputBufferSize(): number;
+    /** True if the VAD model has been loaded */
+    get isModelLoaded(): boolean;
+    /** The time at which to next reset the VAD model */
+    nextVadReset: number;
+    /** The current probability of active voice */
+    currentProbability: number;
+    /** Constructor */
+    constructor(audioContext: AudioContext);
+    /** Start loading */
+    loadModel(): Promise<void>;
+    private _lastVoiceActive;
+    /** Called when data is received */
+    onData(buffer: Float32Array): Promise<void>;
+    /** Called when speech is detected */
+    onSpeechStart(): void;
+    /** Called when speech ends */
+    onSpeechEnd(): void;
+}
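A sketch of listening for voice activity, continuing the microphone setup above (`context` and `source` are stubbed for self-containment; the unit of `voiceEndTimeout` is assumed to be milliseconds):

declare const context: AudioContext;
declare const source: MediaStreamAudioSourceNode;

const vad = new VoiceDetectionNode(context);
await vad.loadModel();              // loads the Silero VAD model from vadModelURL
source.connect(vad);

vad.addEventListener('speechstart', () => console.log('user started speaking'));
vad.addEventListener('speechend', () => console.log('user stopped speaking'));

vad.sensitivity = 0.6;              // require a stronger signal before triggering
vad.voiceEndTimeout = 800;          // silence to wait before ending (assumed ms)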
+
+/**
+ * An AudioNode which isolates speech and outputs the audio data. Since we are reusing the VAD model node,
+ * output data is in 8000Hz Float32 format.
+ *
+ * - event `voicedata` - Fired when a chunk of voice is detected. `data` contains the recorded chunk of voice in a Float32Array.
+ * - event `voicedataend` - Fired when this chunk of voice ends. `data` contains an array of Float32Array containing the entirety of the recorded voice.
+ */
+declare class VoiceChunkOutputNode extends VoiceDetectionNode {
+    /** Stored buffers */
+    buffers: Float32Array[];
+    /** Recorded audio chunks with voice in it */
+    recordedBuffers: Float32Array[];
+    /** Last active state */
+    _voiceRecording: boolean;
+    /** Amount of audio data in the buffer, in seconds */
+    get bufferDuration(): number;
+    /** Amount of data to keep from before the user started speaking */
+    backBufferDurationSeconds: number;
+    /** Called when data is received */
+    onData(buffer: Float32Array): Promise<void>;
+    /** Called when a chunk of voice is recorded */
+    onVoiceChunk(buffer: Float32Array): void;
+    /** Called when the voice recording ends */
+    onVoiceEnd(buffers: Float32Array[]): void;
+}
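Capturing whole utterances might look like this; the event payload location (`detail`) is an assumption, since the docs above only name the `data` field:

declare const context: AudioContext;
declare const source: MediaStreamAudioSourceNode;

const chunker = new VoiceChunkOutputNode(context);
await chunker.loadModel();
source.connect(chunker);

chunker.backBufferDurationSeconds = 0.5;   // keep 0.5 s of lead-in audio

chunker.addEventListener('voicedataend', (e) => {
    // Assumed: the documented `data` payload rides on CustomEvent detail.
    const utterance = (e as CustomEvent).detail as Float32Array[];
    console.log('captured utterance of', utterance.length, 'chunks');
});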
+
+/**
+ * This AudioNode uses OpenAI's Whisper model to transcribe spoken speech to text.
+ *
+ * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text.
+ */
+declare class OpenAITranscriptionNode extends VoiceChunkOutputNode {
+    /** OpenAI API key */
+    apiKey: string;
+    /** Pending buffers */
+    private pendingBuffers;
+    /** Last request */
+    private lastRequestAbortController?;
+    /** True if currently transcribing */
+    isTranscribing: boolean;
+    /** Constructor */
+    constructor(audioContext: AudioContext, apiKey: string);
+    /** Called when the voice recording ends */
+    onVoiceEnd(buffers: Float32Array[]): Promise<void>;
+    /** Called when a transcription is ready */
+    onVoiceTranscription(text: string): void;
+}
+
+/**
+ * This AudioNode uses IntelliWeave's servers to transcribe spoken speech to text.
+ *
+ * - event `transcription` - Fired when a transcription is ready. `text` contains the transcribed text.
+ */
+declare class IntelliWeaveTranscriptionNode extends VoiceChunkOutputNode {
+    /** Debug: Export each recording as a wav file for download */
+    static debugExportWav: boolean;
+    /** Server address for transcription */
+    apiAddress: string;
+    /** API key */
+    apiKey: string;
+    /** WebSocket connection */
+    private ws?;
+    /** True if currently transcribing */
+    isTranscribing: boolean;
+    /** WebSocket shutdown timer */
+    private shutdownTimer?;
+    /** Constructor */
+    constructor(audioContext: AudioContext, apiKey: string);
+    /** Called when a voice chunk is received */
+    onVoiceChunk(buffer: Float32Array): Promise<void>;
+    /** Called when the voice recording ends */
+    onVoiceEnd(buffers: Float32Array[]): Promise<void>;
+    /** Called when a transcription is ready */
+    onVoiceTranscription(text: string): void;
+    /** Called when the WebSocket is closed */
+    onSocketClose(): void;
+}
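Either transcription node plugs into the same graph; a sketch (the API key is a placeholder, and the `transcription` event's payload location on `detail` is an assumption):

declare const context: AudioContext;
declare const source: MediaStreamAudioSourceNode;

const transcriber = new IntelliWeaveTranscriptionNode(context, 'iw-api-key');
await transcriber.loadModel();
source.connect(transcriber);

transcriber.addEventListener('transcription', (e) => {
    // Assumed: the documented `text` field rides on CustomEvent detail.
    const text = (e as CustomEvent).detail?.text;
    console.log('user said:', text);
});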
+
+/**
+ * Handles speech recognition from the microphone
+ *
+ * - event `speechstart` - We have detected the user started speaking
+ * - event `speechend` - We have detected the user stopped speaking
+ * - event `speech` - Speech recognition result
+ * - event `start` - Speech recognition started
+ * - event `end` - Speech recognition ended
+ */
+declare class WebWeaverSpeechRecognition extends EventTarget {
+    /** Reference to the AI */
+    ai?: IntelliWeave;
+    /** True if recognition is running */
+    isRunning: boolean;
+    /** The audio analyser node */
+    analyserNode?: AnalyserNode;
+    /** The audio analyser buffer */
+    analyserBuffer?: Float32Array;
+    /** The microphone stream */
+    micStream?: MediaStream;
+    /** Returns true if speech recognition is supported by this persona and browser */
+    get isSupported(): boolean;
+    /** Currently active voice detection node */
+    voiceDetection?: IntelliWeaveTranscriptionNode | OpenAITranscriptionNode;
+    /** Constructor */
+    constructor(ai: IntelliWeave);
+    private _skipEvents;
+    /** Start recognition */
+    start(): Promise<void>;
+    /** Stop recognition */
+    stop(): void;
+    /** @private Maximum volume heard this session */
+    maxVolumeHeard: number;
+    /** Get current (realtime) microphone volume level, from 0 to 1 */
+    get volumeLevel(): number;
+    /** True if currently detecting words being spoken */
+    get wordsCurrentlyBeingSpoken(): boolean;
+    /** True if currently transcribing voice to text */
+    get isTranscribing(): boolean;
+    /** Called when speech has been recorded */
+    onTranscription(e: CustomEvent): void;
+    /** Called to reset the speech recognizer */
+    reset(): Promise<void>;
+}
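The full recognition pipeline in one sketch (the `speech` event payload shape is an assumption):

declare const ai: IntelliWeave;

const recognition = new WebWeaverSpeechRecognition(ai);
if (recognition.isSupported) {
    recognition.addEventListener('speech', (e) => {
        console.log('recognized:', (e as CustomEvent).detail);  // payload shape assumed
    });
    await recognition.start();

    // Drive a simple VU meter from the live microphone level.
    setInterval(() => console.log('mic level:', recognition.volumeLevel.toFixed(2)), 250);
}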
+
+/** Handles creating and managing the AudioContext */
+declare class AudioSystem {
+    /** Reference to the AI */
+    private ai?;
+    /** The speech recognition module. */
+    speechRecognition: WebWeaverSpeechRecognition;
+    /** The speech output module. */
+    speechOutput: WebWeaverSpeechOutput;
+    /** The audio context */
+    context?: AudioContext;
+    /** List of active named locks */
+    locks: string[];
+    /** Returns true if speech recognition and output are supported by this persona and browser */
+    static get isSupported(): boolean;
+    /** Constructor */
+    constructor(ai: IntelliWeave);
+    /** Create a named lock to enable the audio system */
+    beginAccess(namedLock: string): Promise<void>;
+    /** Stop accessing the audio system */
+    endAccess(namedLock: string): void;
+}
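The named-lock pattern keeps the AudioContext alive while at least one holder needs it; a sketch, assuming the lock name is anything the app chooses:

declare const ai: IntelliWeave;

if (AudioSystem.isSupported) {
    const audio = new AudioSystem(ai);
    ai.audio = audio;                       // opt in to audio support

    await audio.beginAccess('voice-widget'); // hold the audio system open
    await audio.speechRecognition.start();

    // ...later, when the widget closes:
    audio.speechRecognition.stop();
    audio.endAccess('voice-widget');         // release; the context may shut down
}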
+
+/**
+ * This class allows you to use the AI as a logic engine, data extractor, etc.
+ */
+declare class AILogic {
+    /** Reference to the AI */
+    private ai?;
+    /** Constructor */
+    constructor(ai: IntelliWeave);
+    /** Ask the AI a yes/no question associated with the specified data. Data must be JSON-serializable, or a string of any kind of data. */
+    boolean(question: string, data: any): Promise<boolean>;
+    /**
+     * Ask the AI to select a choice from a list of options. The AI will return the selected choice.
+     * @param question The question to ask the AI.
+     * @param data The data to provide to the AI. This can be a string or a JSON-serializable object.
+     * @param options The list of options to choose from.
+     */
+    choose(question: string, data: any, options: string[]): Promise<string | undefined>;
+    /**
+     * Ask the AI to extract data from a text. The AI will return the extracted data, possibly an array of multiple extractions.
+     * Example: extract("Extract the user info", myData, true, [
+     *   { name: "id", type: "string", description: "The user's ID number." },
+     *   { name: "firstName", type: "string", description: "The user's first name" },
+     * ])
+     * @param question The question to ask the AI.
+     * @param data The data to provide to the AI. This can be a string or a JSON-serializable object.
+     * @param allowMultiple Whether to allow multiple extractions or not.
+     * @param extractions The list of extractions to perform. Each extraction is an object with the following properties:
+     * - name: The name of the extraction. This will be the key in the returned object.
+     * - type: The type of the extraction. This can be "string", "number", "boolean", "date", "email", "url", "phone", "address", "name", "organization", "person", "location", "time", "duration", "money", "percentage", "quantity", "custom".
+     * - description: A description of the extraction. This is optional.
+     */
+    extract(question: string, data: any, allowMultiple: boolean, extractions: {
+        name: string;
+        type: string;
+        description?: string;
+    }[]): Promise<any>;
+    /**
+     * Generate a Markdown document based on the input query from the user.
+     *
+     * @param query The query to generate the document from.
+     * @param callback The callback that will be called when streaming the response. Each call will contain the full text that has been generated so far.
+     */
+    generateMarkdown(query: string, callback: (txt: string) => void): Promise<string | null>;
+    /**
+     * Perform an instruction.
+     *
+     * @param instruction Describe the action to perform.
+     * @param query The user query to pass to the AI.
+     * @param callback The callback that will be called when streaming the response. Each call will contain the full text that has been generated so far.
+     * @returns The final response from the AI.
+     */
+    instruct(instruction: string, query: string, callback: (txt: string) => void): Promise<string | null>;
+}
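These primitives compose naturally; a sketch following the signatures above, with the input data stubbed:

declare const ai: IntelliWeave;
declare const ticket: any;          // some JSON-serializable app data (stub)
declare const text: string;         // free-form text to extract from (stub)

// Yes/no judgment over arbitrary data.
const isUrgent = await ai.logic.boolean('Is this ticket urgent?', ticket);

// Pick one option from a fixed list.
const team = await ai.logic.choose('Which team should handle this ticket?', ticket,
    ['billing', 'support', 'engineering']);

// Structured extraction, mirroring the doc-comment example above.
const people = await ai.logic.extract('Extract the people mentioned', text, true, [
    { name: 'firstName', type: 'string', description: "The person's first name" },
    { name: 'email', type: 'email' },
]);

console.log({ isUrgent, team, people });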
+
+/** Persona config received from the hub */
+interface WebWeaverGPTConfig {
+    /** ID */
+    id: string;
+    /** ChatGPT config */
+    model: ChatGPTConfig;
+    /** If true, message history will be sent to the IntelliWeave hub for analysis */
+    analytics?: boolean;
+    /** Persona name */
+    name?: string;
+    /** Instructions to the AI */
+    instructions?: string;
+    /** Introduction message, used in the automatic UI */
+    introductionMessage?: string;
+    /** Voice information */
+    voice?: {
+        /** Provider ID */
+        providerID: string;
+        /** API key for the provider */
+        apiKey: string;
+        /** Voice ID within the provider */
+        voiceID: string;
+    };
+    /** Transcription API information */
+    transcription?: {
+        /** Provider ID */
+        providerID: string;
+        /** API key for the provider */
+        apiKey: string;
+        /** Optional URL for the transcription service */
+        url?: string;
+    };
+    /** Knowledge base sources */
+    knowledge?: KnowledgeBaseSource[];
+}
+/**
+ * IntelliWeave interface. Loads a Persona from the hub and allows you to interact with it. This is the main entry point into the IntelliWeave
+ * SDK when not using the built-in UI.
+ *
+ * - event `load` - Fired when the AI is loaded with a new configuration.
+ * - event `error` - Fired when an error occurs during loading.
+ * - event `webweaver_loaded` - Fired when the AI is loaded with a new configuration. This is a global event that is fired on the window object.
+ * - event `webweaver_error` - Fired when an error occurs during loading. This is a global event that is fired on the window object.
+ * - event `input` - Fired when the user sends a message to the AI.
+ * - event `output` - Fired when the AI sends a message back to the user. If `event.detail.isChunk` is true, the message is incomplete and will be followed by more chunks.
+ * - event `toolstart` - Fired when the AI starts performing an action.
+ * - event `tool` - Fired when the AI finishes performing an action.
+ */
+declare class IntelliWeave extends EventTarget {
+    /** App version */
+    static version: string;
+    /** Callback when a message from the AI is returned. If isChunk is true, it may be incomplete and will be called again with more updates. */
+    onAIMessage?: (text: string, isChunk: boolean) => void;
+    /** Callback when the AI starts performing an action */
+    onAIToolStart?: ChatGPTConfig['onAIToolStart'];
+    /** Current conversation ID */
+    conversationID: string;
+    /** Knowledge database interface */
+    knowledgeBase: KnowledgeBase;
+    /** Current knowledge base items */
+    private _lastKBentries;
+    /** Config loaded from the API */
+    config?: WebWeaverGPTConfig;
+    /** Available LLMs */
+    models: {
+        id: string;
+        config: ChatGPTConfig;
+        priority?: number;
+    }[];
+    /** Current LLM */
+    currentModel?: ChatGPT;
+    /** The audio system. Set this to a new instance of AudioSystem to enable audio support */
+    audio: AudioSystem | null;
+    /** Silero VAD model blob */
+    vadModel?: Blob;
+    /** True if the AI has loaded */
+    get loaded(): boolean;
+    /** If loading fails, this stores the last error during load() */
+    error?: Error;
+    /** IntelliWeave API key */
+    apiKey: string;
+    /** Tracker for the current voice interaction */
+    _voiceTracker?: (text: string) => void;
+    /** Logic engine */
+    logic: AILogic;
+    /** A unique ID to identify this user. Defaults to a value stored in localStorage, or random. */
+    userID: string;
+    /** Extra data that will be passed to external knowledge base actions. */
+    extra: any;
+    /** Set model and load data from an API key */
+    load(apiKey: string): Promise<WebWeaverGPTConfig>;
+    /** Set the current model */
+    setModel(id: string): void;
+    private _lastSystemMsg;
+    /** Get the system message prefix, before the KB entries are added */
+    getContextPrefix(): Promise<any>;
+    /** Get system message to send to the AI */
+    onBeforeMessageProcessing(): Promise<void>;
+    /** Called to update the current knowledge base items */
+    updateKnowledgeBase(items: KnowledgeBaseItem[]): void;
+    private _lastOutput?;
+    /** @private Process incoming message from the AI. Can be used to respond to encoded actions in the text response. */
+    processIncomingMessage(message: string, isChunk?: boolean): void;
+    /** True if currently processing a message */
+    isProcessing: boolean;
+    /** Send a message, and get the response */
+    sendMessage(message: string): Promise<string | null>;
+    /** @private Called when the AI wants to run a KB action */
+    toolRunKBAction(kb: KnowledgeBaseItem, input: any): Promise<any>;
+    /** Submit an analytics event asynchronously */
+    submitAnalyticsEvent(data: any): void;
+    /** Reset the conversation */
+    resetConversation(): void;
+    /** Insert a message as if the assistant has written it */
+    insertAssistantMessage(message: string): void;
+    /** Export conversation state to a JSON object */
+    exportState(): {
+        type: string;
+        conversationID: string;
+        messages: any[] | undefined;
+    };
+    /** Import conversation state from JSON */
+    importState(state: any): void;
+    /** Clone this instance */
+    clone(): IntelliWeave;
+}
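Headless usage of the SDK entry point, per the declarations above (the API key is a placeholder; `isChunk` on the `output` detail is documented, while the `text` field is an assumption):

const ai = new IntelliWeave();

ai.addEventListener('output', (e) => {
    const { text, isChunk } = (e as CustomEvent).detail;  // isChunk documented; text assumed
    if (!isChunk) console.log('AI:', text);
});

await ai.load('iw-xxxx');                 // pull the Persona config from the hub
const answer = await ai.sendMessage('Hello!');

// Persist and restore the conversation across page loads.
localStorage.setItem('chat', JSON.stringify(ai.exportState()));
ai.importState(JSON.parse(localStorage.getItem('chat')!));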
+
+/**
+ * Register knowledge base sources and perform searches.
+ */
+declare class KnowledgeBase {
+    /** Reference to the AI */
+    ai?: IntelliWeave;
+    /** Knowledge base sources */
+    _sources: KnowledgeBaseSource[];
+    /** List of sources returned from the last window event */
+    _windowSources: KnowledgeBaseSource[];
+    /** List of last search results */
+    lastResults: KnowledgeBaseItem[];
+    /** Individual knowledge base entries added manually by the application */
+    manualEntries: KnowledgeBaseItem[];
+    /** Constructor */
+    constructor(ai: IntelliWeave);
+    /**
+     * Register a new knowledge base source. You can pass either just a query function, or an ID and a query function.
+     *
+     * @param idOrQuery The ID of the source, or a function that performs the query if no ID is provided
+     * @param query The function that performs the query. Can be undefined if the first param is a function.
+     */
+    registerSource(idOrQuery: string | KnowledgeBaseSource['query'], query?: KnowledgeBaseSource['query']): string;
+    /** Remove a knowledge base source */
+    removeSource(idOrQuery: string | KnowledgeBaseSource['query']): void;
+    /** Add a knowledge base item. */
+    addEntry(item: KnowledgeBaseItem): void;
+    /** Remove a knowledge base item. */
+    removeEntry(id: string): void;
+    /** Get all knowledge base sources */
+    get sources(): KnowledgeBaseSource[];
+    /** Search the knowledge base */
+    search(query: string): Promise<KnowledgeBaseItem[]>;
+    /** Get the KB entry with the specified ID. Requires the item to have been fetched in the last knowledge base query. */
+    getCachedEntry(id: string): KnowledgeBaseItem | undefined;
+    /** Create and register an external knowledge base source from a URL */
+    registerSourceFromURL(url: string, id?: string): void;
+    /** Clone this instance */
+    clone(): KnowledgeBase;
+}
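Registering sources and entries, per the signatures above (`fetchOpenOrders` is a hypothetical app helper):

declare const ai: IntelliWeave;
declare function fetchOpenOrders(query: string): Promise<unknown>;  // hypothetical helper

// A dynamic source by ID; the query function may be sync or async.
ai.knowledgeBase.registerSource('orders', async (query) => [{
    type: 'info',
    name: 'Open orders',
    content: JSON.stringify(await fetchOpenOrders(query)),
}]);

// A static entry pinned into every search result.
ai.knowledgeBase.addEntry({
    type: 'info',
    name: 'Support hours',
    content: 'Support is available 9am-5pm EST, Monday to Friday.',
    isContext: true,
});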
+/** Knowledge base source */
+interface KnowledgeBaseSource {
+    /** Source ID */
+    id?: string;
+    /** Source name */
+    name?: string;
+    /** Optional description */
+    description?: string;
+    /** Optional icon URL */
+    icon?: string;
+    /** If true, this source will not be queried. */
+    disabled?: boolean;
+    /** Source query function. This function should return a list of knowledge base entries that optionally match the query. */
+    query?: (query: string) => (KnowledgeBaseItem[] | Promise<KnowledgeBaseItem[]>);
+    /** URL query for remote sources */
+    url?: string;
+}
+/** Knowledge base item */
+interface KnowledgeBaseItem {
+    /** Item ID */
+    id?: string;
+    /** Item type. */
+    type: 'info' | 'action' | 'tour' | 'input-event' | 'output-event';
+    /** Item name */
+    name: string;
+    /** Item tags. Helps with search optimization. */
+    tags?: string;
+    /** Item content. Can be a function to return a dynamic string. */
+    content: string | (() => string);
+    /** If true, this item will always be returned from all search results. */
+    isContext?: boolean;
+    /** If true, this item will not be visible to the AI. */
+    disabled?: boolean;
+    /** List of parameters for an action function. */
+    parameters?: ({
+        name: string;
+        type: 'string' | 'boolean' | 'number';
+        description: string;
+    })[];
+    /**
+     * Item action. The parameters are defined in `parameters`. The response is stringified and sent to the AI.
+     * You can return any JSON-serializable object. You can also return a string describing to the AI the action
+     * that was performed. If an error is thrown, the AI will respond appropriately to the user.
+     */
+    action?: (input: any, ai: IntelliWeave) => (any | Promise<any>);
+    /** If true, this item will be removed from the AI's message history after it gets called. This is a special case for LLMs that struggle with follow-up function calls and need to use the KB search function first. */
+    removeFromMessageHistory?: boolean;
+}
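An `action` item ties a callable into the KB; a sketch following the interface above:

declare const ai: IntelliWeave;

ai.knowledgeBase.addEntry({
    type: 'action',
    name: 'Change theme',
    content: 'Switches the app between light and dark mode.',
    parameters: [
        { name: 'dark', type: 'boolean', description: 'True for dark mode' },
    ],
    action: (input) => {
        document.body.classList.toggle('dark', !!input.dark);
        return 'Theme changed.';   // stringified and sent back to the AI
    },
});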
+
+/**
+ * This is used when someone does `import "web-weaver-embedded"` and gives them a React Component (WebWeaverUI)
+ * that they can use in their React code.
+ */
+
+/** IntelliWeave embedded React component. This adds the Web Weaver floating button to the app. */
+declare const WebWeaverUI: (props: {
+    /** API key generated on the IntelliWeave hub */
+    apiKey: string;
+    /** If true, conversations will be recorded on the Hub for analysis. If not specified, uses the value specified on the Hub. */
+    analytics?: boolean;
+    /** The URL of the logo to use */
+    logo?: string;
+    /** If true, shows debug information in the console */
+    debug?: boolean;
+    /** Customize the context passed to the AI. Not needed since it's pulled from the Persona on the hub. */
+    context?: string;
+    /** The introduction message to show when the user first opens the chat. Not needed since it's pulled from the Persona on the hub. */
+    introductionMessage?: string;
+    /** Suggestion buttons to show on the first introduction message */
+    introductionSuggestions?: string[];
+    /** The knowledge base to add to the AI. */
+    knowledgeBase?: KnowledgeBaseItem[];
+    /** A list of custom sources */
+    sources?: KnowledgeBaseSource[];
+    /** X offset for the button from the right side of the screen, in pixels */
+    offsetX?: number;
+    /** Y offset for the button from the bottom of the screen, in pixels */
+    offsetY?: number;
+    /** User ID */
+    userID?: string;
+}) => React.JSX.Element | null;
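Dropping the floating button into a React tree (the API key is a placeholder; the import path follows the package name in the header above):

import React from 'react';
import { WebWeaverUI } from '@intelliweave/embedded';

export function App() {
    return (
        <>
            <main>{/* your app */}</main>
            <WebWeaverUI apiKey="iw-xxxx" offsetX={24} offsetY={24} debug />
        </>
    );
}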
+/** Global typescript definitions */
+declare global {
+    /** Web Component ... React supports any registered Web Component with dashes in the name, but they don't get TypeScript definitions */
+    namespace JSX {
+        interface IntrinsicElements {
+            'intelliweave-embed': any;
+        }
+    }
+}
+/** Provide the IntelliWeave object to all child components */
+declare function IntelliWeaveProvider(props: {
+    /** Optionally specify your own instance */
+    ai?: IntelliWeave;
+    /** Child components */
+    children: React.ReactNode;
+    /** API key generated on the IntelliWeave hub */
+    apiKey: string;
+    /** If true, conversations will be recorded on the Hub for analysis. If not specified, uses the value specified on the Hub. */
+    analytics?: boolean;
+    /** If true, shows debug information in the console */
+    debug?: boolean;
+}): React.JSX.Element;
+/** React hook to fetch IntelliWeave and be notified once it's loaded. Requires the `<IntelliWeaveProvider />` or `<WebWeaverUI />` component to exist somewhere in your scene graph. */
+declare function useIntelliWeave(): IntelliWeave | undefined;
+/** React hook to add an external KB search hook. This can provide static KB entries, or perform an async search and return dynamic entries. */
+declare function useIntelliWeaveKnowledge(query: KnowledgeBaseSource['query'], dependencies?: any[]): void;
+
+export { IntelliWeaveProvider, WebWeaverUI, useIntelliWeave, useIntelliWeaveKnowledge };
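And the hook-based flavor, wrapping an app with the provider (all values are placeholders):

import React from 'react';
import { IntelliWeaveProvider, useIntelliWeave, useIntelliWeaveKnowledge } from '@intelliweave/embedded';

function Status() {
    const ai = useIntelliWeave();             // undefined until the provider has loaded
    useIntelliWeaveKnowledge(() => [{
        type: 'info',
        name: 'Page context',
        content: `The user is on ${location.pathname}`,
    }], []);
    return <span>{ai?.loaded ? 'AI ready' : 'loading...'}</span>;
}

export const Root = () => (
    <IntelliWeaveProvider apiKey="iw-xxxx">
        <Status />
    </IntelliWeaveProvider>
);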