@jchaffin/voicekit 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +369 -0
- package/dist/adapters/deepgram.d.mts +43 -0
- package/dist/adapters/deepgram.d.ts +43 -0
- package/dist/adapters/deepgram.js +216 -0
- package/dist/adapters/deepgram.mjs +162 -0
- package/dist/adapters/elevenlabs.d.mts +41 -0
- package/dist/adapters/elevenlabs.d.ts +41 -0
- package/dist/adapters/elevenlabs.js +304 -0
- package/dist/adapters/elevenlabs.mjs +250 -0
- package/dist/adapters/livekit.d.mts +44 -0
- package/dist/adapters/livekit.d.ts +44 -0
- package/dist/adapters/livekit.js +225 -0
- package/dist/adapters/livekit.mjs +161 -0
- package/dist/adapters/openai.d.mts +41 -0
- package/dist/adapters/openai.d.ts +41 -0
- package/dist/adapters/openai.js +350 -0
- package/dist/adapters/openai.mjs +294 -0
- package/dist/chunk-22WLZIXO.mjs +33 -0
- package/dist/chunk-T3II3DRG.mjs +178 -0
- package/dist/chunk-UZ2VGPZD.mjs +33 -0
- package/dist/chunk-Y6FXYEAI.mjs +10 -0
- package/dist/index.d.mts +693 -0
- package/dist/index.d.ts +693 -0
- package/dist/index.js +1838 -0
- package/dist/index.mjs +1593 -0
- package/dist/server.d.mts +80 -0
- package/dist/server.d.ts +80 -0
- package/dist/server.js +147 -0
- package/dist/server.mjs +119 -0
- package/dist/types-DY31oVB1.d.mts +150 -0
- package/dist/types-DY31oVB1.d.ts +150 -0
- package/dist/types-mThnXW9S.d.mts +150 -0
- package/dist/types-mThnXW9S.d.ts +150 -0
- package/dist/types-uLnzb8NE.d.mts +150 -0
- package/dist/types-uLnzb8NE.d.ts +150 -0
- package/package.json +100 -0
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,693 @@
|
|
|
1
|
+
import { V as VoiceProviderProps, a as VoiceContextValue, T as TranscriptMessage, A as AgentConfig, b as VoiceAgentConfig, c as ToolDefinition, d as ToolParamDefinition, e as VoiceAdapter, f as VoiceStatus } from './types-DY31oVB1.js';
|
|
2
|
+
export { C as ConnectConfig, S as ServerAdapter, g as ServerSessionConfig, h as SessionEvents, i as SessionOptions, j as TranscriptData, k as VoiceConfig, l as VoiceSession } from './types-DY31oVB1.js';
|
|
3
|
+
import * as react_jsx_runtime from 'react/jsx-runtime';
|
|
4
|
+
import * as React from 'react';
|
|
5
|
+
import React__default, { FC, PropsWithChildren } from 'react';
|
|
6
|
+
import { z } from 'zod';
|
|
7
|
+
|
|
8
|
+
/**
 * Provider component that enables voice functionality.
 * Pass a provider adapter (e.g. `openai()`, `livekit()`) to connect to
 * different voice backends without changing any other code.
 *
 * @example
 * ```tsx
 * import { VoiceProvider, createAgent } from '@jchaffin/voicekit';
 * import { openai } from '@jchaffin/voicekit/openai';
 *
 * const agent = createAgent({
 *   name: 'Assistant',
 *   instructions: 'You are helpful.'
 * });
 *
 * function App() {
 *   return (
 *     <VoiceProvider adapter={openai()} agent={agent}>
 *       <MyChat />
 *     </VoiceProvider>
 *   );
 * }
 * ```
 */
declare function VoiceProvider({ children, adapter, agent, sessionEndpoint, model, language, onStatusChange, onTranscriptUpdate, onToolCall, onError, }: VoiceProviderProps): react_jsx_runtime.JSX.Element;
/**
 * Hook to access voice functionality (status, transcript, send/connect actions
 * exposed via {@link VoiceContextValue}).
 * Must be used within a VoiceProvider.
 */
declare function useVoice(): VoiceContextValue;
|
|
38
|
+
|
|
39
|
+
/** Props for the {@link Transcript} message list. */
interface TranscriptProps {
    /** Messages to render, in order. */
    messages: TranscriptMessage[];
    /** Class name applied to user messages. */
    userClassName?: string;
    /** Class name applied to assistant messages. */
    assistantClassName?: string;
    /** Content rendered when `messages` is empty. */
    emptyMessage?: React__default.ReactNode;
}
/** Renders a conversation transcript as a styled list of messages. */
declare function Transcript({ messages, userClassName, assistantClassName, emptyMessage }: TranscriptProps): react_jsx_runtime.JSX.Element;
/** Props for the {@link StatusIndicator} connection-status badge. */
interface StatusIndicatorProps {
    className?: string;
    /** Label shown while connected. */
    connectedText?: string;
    /** Label shown while connecting. */
    connectingText?: string;
    /** Label shown while disconnected. */
    disconnectedText?: string;
}
/** Displays the current voice connection status. Must be used within a VoiceProvider. */
declare function StatusIndicator({ className, connectedText, connectingText, disconnectedText }: StatusIndicatorProps): react_jsx_runtime.JSX.Element;
/** Props for the {@link ConnectButton} connect/disconnect toggle. */
interface ConnectButtonProps {
    className?: string;
    /** Label shown when disconnected (click to connect). */
    connectText?: string;
    /** Label shown when connected (click to disconnect). */
    disconnectText?: string;
    /** Label shown while a connection attempt is in progress. */
    connectingText?: string;
    /** Custom button content; overrides the text props when provided. */
    children?: React__default.ReactNode;
}
/** Button that toggles the voice connection. Must be used within a VoiceProvider. */
declare function ConnectButton({ className, connectText, disconnectText, connectingText, children, }: ConnectButtonProps): react_jsx_runtime.JSX.Element;
/** Props for the {@link ChatInput} text-entry field. */
interface ChatInputProps {
    placeholder?: string;
    className?: string;
    /** Label of the send button. */
    buttonText?: string;
    /** Called with the entered text when the user sends a message. */
    onSend?: (text: string) => void;
}
/** Text input for sending typed messages alongside voice. Must be used within a VoiceProvider. */
declare function ChatInput({ placeholder, className, buttonText, onSend, }: ChatInputProps): react_jsx_runtime.JSX.Element;
|
|
68
|
+
/** Props for the {@link VoiceChat} all-in-one chat interface. */
interface VoiceChatProps {
    /** Custom class for the container */
    className?: string;
    /** Height of the chat area */
    height?: string;
    /** Show header with status */
    showHeader?: boolean;
    /** Show input field */
    showInput?: boolean;
    /** Custom empty state */
    emptyState?: React__default.ReactNode;
    /** Custom header content */
    header?: React__default.ReactNode;
    /** Custom footer content */
    footer?: React__default.ReactNode;
}
/**
 * Complete voice chat interface component (transcript, status header and
 * text input in one widget). Must be rendered inside a VoiceProvider.
 *
 * @example
 * ```tsx
 * <VoiceProvider agent={agent}>
 *   <VoiceChat height="400px" />
 * </VoiceProvider>
 * ```
 */
declare function VoiceChat({ className, height, showHeader, showInput, emptyState, header, footer, }: VoiceChatProps): react_jsx_runtime.JSX.Element;
|
|
95
|
+
|
|
96
|
+
/**
 * Create a voice agent config with simplified configuration.
 * Returns a plain object; the provider adapter converts it to the
 * provider-specific format at connection time.
 *
 * @param config - Name, instructions and optional tools for the agent.
 * @returns A provider-agnostic {@link VoiceAgentConfig}.
 *
 * @example
 * ```ts
 * const agent = createAgent({
 *   name: 'Assistant',
 *   instructions: 'You are a helpful assistant.',
 *   tools: [weatherTool, navigationTool]
 * });
 * ```
 */
declare function createAgent(config: AgentConfig): VoiceAgentConfig;
/**
 * Create an agent using a structured template. The role, personality,
 * capabilities and constraints are composed into the agent's instructions.
 *
 * @example
 * ```ts
 * const agent = createAgentFromTemplate({
 *   name: 'Support Bot',
 *   role: 'customer support agent for an e-commerce site',
 *   personality: 'Friendly, patient, solution-oriented',
 *   capabilities: ['Answer product questions', 'Help with order status'],
 *   tools: [orderTool, productTool]
 * });
 * ```
 */
declare function createAgentFromTemplate(config: {
    /** Display name of the agent. */
    name: string;
    /** What the agent is (e.g. "customer support agent for ..."). */
    role: string;
    /** Tone/character description. */
    personality?: string;
    /** Things the agent can do. */
    capabilities?: string[];
    /** Things the agent must not do. */
    constraints?: string[];
    /** Tools available to the agent. */
    tools?: ToolDefinition[];
    /** Extra structured context made available to the agent. */
    context?: Record<string, unknown>;
}): VoiceAgentConfig;
|
|
134
|
+
|
|
135
|
+
/** Maps a ToolParamDefinition's `type` tag to the corresponding TS type. */
type InferParamType<T extends ToolParamDefinition> = T['type'] extends 'string' ? string : T['type'] extends 'number' ? number : T['type'] extends 'boolean' ? boolean : T['type'] extends 'array' ? unknown[] : T['type'] extends 'object' ? Record<string, unknown> : unknown;
/** Derives the typed params object for a whole parameter-definition map. */
type InferParams<T extends Record<string, ToolParamDefinition>> = {
    [K in keyof T]: InferParamType<T[K]>;
};
/**
 * Define a tool with full type inference: the `execute` callback's `params`
 * argument is typed from the `parameters` map via {@link InferParams}.
 *
 * @example
 * ```ts
 * const weatherTool = defineTool({
 *   name: 'get_weather',
 *   description: 'Get current weather for a location',
 *   parameters: {
 *     location: { type: 'string', description: 'City name' },
 *     unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }
 *   },
 *   required: ['location'],
 *   execute: async ({ location, unit }) => {
 *     return { temp: 72, unit, location };
 *   }
 * });
 * ```
 */
declare function defineTool<TParams extends Record<string, ToolParamDefinition>, TResult = unknown>(config: {
    name: string;
    description: string;
    parameters: TParams;
    /** Keys of `parameters` that the model must always supply. */
    required?: (keyof TParams)[];
    /** Implementation invoked when the model calls the tool. May be sync or async. */
    execute: (params: InferParams<TParams>) => Promise<TResult> | TResult;
}): ToolDefinition<InferParams<TParams>, TResult>;
|
|
165
|
+
/**
 * Create a navigation tool for single-page apps. The returned tool accepts a
 * `section` parameter and reports success, the section, or an error.
 *
 * @param sections - Section identifiers the agent may navigate to.
 *
 * @example
 * ```ts
 * const navTool = createNavigationTool(['about', 'projects', 'contact']);
 * ```
 */
declare function createNavigationTool(sections: string[]): ToolDefinition<{
    section: string;
}, {
    success: boolean;
    section?: string;
    error?: string;
}>;
/**
 * Create a tool that dispatches a custom event for UI updates.
 *
 * @example
 * ```ts
 * const showModalTool = createEventTool({
 *   name: 'show_modal',
 *   description: 'Show a modal dialog',
 *   parameters: { title: { type: 'string' } },
 *   eventType: 'voice:show-modal'
 * });
 *
 * // Listen in React:
 * useEffect(() => {
 *   const handler = (e) => setModal(e.detail.params);
 *   window.addEventListener('voice:show-modal', handler);
 *   return () => window.removeEventListener('voice:show-modal', handler);
 * }, []);
 * ```
 */
declare function createEventTool<TParams extends Record<string, ToolParamDefinition>>(config: {
    name: string;
    description: string;
    parameters: TParams;
    required?: (keyof TParams)[];
    /** Name of the DOM event dispatched when the tool is invoked. */
    eventType: string;
}): ToolDefinition<InferParams<TParams>, {
    success: boolean;
}>;
|
|
209
|
+
/**
 * Create a tool that calls an API endpoint. On failure the tool resolves to
 * `{ success: false, error }` rather than throwing.
 *
 * @example
 * ```ts
 * const searchTool = createAPITool({
 *   name: 'search',
 *   description: 'Search the database',
 *   parameters: { query: { type: 'string' } },
 *   required: ['query'],
 *   endpoint: '/api/search',
 *   method: 'POST'
 * });
 * ```
 */
declare function createAPITool<TParams extends Record<string, ToolParamDefinition>, TResult = unknown>(config: {
    name: string;
    description: string;
    parameters: TParams;
    required?: (keyof TParams)[];
    /** URL string, or a function deriving the URL from the tool params. */
    endpoint: string | ((params: InferParams<TParams>) => string);
    method?: 'GET' | 'POST';
    /** Extra HTTP headers sent with the request. */
    headers?: Record<string, string>;
    /** Post-processes the raw response before returning it to the agent. */
    transform?: (response: unknown) => TResult;
}): ToolDefinition<InferParams<TParams>, TResult | {
    success: false;
    error: string;
}>;
/**
 * Create a tool that searches projects/content by technology or keyword.
 * Provide either `endpoint` (simple POST) or a custom `fetch` function.
 *
 * @example
 * ```ts
 * const findProjectsTool = createSearchTool({
 *   name: 'find_projects_by_tech',
 *   description: 'Find projects that use a specific technology',
 *   searchParam: 'technology',
 *   endpoint: '/api/search',
 *   // OR custom fetch function:
 *   fetch: async (query) => {
 *     const res = await fetch(`/api/rag?q=${query}`);
 *     return res.json();
 *   }
 * });
 * ```
 */
declare function createSearchTool<TResult = unknown>(config: {
    name: string;
    description: string;
    /** Parameter name shown to the model (e.g. 'technology', 'query', 'keyword') */
    searchParam?: string;
    /** Simple endpoint - will POST { query: value } */
    endpoint?: string;
    /** Custom fetch function for complex queries */
    fetch?: (query: string) => Promise<TResult>;
    /** Transform the response */
    transform?: (response: TResult) => unknown;
    /** Event to dispatch with results (for UI updates) */
    eventType?: string;
}): ToolDefinition;
|
|
269
|
+
/**
 * Create a RAG-powered search tool backed by a retrieval endpoint.
 *
 * @example
 * ```ts
 * const ragTool = createRAGTool({
 *   name: 'search_codebase',
 *   description: 'Search the codebase for relevant code snippets',
 *   endpoint: '/api/rag'
 * });
 * ```
 */
declare function createRAGTool(config: {
    name: string;
    description: string;
    /** URL of the retrieval endpoint. */
    endpoint: string;
    /** Optional: filter by repo name */
    repo?: string;
    /** Number of results to return */
    limit?: number;
    /** Event to dispatch with results */
    eventType?: string;
}): ToolDefinition;
/** Event name for tool results */
declare const TOOL_RESULT_EVENT = "voicekit:tool-result";
|
|
294
|
+
|
|
295
|
+
/** Record of a single tool invocation: what was called, with what, and the outcome. */
interface ToolResult<T = unknown> {
    /** Tool name. */
    name: string;
    /** Raw parameters the tool was invoked with. */
    input: unknown;
    /** Value the tool resolved to. */
    result: T;
    /** Epoch milliseconds — TODO confirm unit against implementation. */
    timestamp: number;
}
/**
 * Hook to listen for all tool results.
 *
 * @example
 * ```tsx
 * function ToolDebugger() {
 *   const { results, lastResult } = useToolResults();
 *
 *   return (
 *     <div>
 *       <h3>Last Tool: {lastResult?.name}</h3>
 *       <pre>{JSON.stringify(lastResult?.result, null, 2)}</pre>
 *     </div>
 *   );
 * }
 * ```
 */
declare function useToolResults(): {
    /** All results observed so far, oldest first. */
    results: ToolResult<unknown>[];
    // NOTE(review): typed non-nullable, yet the example above uses `lastResult?.` —
    // presumably undefined before any tool has run; confirm against implementation.
    lastResult: ToolResult<unknown>;
    /** Clears the accumulated results. */
    clear: () => void;
};
/** Callback receiving a tool's raw input and typed result. */
type ToolHandler<T = unknown> = (input: unknown, result: T) => void;
|
|
324
|
+
/**
 * Hook to register handlers for specific tools. The handler fires each time
 * the named tool produces a result.
 *
 * @param toolName - Name of the tool to listen for.
 * @param handler - Invoked with the tool's input and result.
 *
 * @example
 * ```tsx
 * function ProjectDisplay() {
 *   const [projects, setProjects] = useState([]);
 *
 *   useToolListener('get_projects', (input, result) => {
 *     if (result.success) {
 *       setProjects(result.projects);
 *     }
 *   });
 *
 *   return <ProjectList projects={projects} />;
 * }
 * ```
 */
declare function useToolListener<T = unknown>(toolName: string, handler: ToolHandler<T>): void;
|
|
343
|
+
/**
|
|
344
|
+
* Hook to get the latest result from a specific tool
|
|
345
|
+
*
|
|
346
|
+
* @example
|
|
347
|
+
* ```tsx
|
|
348
|
+
* function ContactModal() {
|
|
349
|
+
* const { result, input, clear } = useToolResult('open_contact');
|
|
350
|
+
*
|
|
351
|
+
* if (!result) return null;
|
|
352
|
+
*
|
|
353
|
+
* return (
|
|
354
|
+
* <Modal onClose={clear}>
|
|
355
|
+
* <ContactForm prefill={input} />
|
|
356
|
+
* </Modal>
|
|
357
|
+
* );
|
|
358
|
+
* }
|
|
359
|
+
* ```
|
|
360
|
+
*/
|
|
361
|
+
declare function useToolResult<T = unknown>(toolName: string): {
|
|
362
|
+
input: {} | null;
|
|
363
|
+
result: NonNullable<T> | null;
|
|
364
|
+
hasResult: boolean;
|
|
365
|
+
clear: () => void;
|
|
366
|
+
};
|
|
367
|
+
|
|
368
|
+
/**
 * Hook for recording audio from a MediaStream.
 *
 * @example
 * ```tsx
 * const { startRecording, stopRecording, downloadRecording } = useAudioRecorder();
 *
 * // Start recording from audio element
 * const stream = audioElement.srcObject as MediaStream;
 * startRecording(stream);
 *
 * // Later...
 * stopRecording();
 * await downloadRecording(); // Downloads as WAV
 * ```
 */
declare function useAudioRecorder(): {
    /** Begins recording from the given stream. */
    startRecording: (stream: MediaStream) => Promise<void>;
    /** Stops an in-progress recording. */
    stopRecording: () => void;
    /** Triggers a browser download of the recording; resolves to the blob, or null if nothing was recorded. */
    downloadRecording: (filename?: string) => Promise<Blob | null>;
    /** Returns the recorded audio as a Blob, or null if nothing was recorded. */
    getRecordingBlob: () => Promise<Blob | null>;
    /** Discards any recorded data. */
    clearRecording: () => void;
    /** Whether a recording is currently in progress. */
    isRecording: () => boolean;
};
|
|
392
|
+
|
|
393
|
+
/** Lifecycle callbacks for {@link useRealtimeSession}. */
interface RealtimeSessionCallbacks {
    /** Fired whenever the connection status changes. */
    onConnectionChange?: (status: VoiceStatus) => void;
    /** Fired when control is handed off to another agent. */
    onAgentHandoff?: (agentName: string) => void;
}
/** Options accepted by {@link useRealtimeSession}'s `connect`. */
interface ConnectOptions {
    /** Supplies a short-lived credential used to authenticate the session. */
    getEphemeralKey: () => Promise<string>;
    /** Agent config or array of agent configs. First entry is the root agent. */
    initialAgents: VoiceAgentConfig[];
    /** Element used to play remote audio. */
    audioElement?: HTMLAudioElement;
    /** Extra context passed through to the session. */
    extraContext?: Record<string, unknown>;
    /** Output guardrails applied to agent responses. */
    outputGuardrails?: unknown[];
    /** Provider adapter to use. Required for new-style usage. */
    adapter?: VoiceAdapter;
}
/**
 * Low-level hook managing a realtime voice session: connection lifecycle,
 * text/event sending, mute, push-to-talk and interruption.
 */
declare function useRealtimeSession(callbacks?: RealtimeSessionCallbacks): {
    readonly status: VoiceStatus;
    readonly connect: ({ getEphemeralKey, initialAgents, audioElement, extraContext, outputGuardrails, adapter, }: ConnectOptions) => Promise<void>;
    readonly disconnect: () => Promise<void>;
    readonly sendUserText: (text: string) => void;
    readonly sendEvent: (ev: Record<string, unknown>) => void;
    readonly mute: (m: boolean) => void;
    readonly pushToTalkStart: () => void;
    readonly pushToTalkStop: () => void;
    readonly interrupt: () => void;
};
|
|
418
|
+
|
|
419
|
+
/**
 * Hook exposing session-history event handlers via a stable ref, so they can
 * be wired to session events without re-subscribing on each render.
 */
declare function useSessionHistory(): React.MutableRefObject<{
    handleAgentToolStart: (details: Record<string, unknown>, _agent: unknown, functionCall: Record<string, unknown>) => void;
    handleAgentToolEnd: (details: Record<string, unknown>, _agent: unknown, functionCall: Record<string, unknown>, result: unknown) => void;
    handleHistoryUpdated: (items: Record<string, unknown>[]) => void;
    handleHistoryAdded: (item: Record<string, unknown>) => void;
    handleTranscriptionDelta: (item: Record<string, unknown>, audioPositionMs?: number) => void;
    handleTranscriptionCompleted: (item: Record<string, unknown>) => void;
    isInterrupted: (itemId: string) => boolean;
    handleTruncation: (itemId: string, audioEndMs: number, totalAudioMs: number) => void;
    handleGuardrailTripped: (details: Record<string, unknown>, _agent: unknown, guardrail: Record<string, unknown>) => void;
}>;
|
|
430
|
+
|
|
431
|
+
/** A single entry in the session transcript: either a message or a breadcrumb marker. */
interface TranscriptItem {
    itemId: string;
    type: 'MESSAGE' | 'BREADCRUMB';
    /** Present for MESSAGE items. */
    role?: 'user' | 'assistant';
    /** Message text or breadcrumb title. */
    title: string;
    /** Structured payload attached to the item. */
    data?: Record<string, unknown>;
    /** Whether the item's details are expanded in the UI. */
    expanded: boolean;
    /** Display timestamp string. */
    timestamp: string;
    /** Creation time in epoch milliseconds — TODO confirm epoch base. */
    createdAtMs: number;
    status: 'IN_PROGRESS' | 'DONE';
    /** Hidden items are kept in history but not rendered. */
    isHidden: boolean;
    /** Moderation outcome attached when a guardrail has evaluated this item. */
    guardrailResult?: {
        status: 'IN_PROGRESS' | 'DONE';
        category: string;
        rationale: string;
        testText?: string;
    };
}
/** Imperative API for reading and mutating the transcript. */
interface TranscriptContextValue {
    transcriptItems: TranscriptItem[];
    addTranscriptMessage: (itemId: string, role: 'user' | 'assistant', text: string, isHidden?: boolean) => void;
    /** Replaces or (when `isDelta` is true) appends to an existing message's text. */
    updateTranscriptMessage: (itemId: string, text: string, isDelta: boolean) => void;
    addTranscriptBreadcrumb: (title: string, data?: Record<string, unknown>) => void;
    toggleTranscriptItemExpand: (itemId: string) => void;
    updateTranscriptItem: (itemId: string, updatedProperties: Partial<TranscriptItem>) => void;
    clearTranscript: () => void;
}
/** Context provider supplying {@link TranscriptContextValue} to descendants. */
declare const TranscriptProvider: FC<PropsWithChildren>;
/** Access the transcript context. Must be used within a TranscriptProvider. */
declare function useTranscript(): TranscriptContextValue;
|
|
460
|
+
|
|
461
|
+
/** One entry in the client/server event log. */
interface LoggedEvent {
    id: number | string;
    /** Whether the event was sent by the client or received from the server. */
    direction: 'client' | 'server';
    eventName: string;
    eventData: Record<string, unknown>;
    timestamp: string;
    /** Whether the event's payload is expanded in the UI. */
    expanded: boolean;
}
/** Loosely-typed event payload; `event_id` and `type` are recognized when present. */
interface EventData {
    [key: string]: unknown;
    event_id?: string | number;
    type?: string;
}
/** A conversation-history item as logged to the event stream. */
interface HistoryItem {
    type: string;
    role: string;
    content: unknown[];
    status?: string;
    name?: string;
}
/** Imperative API for the debug event log. */
interface EventContextValue {
    loggedEvents: LoggedEvent[];
    logClientEvent: (eventObj: EventData, eventNameSuffix?: string) => void;
    logServerEvent: (eventObj: EventData, eventNameSuffix?: string) => void;
    logHistoryItem: (item: HistoryItem) => void;
    toggleExpand: (id: number | string) => void;
    clearEvents: () => void;
}
/** Context provider supplying {@link EventContextValue} to descendants. */
declare const EventProvider: FC<PropsWithChildren>;
/** Access the event-log context. Must be used within an EventProvider. */
declare function useEvent(): EventContextValue;
|
|
491
|
+
|
|
492
|
+
/** Moderation categories recognized by the built-in guardrail classifier. */
declare const MODERATION_CATEGORIES: readonly ["OFFENSIVE", "OFF_BRAND", "VIOLENCE", "NONE"];
type ModerationCategory = (typeof MODERATION_CATEGORIES)[number];
/** Zod enum mirroring {@link MODERATION_CATEGORIES} for runtime validation. */
declare const ModerationCategoryZod: z.ZodEnum<{
    NONE: "NONE";
    OFFENSIVE: "OFFENSIVE";
    OFF_BRAND: "OFF_BRAND";
    VIOLENCE: "VIOLENCE";
}>;
/** Strict Zod schema for a classifier verdict (rationale, category, optional test text). */
declare const GuardrailOutputZod: z.ZodObject<{
    moderationRationale: z.ZodString;
    moderationCategory: z.ZodEnum<{
        NONE: "NONE";
        OFFENSIVE: "OFFENSIVE";
        OFF_BRAND: "OFF_BRAND";
        VIOLENCE: "VIOLENCE";
    }>;
    testText: z.ZodOptional<z.ZodString>;
}, z.core.$strict>;
type GuardrailOutput = z.infer<typeof GuardrailOutputZod>;
/** Guardrail evaluation state attached to a transcript item. */
interface GuardrailResult {
    status: 'IN_PROGRESS' | 'DONE';
    testText?: string;
    category?: ModerationCategory;
    rationale?: string;
}
/** Outcome of running one output guardrail. */
interface OutputGuardrailResult {
    /** True when the guardrail blocked the output. */
    tripwireTriggered: boolean;
    outputInfo: GuardrailOutput | {
        error: string;
    };
}
/** Arguments passed to an output guardrail's `execute`. */
interface OutputGuardrailArgs {
    agentOutput: string;
    agent?: unknown;
    context?: unknown;
}
/** A named check applied to agent output before it is surfaced. */
interface OutputGuardrail {
    name: string;
    execute: (args: OutputGuardrailArgs) => Promise<OutputGuardrailResult>;
}
/** Configuration for the guardrail classifier backend. */
interface GuardrailClassifierConfig {
    apiEndpoint?: string;
    model?: string;
    categories?: readonly string[];
    companyName?: string;
}
/**
 * Run the guardrail classifier against a message.
 * Resolves to the classifier verdict, or null — presumably on classifier
 * failure; confirm against implementation.
 */
declare function runGuardrailClassifier(message: string, config?: GuardrailClassifierConfig): Promise<GuardrailOutput | null>;
/**
 * Create a moderation guardrail for output filtering
 */
declare function createModerationGuardrail(config?: GuardrailClassifierConfig): OutputGuardrail;
/**
 * Create a custom guardrail with your own classifier function
 */
declare function createCustomGuardrail(name: string, classifier: (output: string) => Promise<{
    triggered: boolean;
    info: unknown;
}>): OutputGuardrail;
|
|
553
|
+
|
|
554
|
+
/**
 * User-in-the-loop suggestion system.
 *
 * Tools can emit structured suggestions that render as interactive chips.
 * When a user clicks one, a message is sent back to the voice agent,
 * closing the loop.
 */
interface SuggestionItem {
    id: string;
    /** Chip label shown to the user. */
    label: string;
    /** Text message to send to the agent when selected */
    message: string;
    /** Optional icon identifier for the consumer to map */
    icon?: string;
    /** Optional secondary text */
    description?: string;
    /** Freeform metadata the consumer can use for rendering */
    meta?: Record<string, unknown>;
}
/** Category tag for a suggestion group; open-ended (`| string`) so consumers can add their own. */
type SuggestionType = 'action' | 'project' | 'experience' | 'skill' | 'section' | string;
interface SuggestionGroup {
    /** Category of these suggestions */
    type: SuggestionType;
    /** Items to display */
    items: SuggestionItem[];
    /** Optional heading shown above the chips */
    prompt?: string;
}
interface SuggestionsContextValue {
    /** Current suggestion group (null when none are active) */
    suggestions: SuggestionGroup | null;
    /** Push a new suggestion group (typically called from a tool) */
    setSuggestions: (group: SuggestionGroup | null) => void;
    /** Handle a user clicking a suggestion: sends the message and clears */
    selectSuggestion: (item: SuggestionItem) => void;
    /** Clear current suggestions */
    clearSuggestions: () => void;
}
/** DOM event name used to deliver suggestion groups to the provider. */
declare const SUGGESTION_EVENT = "voicekit:suggestions";
/** Props for {@link SuggestionProvider}. */
interface SuggestionProviderProps {
    children: React__default.ReactNode;
    /**
     * Called when the user clicks a suggestion.
     * Typically wired to `sendMessage(item.message)` on the voice session.
     */
    onSelect?: (item: SuggestionItem) => void;
    /** If true, auto-clear suggestions on select (default: true) */
    autoClear?: boolean;
}
/** Context provider that collects emitted suggestions and handles selection. */
declare function SuggestionProvider({ children, onSelect, autoClear, }: SuggestionProviderProps): react_jsx_runtime.JSX.Element;
/**
 * Access the suggestion system from any component inside a SuggestionProvider.
 */
declare function useSuggestions(): SuggestionsContextValue;
|
|
609
|
+
|
|
610
|
+
/**
 * Emit a suggestion group from a tool.
 * Works in both React (via DOM event → SuggestionProvider) and vanilla contexts.
 *
 * @param group - The suggestion group to display.
 *
 * @example
 * ```ts
 * import { emitSuggestions } from '@jchaffin/voicekit';
 *
 * const getProjects = defineTool({
 *   name: 'get_projects',
 *   execute: async () => {
 *     const projects = await fetchProjects();
 *     emitSuggestions({
 *       type: 'project',
 *       prompt: 'Projects:',
 *       items: projects.map(p => ({
 *         id: p.name,
 *         label: p.name,
 *         message: `Tell me about the ${p.name} project`,
 *         description: p.description,
 *         meta: { url: p.url, github: p.github },
 *       })),
 *     });
 *     return { success: true, projects };
 *   }
 * });
 * ```
 */
declare function emitSuggestions(group: SuggestionGroup): void;
/**
 * Clear the current suggestions.
 */
declare function clearSuggestions(): void;
|
|
643
|
+
|
|
644
|
+
/** Props for the {@link SuggestionChips} chip list. */
interface SuggestionChipsProps {
    /** Override the group from context (optional; defaults to useSuggestions()) */
    group?: SuggestionGroup | null;
    /** Custom renderer for each item. Receives the item and a click handler. */
    renderItem?: (item: SuggestionItem, onSelect: () => void) => React__default.ReactNode;
    /** Extra CSS class on the outer wrapper */
    className?: string;
    /** Extra CSS class on each chip button */
    chipClassName?: string;
}
/** Renders the active suggestion group as clickable chips; null when there is nothing to show. */
declare function SuggestionChips({ group: groupOverride, renderItem, className, chipClassName, }: SuggestionChipsProps): react_jsx_runtime.JSX.Element | null;
|
|
655
|
+
|
|
656
|
+
/**
 * Encodes a Float32Array as a WAV file.
 *
 * @param samples - PCM samples to encode.
 * @param sampleRate - Sample rate in Hz written to the WAV header.
 * @returns The complete WAV file contents.
 */
declare function encodeWAV(samples: Float32Array, sampleRate: number): ArrayBuffer;
/**
 * Converts a WebM audio blob to a WAV blob.
 */
declare function convertWebMToWav(blob: Blob): Promise<Blob>;
/**
 * Get audio format for codec.
 * Returns 'pcm16' or 'g711_ulaw' depending on the codec name.
 */
declare function audioFormatForCodec(codec: string): "pcm16" | "g711_ulaw";
/**
 * Apply codec preferences to RTCPeerConnection.
 * Returns the same connection for chaining.
 */
declare function applyCodecPreferences(pc: RTCPeerConnection, codec: string): RTCPeerConnection;
|
|
672
|
+
|
|
673
|
+
// `any[]` (not `unknown[]`) is deliberate here: concrete handler signatures in an
// EventMap must remain assignable to Handler, which `unknown[]` parameters would break.
type Handler = (...args: any[]) => void;
/** Maps event names to their handler signatures. */
type EventMap = {
    [event: string]: Handler;
};
/**
 * Minimal typed event emitter for adapter session implementations.
 * `emit` is protected: only subclasses may fire events; consumers subscribe
 * via `on`/`off`.
 */
declare class EventEmitter<Events extends EventMap = EventMap> {
    private handlers;
    /** Subscribes `handler` to `event`. */
    on<E extends string & keyof Events>(event: E, handler: Events[E]): void;
    /** Unsubscribes a previously registered handler. */
    off<E extends string & keyof Events>(event: E, handler: Events[E]): void;
    /** Fires `event`, forwarding `args` to each subscribed handler. */
    protected emit<E extends string & keyof Events>(event: E, ...args: Parameters<Events[E]>): void;
    /** Drops every handler for every event. */
    removeAllListeners(): void;
}
|
|
687
|
+
|
|
688
|
+
/**
 * @deprecated Use `VoiceAgentConfig` instead. `RealtimeAgent` was the OpenAI-specific
 * type; it is kept only as a backward-compatible alias and may be removed in a
 * future major version.
 */
type RealtimeAgent = VoiceAgentConfig;
|
|
692
|
+
|
|
693
|
+
export { AgentConfig, ChatInput, ConnectButton, type ConnectOptions, EventEmitter, EventProvider, type GuardrailClassifierConfig, type GuardrailOutput, GuardrailOutputZod, type GuardrailResult, type LoggedEvent, MODERATION_CATEGORIES, type ModerationCategory, ModerationCategoryZod, type OutputGuardrail, type OutputGuardrailArgs, type OutputGuardrailResult, type RealtimeAgent, type RealtimeSessionCallbacks, SUGGESTION_EVENT, StatusIndicator, SuggestionChips, type SuggestionChipsProps, type SuggestionGroup, type SuggestionItem, SuggestionProvider, type SuggestionType, type SuggestionsContextValue, TOOL_RESULT_EVENT, ToolDefinition, ToolParamDefinition, Transcript, type TranscriptItem, TranscriptMessage, TranscriptProvider, VoiceAdapter, VoiceAgentConfig, VoiceChat, type VoiceChatProps, VoiceContextValue, VoiceProvider, VoiceProviderProps, VoiceStatus, applyCodecPreferences, audioFormatForCodec, clearSuggestions, convertWebMToWav, createAPITool, createAgent, createAgentFromTemplate, createCustomGuardrail, createEventTool, createModerationGuardrail, createNavigationTool, createRAGTool, createSearchTool, defineTool, emitSuggestions, encodeWAV, runGuardrailClassifier, useAudioRecorder, useEvent, useRealtimeSession, useSessionHistory, useSuggestions, useToolListener, useToolResult, useToolResults, useTranscript, useVoice };
|