@modelnex/sdk 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +66 -0
- package/dist/aom-J6NYMGDW.mjs +69 -0
- package/dist/dom-sync-L5KIP45X.mjs +55 -0
- package/dist/index.d.mts +562 -0
- package/dist/index.d.ts +562 -0
- package/dist/index.js +6004 -0
- package/dist/index.mjs +5808 -0
- package/package.json +58 -0
package/dist/index.d.mts
ADDED
|
@@ -0,0 +1,562 @@
|
|
|
1
|
+
import React$1 from 'react';
|
|
2
|
+
import { z } from 'zod';
|
|
3
|
+
import * as react_jsx_runtime from 'react/jsx-runtime';
|
|
4
|
+
|
|
5
|
+
/** Discriminator for the behavior of a single tour step. */
type TourStepType = 'narrate' | 'act' | 'ask_and_fill' | 'user_input' | 'ask_or_fill';
/**
 * Anchors used to re-locate a step's target element at playback time.
 * testId is the primary anchor; fingerprint and textContaining are fallbacks.
 */
interface TourStepElement {
    /** data-testid value — primary anchor; survives DOM restructuring */
    testId?: string;
    /** DOM fingerprint captured at record time — first fallback */
    fingerprint: string;
    /** Visible label at record time — second fallback */
    textContaining: string;
}
/** One step of a voice tour: narration plus an optional target element / action. */
interface TourStep {
    // NOTE(review): presumably steps play in ascending `order` — confirm against playback impl
    order: number;
    type: TourStepType;
    /** Route pattern for navigation (e.g. "/documents", "/documents/*") */
    url?: string;
    /** Target element for highlight / interaction */
    element?: TourStepElement;
    /** LLM-polished narration spoken via TTS to end user */
    narration: string;
    /** Subscriber's original spoken words — stored for re-editing */
    rawNarration: string;
    /** Question the AI speaks to the user (ask_and_fill / ask_or_fill) */
    ask?: string;
    /** Plain-English goal for act steps — AI plans DOM actions */
    goal?: string;
    /** Pacing for narrate steps */
    waitFor?: 'voice_next' | 'auto_advance';
}
/** Event that causes a tour to start automatically ('manual' means user-initiated only). */
type TourTrigger = 'first_visit' | 'feature_unlock' | 'idle_on_empty_state' | 'return_visit' | 'new_feature' | 'manual';
/** Voice/TTS configuration for a tour. */
interface TourVoiceConfig {
    // NOTE(review): format (BCP-47?) not shown here — confirm against server
    language: string;
    /** Optional TTS voice identifier */
    ttsVoice?: string;
}
/** A recorded voice tour: metadata, audience targeting, and its ordered steps. */
interface Tour {
    id: string;
    name: string;
    /** Owning website — multi-tenancy scope */
    websiteId: string;
    /** Audience filter — matched against UserProfile.type */
    targetUserTypes: string[];
    trigger: TourTrigger;
    voice: TourVoiceConfig;
    steps: TourStep[];
    status: 'draft' | 'published';
    // Timestamps are strings — presumably ISO, matching TagData's convention; confirm
    createdAt: string;
    updatedAt: string;
}
|
|
49
|
+
/** User profile passed to ModelNexProvider — used for tour targeting */
interface UserProfile {
    /** User type matched against tour targetUserTypes (e.g. "admin", "free") */
    type: string;
    /** Whether this is a new user — triggers first_visit tours */
    isNewUser?: boolean;
    /** User ID for per-user completion state */
    userId?: string;
}
|
|
58
|
+
/** Playback state machine states */
type TourPlaybackState = 'idle' | 'intro' | 'executing' | 'waiting_voice' | 'waiting_input' | 'complete';
/**
 * Internal recording step being built.
 * Mirrors TourStep, but `url` is required (captured at record time) and the
 * element carries an extra display `label` for the recording UI.
 */
interface RecordingStep {
    order: number;
    type: TourStepType;
    /** Route where the step was recorded */
    url: string;
    /** Target element plus a human-readable label captured during recording */
    element?: TourStepElement & {
        label?: string;
    };
    /** Subscriber's original spoken words */
    rawNarration: string;
    /** LLM-polished narration */
    narration: string;
    /** Question for ask_and_fill / ask_or_fill steps */
    ask?: string;
    /** Plain-English goal for act steps */
    goal?: string;
    /** Pacing for narrate steps */
    waitFor?: 'voice_next' | 'auto_advance';
}
|
|
74
|
+
|
|
75
|
+
/**
 * Automatic DOM element extraction with stable fingerprinting.
 *
 * Scans the live DOM for all interactive elements (buttons, links, inputs, forms, etc.)
 * and assigns each a deterministic fingerprint based on semantic anchors — NOT fragile
 * DOM paths. A MutationObserver re-scans on changes (debounced).
 */
interface ExtractedElement {
    /** Stable fingerprint derived from semantic anchors */
    fingerprint: string;
    /** HTML tag name (lowercase) */
    tagName: string;
    /** Visible text content (truncated) */
    text: string;
    /** Element role (button, link, input, form, etc.) */
    role: string;
    /** Bounding box relative to viewport */
    rect: {
        top: number;
        left: number;
        width: number;
        height: number;
    };
    /** Semantic attributes found on the element */
    attributes: {
        id?: string;
        name?: string;
        type?: string;
        ariaLabel?: string;
        dataTestId?: string;
        href?: string;
        placeholder?: string;
    };
    /** Nearest ancestor heading text (for context) */
    nearestHeading: string | null;
    /** Parent container context (e.g. "dialog:Confirm Delete", "menu", "popover") */
    parentContainer: string | null;
    /** Whether the element is currently disabled */
    disabled: boolean;
    /** Reference to the live DOM element (not serializable) */
    element: HTMLElement;
}
/** Compute the stable semantic fingerprint for a single element. */
declare function generateFingerprint(el: HTMLElement): string;
/** One-shot scan of the current document for interactive elements. */
declare function extractInteractiveElements(): ExtractedElement[];
/**
 * Hook that automatically extracts all interactive DOM elements and keeps
 * the list up-to-date via MutationObserver (debounced at 500ms).
 */
declare function useAutoExtract(): ExtractedElement[];
|
|
124
|
+
|
|
125
|
+
/**
 * Tag store for human-in-the-loop tagging.
 * Persists semantic descriptions for auto-extracted elements via localStorage.
 */
interface TagData {
    /** The element fingerprint this tag is associated with (can be empty if selector is used) */
    fingerprint: string;
    /** Optional CSS selector for pattern-based matching across instances */
    selector?: string;
    /** Optional human-readable ID for the pattern (e.g. 'delete-btn') */
    patternId?: string;
    /** Human-provided semantic description */
    description: string;
    /** What happens when the user interacts with this element (e.g. "Opens a confirmation dialog") */
    behavior?: string;
    /** The page route where this element was discovered (e.g. "/documents") */
    sourcePage?: string;
    /** How this element becomes visible (e.g. "Inside dropdown after clicking '...' menu") */
    displayContext?: string;
    /** Optional category */
    category?: 'button' | 'navigation' | 'form-field' | 'action' | 'display' | 'other';
    /** Optional freeform metadata */
    metadata?: Record<string, unknown>;
    /** ISO timestamp of creation */
    createdAt: string;
    /** ISO timestamp of last update */
    updatedAt: string;
    /** Contextual signature (nearestHeading, parentRole) to help resolve if selector fails */
    contextualSignature?: {
        nearestHeading?: string;
        parentRole?: string;
    };
    /** Whether the agent must ask for user confirmation before executing this action */
    requiresConfirmation?: boolean;
}
/** CRUD interface over the tag collection, keyed by element fingerprint. */
interface TagStore {
    tags: Map<string, TagData>;
    getTag: (fingerprint: string) => TagData | undefined;
    /**
     * Create or update a tag.
     * NOTE(review): nine positional optional parameters is error-prone — an options
     * object would be safer, but changing it here would break the public interface.
     */
    setTag: (fingerprint: string, description: string, category?: TagData['category'], metadata?: Record<string, unknown>, selector?: string, patternId?: string, behavior?: string, sourcePage?: string, displayContext?: string) => void;
    deleteTag: (fingerprint: string) => void;
    getAllTags: () => TagData[];
    /** Export all tags as a JSON string (for debugging / backup) */
    exportTags: () => string;
    /** Import tags from a JSON string (merges with existing) */
    importTags: (json: string) => void;
}
/**
 * Hook providing the tag store.
 * serverUrl/websiteId presumably enable server-side sync in addition to
 * localStorage persistence — confirm against implementation.
 */
declare function useTagStore(options?: {
    serverUrl?: string;
    websiteId?: string;
}): TagStore;
|
|
175
|
+
|
|
176
|
+
/** LLM input for a single step */
interface AgentTraceLlmInput {
    systemPrompt?: string;
    userMessage?: string;
}
/** Single step in agent execution trace */
interface AgentTraceStep {
    step: number;
    /** Chain-of-thought reasoning from the agent */
    reasoning?: string;
    llmInput?: AgentTraceLlmInput;
    llmOutput: string;
    /** Actions the agent chose at this step */
    actions: Array<{
        actionId: string;
        params?: Record<string, unknown>;
    }>;
    /** Per-action execution outcomes, when available */
    results?: Array<{
        actionId: string;
        success?: boolean;
        result?: unknown;
        error?: string;
    }>;
}
/** Debug payload from /agent/command */
interface AgentDebug {
    actions?: Array<{
        actionId: string;
        params?: Record<string, unknown>;
    }>;
    llmInput?: {
        systemPrompt?: string;
        systemPromptLength?: number;
        userMessage?: string;
    };
    llmOutput?: string[];
    traces?: AgentTraceStep[];
}
/** Result returned by the command runner produced by useRunCommand. */
interface RunCommandResult {
    success: boolean;
    actionsExecuted?: number;
    sessionEnded?: boolean;
    /** User-facing summary of what the agent did (when session ends) */
    summary?: string | null;
    /** Suggested next steps for the user */
    nextSteps?: string[] | null;
    debug?: AgentDebug;
    // NOTE(review): broad index signature weakens checking on the fields above;
    // kept as-is for interface compatibility with the server payload
    [key: string]: unknown;
}
/**
 * Run a natural language command via the agent.
 * Uses serverUrl from ModelNexProvider when available, else default.
 * Pass an AbortSignal to cancel the request mid-flight.
 *
 * Before sending, pre-searches the full tag store for elements relevant to the
 * command and attaches them as `relevantTaggedElements` so the server can inject
 * them into the system prompt alongside the documentation context — giving the
 * agent both doc knowledge and element discovery in a single context window.
 */
declare function useRunCommand(serverUrlOverride?: string): (command: string, signal?: AbortSignal) => Promise<RunCommandResult>;
|
|
235
|
+
|
|
236
|
+
/** Chat message for persistent conversation history */
interface ChatMessage {
    role: 'user' | 'assistant';
    content: string;
    /** Agent's end-of-session summary (assistant messages) */
    summary?: string;
    /** Suggested next steps attached to this message */
    nextSteps?: string[];
    /** Raw agent debug payload for this turn */
    debug?: AgentDebug;
}
|
|
244
|
+
|
|
245
|
+
/** Definition of a custom agent action: id, description, zod schema, and executor. */
interface UseRegisterActionOptions<T extends z.ZodTypeAny = z.ZodTypeAny> {
    id: string;
    /** Natural-language description the agent uses to decide when to invoke the action */
    description: string;
    /** Zod schema validating/typing the action parameters */
    schema: T;
    /** Executor invoked with parameters inferred from `schema` */
    execute: (params: z.infer<T>) => unknown | Promise<unknown>;
}
/**
 * Register a custom action with the ModelNex agent.
 * Use this to add app-specific actions (e.g. navigation, domain operations).
 * The action is synced to the server with built-in actions and can be invoked by the agent.
 *
 * Must be used within ModelNexProvider.
 * Memoize schema (define outside component) and execute (useCallback) to avoid unnecessary re-registration.
 */
declare function useRegisterAction<T extends z.ZodTypeAny = z.ZodTypeAny>(action: UseRegisterActionOptions<T>): void;
|
|
260
|
+
|
|
261
|
+
/** Props for UIStateProvider. */
interface UIStateProviderProps {
    children: React$1.ReactNode;
    /** Agent context id (default: "agent-ui-state") */
    id?: string;
    /** Agent context type (default: "ui") */
    type?: string;
    /** Initial state */
    initialState?: Record<string, unknown>;
}
/**
 * Provider that holds UI state and syncs it to the agent.
 * Use within ModelNexProvider for agent visibility.
 */
declare function UIStateProvider({ children, id, type, initialState }: UIStateProviderProps): react_jsx_runtime.JSX.Element;
/**
 * Hook to read and update UI state that is synced to the agent.
 * Returns a [state, update] pair; `update` applies a partial merge.
 */
declare function useUIState<T extends Record<string, unknown> = Record<string, unknown>>(): [
    T,
    (update: Partial<T>) => void
];
|
|
282
|
+
|
|
283
|
+
/**
|
|
284
|
+
* Returns a ref to attach to an element. When the element enters/leaves the viewport,
|
|
285
|
+
* its id is added/removed from the visible set.
|
|
286
|
+
*/
|
|
287
|
+
declare function useViewportTrack(id: string): React.RefObject<HTMLDivElement | null>;
|
|
288
|
+
/**
|
|
289
|
+
* Returns the list of ids currently visible in the viewport.
|
|
290
|
+
* Use with useViewportTrack - each tracked element adds its id when visible.
|
|
291
|
+
*/
|
|
292
|
+
declare function useVisibleIds(): string[];
|
|
293
|
+
interface UseAgentViewportOptions {
|
|
294
|
+
/** Agent context id (default: "agent-viewport") */
|
|
295
|
+
id?: string;
|
|
296
|
+
/** Agent context type (default: "viewport") */
|
|
297
|
+
type?: string;
|
|
298
|
+
}
|
|
299
|
+
/**
|
|
300
|
+
* Registers visible ids with the agent context.
|
|
301
|
+
* Call this in a parent component; use useViewportTrack in child elements.
|
|
302
|
+
*/
|
|
303
|
+
declare function useAgentViewport(options?: UseAgentViewportOptions): {
|
|
304
|
+
visibleIds: string[];
|
|
305
|
+
};
|
|
306
|
+
|
|
307
|
+
/** Props for the ModelNexChatBubble command UI. */
interface ModelNexChatBubbleProps {
    /** Placeholder for the command input */
    placeholder?: string;
    /** Default command when input is empty */
    defaultCommand?: string;
    /** Additional class names for the container */
    className?: string;
    /** Custom fetch - use your own inference/backend. Omit to use agent server from Provider. */
    onCommand?: (command: string) => Promise<{
        actionsExecuted?: number;
        summary?: string;
        nextSteps?: string[];
        debug?: AgentDebug;
        [key: string]: unknown;
    }>;
    /** Welcome message when chat is empty */
    welcomeMessage?: string;
    /** Display name of the app — used in tour intro narration */
    appName?: string;
}
/**
 * Chat interface for natural language commands.
 * Shows conversation history; on exit, the agent summarizes what it did and suggests next steps.
 * Use within ModelNexProvider. Omit to use your own UI with useRunCommand.
 */
declare function ModelNexChatBubble({ placeholder, defaultCommand, className, onCommand, welcomeMessage, appName, }: ModelNexChatBubbleProps): react_jsx_runtime.JSX.Element;
/**
 * Toggle and read the action-highlight overlay state.
 * When enabled, all elements with `data-modelnex-action-id` are visually highlighted.
 */
declare function useActionHighlight(): {
    enabled: boolean;
    /** Flip the highlight state */
    toggle: () => void;
    /** Set the highlight state explicitly */
    set: (value: boolean) => void;
};
|
|
343
|
+
|
|
344
|
+
/** Props for the StudioOverlay tagging UI. */
interface StudioOverlayProps {
    /** Elements discovered by useAutoExtract / extractInteractiveElements */
    elements: ExtractedElement[];
    /** Tag store from useTagStore, used to read/write element tags */
    tagStore: TagStore;
}
/** Dev-tool overlay for inspecting extracted elements and editing their tags. */
declare function StudioOverlay({ elements, tagStore }: StudioOverlayProps): react_jsx_runtime.JSX.Element;
|
|
349
|
+
|
|
350
|
+
/**
 * Minimal Web Speech API typings — declared locally because lib.dom does not
 * ship SpeechRecognition types in all TS configurations.
 */
interface SpeechRecognitionEvent {
    results: SpeechRecognitionResultList;
    resultIndex: number;
}
interface SpeechRecognitionResultList {
    readonly length: number;
    [index: number]: SpeechRecognitionResult;
}
interface SpeechRecognitionResult {
    readonly length: number;
    [index: number]: SpeechRecognitionAlternative;
    readonly isFinal: boolean;
}
interface SpeechRecognitionAlternative {
    readonly transcript: string;
    readonly confidence: number;
}
interface SpeechRecognitionErrorEvent {
    error: string;
    message: string;
}
/** Instance shape of a browser SpeechRecognition object. */
interface ISpeechRecognition {
    continuous: boolean;
    interimResults: boolean;
    lang: string;
    onresult: ((event: SpeechRecognitionEvent) => void) | null;
    onerror: ((event: SpeechRecognitionErrorEvent) => void) | null;
    onend: (() => void) | null;
    start(): void;
    stop(): void;
}
interface SpeechRecognitionConstructor {
    new (): ISpeechRecognition;
}
// Augment Window with the (optionally prefixed) SpeechRecognition constructor
declare global {
    interface Window {
        SpeechRecognition?: SpeechRecognitionConstructor;
        webkitSpeechRecognition?: SpeechRecognitionConstructor;
    }
}
/** Text-to-speech and speech-to-text controls returned by useVoice. */
interface VoiceHook {
    /** Speak text via Deepgram TTS (proxied through server). Uses WebRTC loopback for AEC when available. */
    speak: (text: string, voiceId?: string) => Promise<void>;
    /** Stop any in-progress TTS playback immediately */
    stopSpeaking: () => void;
    /** Start STT listening — Deepgram STT with AEC when supported, else Web Speech API */
    startListening: (onResult: (transcript: string) => void, onInterruption?: (transcript: string) => void, onError?: (err: string) => void, options?: {
        continuous?: boolean;
        lang?: string;
    }) => void;
    /** Stop STT listening */
    stopListening: () => void;
    isSpeaking: boolean;
    isListening: boolean;
    /** When muted, speak() is a no-op and no audio plays */
    isMuted: boolean;
    toggleMute: () => void;
    /** Whether STT is supported (Deepgram or Web Speech API) */
    sttSupported: boolean;
}
/** Voice hook backed by the given ModelNex server (TTS/STT proxy). */
declare function useVoice(serverUrl: string): VoiceHook;
|
|
411
|
+
|
|
412
|
+
interface TourPlaybackHook {
|
|
413
|
+
/** Whether a tour is currently running */
|
|
414
|
+
isActive: boolean;
|
|
415
|
+
/** Current step index (0-based) */
|
|
416
|
+
currentStepIndex: number;
|
|
417
|
+
/** Total steps in current tour */
|
|
418
|
+
totalSteps: number;
|
|
419
|
+
/** The active tour */
|
|
420
|
+
activeTour: Tour | null;
|
|
421
|
+
/** Playback state */
|
|
422
|
+
playbackState: TourPlaybackState;
|
|
423
|
+
/** Start a specific tour manually */
|
|
424
|
+
startTour: (tour: Tour) => void;
|
|
425
|
+
/** Advance to next step (voice "next" command) */
|
|
426
|
+
advanceStep: () => void;
|
|
427
|
+
/** Skip the entire tour */
|
|
428
|
+
skipTour: () => void;
|
|
429
|
+
/** Repeat current step narration */
|
|
430
|
+
repeatStep: () => void;
|
|
431
|
+
/** Handle a voice input during the tour */
|
|
432
|
+
handleVoiceInput: (transcript: string) => void;
|
|
433
|
+
/** Handle a text input during an ask step */
|
|
434
|
+
handleTextInput: (text: string) => void;
|
|
435
|
+
}
|
|
436
|
+
interface UseTourPlaybackOptions {
|
|
437
|
+
serverUrl: string;
|
|
438
|
+
/** Base URL for agent commands (e.g. /api/modelnex for same-origin). Default: serverUrl */
|
|
439
|
+
commandUrl?: string;
|
|
440
|
+
/** Same-origin path for tour API (e.g. /api/modelnex/api) — avoids CORS */
|
|
441
|
+
toursApiBase?: string;
|
|
442
|
+
/** Socket ID for agent/command (required for act/ask steps to execute) */
|
|
443
|
+
socketId?: string | null;
|
|
444
|
+
websiteId?: string;
|
|
445
|
+
userProfile?: UserProfile;
|
|
446
|
+
voice: VoiceHook;
|
|
447
|
+
appName?: string;
|
|
448
|
+
/** Pass the page's current state to the LLM agent */
|
|
449
|
+
extractedElements?: any[];
|
|
450
|
+
/** Called when a tour step changes — for updating progress panel */
|
|
451
|
+
onStepChange?: (stepIndex: number, total: number, tour: Tour) => void;
|
|
452
|
+
/** Called when tour completes or is skipped */
|
|
453
|
+
onTourEnd?: () => void;
|
|
454
|
+
}
|
|
455
|
+
declare function useTourPlayback({ serverUrl, commandUrl, toursApiBase, socketId, websiteId, userProfile, voice, appName, extractedElements, onStepChange, onTourEnd, }: UseTourPlaybackOptions): TourPlaybackHook;
|
|
456
|
+
|
|
457
|
+
/** Element captured while selecting a step target during recording. */
interface RecordedElement {
    /** Live DOM element that was clicked/selected */
    el: HTMLElement;
    /** data-testid value, when present */
    testId?: string;
    /** Stable semantic fingerprint */
    fingerprint: string;
    /** Visible text used as a playback fallback anchor */
    textContaining: string;
    /** Human-readable label shown in the recording UI */
    label: string;
    /** Bounding box at selection time */
    rect: DOMRect;
}
/** State machine phases for the recording workflow. */
type RecordingPhase = 'idle' | 'active' | 'selecting' | 'configuring' | 'narrating' | 'reviewing' | 'saved' | 'finishing';
/** State and transitions for recording a tour, returned by useRecordingMode. */
interface RecordingModeHook {
    phase: RecordingPhase;
    /** Steps recorded so far */
    steps: RecordingStep[];
    stepCount: number;
    isRecording: boolean;
    /** Element currently selected for the in-progress step */
    selectedElement: RecordedElement | null;
    /** Raw narration awaiting polish/approval */
    pendingNarration: string;
    /** LLM-polished narration awaiting approval */
    polishedNarration: string;
    selectedStepType: TourStepType;
    startRecording: () => void;
    /** Begin a new step at the current position */
    markStep: () => void;
    selectElement: (recorded: RecordedElement) => void;
    /** Record a step against the page itself rather than a specific element */
    selectPageLevel: () => void;
    cancelSelection: () => void;
    setStepType: (type: TourStepType) => void;
    startNarration: () => void;
    submitTextNarration: (text: string) => Promise<void>;
    approveNarration: () => void;
    redoNarration: () => void;
    editNarration: (text: string) => void;
    continueRecording: () => void;
    undoLastStep: () => void;
    previewSteps: () => void;
    /** Finish and save the tour; resolves to the new tour id, or null on failure — assumed; confirm */
    stopRecording: (tourName: string, targetUserTypes: string[]) => Promise<string | null>;
    /** Abandon recording — discard all steps and exit recording mode */
    cancelRecording: () => void;
    /** Route a spoken command (e.g. "next", "done") into the recording state machine */
    handleVoiceCommand: (transcript: string) => void;
}
/** Configuration for useRecordingMode. */
interface UseRecordingModeOptions {
    serverUrl: string;
    websiteId?: string;
    /** Voice hook used for narration capture and prompts */
    voice: VoiceHook;
    /** Called when the user requests a preview of the recorded steps */
    onPreview?: (steps: RecordingStep[]) => void;
}
/** Tour recording workflow hook (dev-mode tooling). */
declare function useRecordingMode({ serverUrl, websiteId, voice, onPreview, }: UseRecordingModeOptions): RecordingModeHook;
|
|
501
|
+
|
|
502
|
+
/** Props for the TourProgressPanel shown during playback. */
interface TourProgressPanelProps {
    tour: Tour;
    /** Current step index (0-based) */
    currentStepIndex: number;
    /** Invoked when the user clicks Skip */
    onSkip: () => void;
    /** When true, hide Skip button — user says "skip" instead (fully voice-driven) */
    voiceOnly?: boolean;
}
/** Compact progress bar + Skip — users care about flow, not step details */
declare function TourProgressPanel({ tour, currentStepIndex, onSkip, voiceOnly }: TourProgressPanelProps): react_jsx_runtime.JSX.Element;
|
|
511
|
+
|
|
512
|
+
/**
 * Props for the RecordingOverlay dev-tool UI.
 * Callbacks map 1:1 onto RecordingModeHook transitions (markStep, selectElement,
 * selectPageLevel, setStepType, narration approve/redo/edit, etc.).
 */
interface RecordingOverlayProps {
    /** Index of the step currently being authored */
    stepIndex: number;
    /** Total steps recorded so far */
    stepCount: number;
    phase: RecordingPhase;
    /** Raw narration awaiting polish/approval */
    pendingNarration: string;
    /** LLM-polished narration awaiting approval */
    polishedNarration: string;
    /** Whether STT is actively listening */
    isListening: boolean;
    onMarkStep: () => void;
    onElementSelected: (recorded: RecordedElement) => void;
    onPageLevelStep: () => void;
    onStepTypeConfirmed: (type: TourStepType) => void;
    onStartNarration: () => void;
    onSubmitTextNarration: (text: string) => void;
    onNarrationApprove: () => void;
    onNarrationRedo: () => void;
    onNarrationEdit: (text: string) => void;
    onAddNextStep: () => void;
    onDoneRecording: () => void;
    /** Cancel the current selection/step (not the whole recording) */
    onCancel: () => void;
    /** Abandon the entire recording */
    onCancelRecording: () => void;
    onStopRecording: () => void;
}
/** Overlay UI driving the step-by-step tour recording workflow. */
declare function RecordingOverlay({ stepIndex, stepCount, phase, pendingNarration, polishedNarration, isListening, onMarkStep, onElementSelected, onPageLevelStep, onStepTypeConfirmed, onStartNarration, onSubmitTextNarration, onNarrationApprove, onNarrationRedo, onNarrationEdit, onAddNextStep, onDoneRecording, onCancel, onCancelRecording, onStopRecording, }: RecordingOverlayProps): react_jsx_runtime.JSX.Element;
|
|
535
|
+
|
|
536
|
+
/** Props for the root ModelNexProvider. */
interface ModelNexProviderProps {
    children: React$1.ReactNode;
    /** ModelNex agent server base URL */
    serverUrl?: string;
    /** Base URL for agent commands. Use same-origin path (e.g. /api/modelnex) to avoid CORS. Default: serverUrl */
    commandUrl?: string;
    /** Identifier for the integrated website, for multi-tenancy */
    websiteId?: string;
    /**
     * End-user profile for Voice Tour targeting.
     * type — matched against tour targetUserTypes (e.g. "admin", "free")
     * isNewUser — triggers first_visit tours
     * userId — used for per-user tour completion state
     */
    userProfile?: UserProfile;
    /**
     * Same-origin base for tour API (e.g. /api/modelnex/api).
     * When set, ?modelnex_test_tour= fetches via this path to avoid CORS.
     */
    toursApiBase?: string;
    /**
     * Enable SDK dev tools unconditionally (tour recording, studio mode)
     */
    devMode?: boolean;
}
/** Root provider — wires agent context, voice tours, and dev tools into the app. */
declare const ModelNexProvider: React$1.FC<ModelNexProviderProps>;
|
|
561
|
+
|
|
562
|
+
export { type AgentDebug, type AgentTraceLlmInput, type AgentTraceStep, type ChatMessage, type ExtractedElement, ModelNexChatBubble, type ModelNexChatBubbleProps, ModelNexProvider, type ModelNexProviderProps, type RecordingModeHook, RecordingOverlay, type RunCommandResult, StudioOverlay, type TagData, type TagStore, type Tour, type TourPlaybackHook, type TourPlaybackState, TourProgressPanel, type TourStep, type TourStepType, type TourTrigger, UIStateProvider, type UseRegisterActionOptions, type UserProfile, type VoiceHook, extractInteractiveElements, generateFingerprint, useActionHighlight, useAgentViewport, useAutoExtract, useRecordingMode, useRegisterAction, useRunCommand, useTagStore, useTourPlayback, useUIState, useViewportTrack, useVisibleIds, useVoice };
|