cursor-buddy 0.0.2 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2193 @@
1
+ import { atom } from "nanostores";
2
+ import html2canvas from "html2canvas-pro";
3
+ //#region src/core/atoms.ts
4
+ /**
5
+ * Nanostores atoms for reactive values that don't need state machine semantics.
6
+ * These update frequently (e.g., 60fps audio levels) and are framework-agnostic.
7
+ */
8
+ const $audioLevel = atom(0);
9
+ const $cursorPosition = atom({
10
+ x: 0,
11
+ y: 0
12
+ });
13
+ const $buddyPosition = atom({
14
+ x: 0,
15
+ y: 0
16
+ });
17
+ const $buddyRotation = atom(0);
18
+ const $buddyScale = atom(1);
19
+ const $pointingTarget = atom(null);
20
+ const $isEnabled = atom(true);
21
+ atom(false);
22
+ const $conversationHistory = atom([]);
23
+ //#endregion
24
+ //#region src/core/pointing.ts
25
+ /**
26
+ * Parses POINT tags from AI responses.
27
+ *
28
+ * Supports two formats:
29
+ * - Marker-based: [POINT:5:label] - 3 parts, references a numbered marker
30
+ * - Coordinate-based: [POINT:640,360:label] - 4 parts, raw pixel coordinates
31
+ */
32
+ const POINTING_TAG_REGEX = /\[POINT:(\d+)(?:,(\d+))?:([^\]]+)\]\s*$/;
33
+ const PARTIAL_POINTING_PREFIXES = new Set([
34
+ "[",
35
+ "[P",
36
+ "[PO",
37
+ "[POI",
38
+ "[POIN",
39
+ "[POINT",
40
+ "[POINT:"
41
+ ]);
42
+ function stripTrailingPointingTag(response, trimResult) {
43
+ const stripped = response.replace(POINTING_TAG_REGEX, "");
44
+ return trimResult ? stripped.trim() : stripped;
45
+ }
46
+ function getPartialPointingTagStart(response) {
47
+ const lastOpenBracket = response.lastIndexOf("[");
48
+ if (lastOpenBracket === -1) return -1;
49
+ const suffix = response.slice(lastOpenBracket).trimEnd();
50
+ if (suffix.includes("]")) return -1;
51
+ if (suffix.startsWith("[POINT:")) {
52
+ let start = lastOpenBracket;
53
+ while (start > 0 && /\s/.test(response[start - 1] ?? "")) start--;
54
+ return start;
55
+ }
56
+ return PARTIAL_POINTING_PREFIXES.has(suffix) ? lastOpenBracket : -1;
57
+ }
58
+ /**
59
+ * Parse pointing tag into structured result.
60
+ * Returns null if no valid POINT tag is found at the end.
61
+ */
62
+ function parsePointingTagRaw(response) {
63
+ const match = response.match(POINTING_TAG_REGEX);
64
+ if (!match) return null;
65
+ const first = Number.parseInt(match[1], 10);
66
+ const second = match[2] ? Number.parseInt(match[2], 10) : null;
67
+ const label = match[3].trim();
68
+ if (second !== null) return {
69
+ type: "coordinates",
70
+ x: first,
71
+ y: second,
72
+ label
73
+ };
74
+ return {
75
+ type: "marker",
76
+ markerId: first,
77
+ label
78
+ };
79
+ }
80
+ /**
81
+ * Remove POINT tag from response text for display/TTS.
82
+ */
83
+ function stripPointingTag(response) {
84
+ return stripTrailingPointingTag(response, true);
85
+ }
86
+ /**
87
+ * Strip complete or partial trailing POINT syntax while the response streams.
88
+ * This keeps the visible text and TTS input stable even if the tag arrives
89
+ * incrementally over multiple chunks.
90
+ */
91
+ function stripTrailingPointingSyntax(response) {
92
+ const withoutCompleteTag = stripTrailingPointingTag(response, false);
93
+ const partialTagStart = getPartialPointingTagStart(withoutCompleteTag);
94
+ if (partialTagStart === -1) return withoutCompleteTag.trimEnd();
95
+ return withoutCompleteTag.slice(0, partialTagStart).trimEnd();
96
+ }
97
+ //#endregion
98
+ //#region src/core/utils/error.ts
99
+ /**
100
+ * Normalize unknown thrown values into Error instances.
101
+ */
102
+ function toError(error, fallbackMessage = "Unknown error") {
103
+ if (error instanceof Error) return error;
104
+ if (typeof error === "string" && error) return new Error(error);
105
+ return new Error(fallbackMessage);
106
+ }
107
+ //#endregion
108
+ //#region src/core/services/audio-playback.ts
109
+ /**
110
+ * Framework-agnostic service for audio playback with abort support.
111
+ */
112
+ var AudioPlaybackService = class {
113
+ audio = null;
114
+ currentUrl = null;
115
+ settlePlayback = null;
116
+ removeAbortListener = null;
117
+ /**
118
+ * Play audio from a blob. Stops any currently playing audio first.
119
+ * @param blob - Audio blob to play
120
+ * @param signal - Optional AbortSignal to cancel playback
121
+ * @returns Promise that resolves when playback completes
122
+ */
123
+ async play(blob, signal) {
124
+ this.stop();
125
+ if (signal?.aborted) return;
126
+ const url = URL.createObjectURL(blob);
127
+ this.currentUrl = url;
128
+ this.audio = new Audio(url);
129
+ return new Promise((resolve, reject) => {
130
+ if (!this.audio) {
131
+ this.cleanup();
132
+ resolve();
133
+ return;
134
+ }
135
+ let settled = false;
136
+ const audio = this.audio;
137
+ const settle = (outcome, error) => {
138
+ if (settled) return;
139
+ settled = true;
140
+ if (this.settlePlayback === settle) this.settlePlayback = null;
141
+ this.removeAbortListener?.();
142
+ this.removeAbortListener = null;
143
+ if (this.audio === audio) {
144
+ this.audio.onended = null;
145
+ this.audio.onerror = null;
146
+ this.audio = null;
147
+ }
148
+ this.cleanup();
149
+ if (outcome === "resolve") {
150
+ resolve();
151
+ return;
152
+ }
153
+ reject(error ?? /* @__PURE__ */ new Error("Audio playback failed"));
154
+ };
155
+ this.settlePlayback = settle;
156
+ const abortHandler = () => {
157
+ audio.pause();
158
+ settle("resolve");
159
+ };
160
+ if (signal) {
161
+ signal.addEventListener("abort", abortHandler, { once: true });
162
+ this.removeAbortListener = () => {
163
+ signal.removeEventListener("abort", abortHandler);
164
+ };
165
+ }
166
+ this.audio.onended = () => {
167
+ settle("resolve");
168
+ };
169
+ this.audio.onerror = () => {
170
+ settle("reject", /* @__PURE__ */ new Error("Audio playback failed"));
171
+ };
172
+ this.audio.play().catch((err) => {
173
+ settle("reject", toError(err, "Audio playback failed"));
174
+ });
175
+ });
176
+ }
177
+ /**
178
+ * Stop any currently playing audio.
179
+ */
180
+ stop() {
181
+ if (this.audio) this.audio.pause();
182
+ if (this.settlePlayback) {
183
+ const settlePlayback = this.settlePlayback;
184
+ this.settlePlayback = null;
185
+ settlePlayback("resolve");
186
+ return;
187
+ }
188
+ this.removeAbortListener?.();
189
+ this.removeAbortListener = null;
190
+ if (this.audio) {
191
+ this.audio.onended = null;
192
+ this.audio.onerror = null;
193
+ this.audio = null;
194
+ }
195
+ this.cleanup();
196
+ }
197
+ cleanup() {
198
+ if (this.currentUrl) {
199
+ URL.revokeObjectURL(this.currentUrl);
200
+ this.currentUrl = null;
201
+ }
202
+ }
203
+ };
204
+ //#endregion
205
+ //#region src/core/utils/web-speech.ts
206
+ /**
207
+ * Normalize browser speech input and transcript output to a single-space form
208
+ * so UI state and speech synthesis stay stable across browser event quirks.
209
+ */
210
+ function normalizeSpeechText(text) {
211
+ return text.replace(/\s+/g, " ").trim();
212
+ }
213
+ /**
214
+ * Resolve the best browser locale to use for Web Speech APIs.
215
+ *
216
+ * We prefer the document language when the host app declares one, then fall
217
+ * back to the browser locale, and finally to English as a stable default.
218
+ */
219
+ function resolveBrowserLanguage() {
220
+ if (typeof document !== "undefined") {
221
+ const documentLanguage = document.documentElement.lang.trim();
222
+ if (documentLanguage) return documentLanguage;
223
+ }
224
+ if (typeof navigator !== "undefined" && navigator.language) return navigator.language;
225
+ return "en-US";
226
+ }
227
+ //#endregion
228
+ //#region src/core/services/browser-speech.ts
229
+ function getSpeechSynthesis() {
230
+ return typeof globalThis.speechSynthesis === "undefined" ? void 0 : globalThis.speechSynthesis;
231
+ }
232
+ function getSpeechSynthesisUtterance() {
233
+ return typeof globalThis.SpeechSynthesisUtterance === "undefined" ? void 0 : globalThis.SpeechSynthesisUtterance;
234
+ }
235
+ function toSpeechError(event) {
236
+ const errorCode = event?.error;
237
+ return /* @__PURE__ */ new Error(errorCode ? `Browser speech failed: ${errorCode}` : "Browser speech failed");
238
+ }
239
+ /**
240
+ * Browser-backed speech synthesis using the Web Speech API.
241
+ */
242
+ var BrowserSpeechService = class {
243
+ removeAbortListener = null;
244
+ settleSpeech = null;
245
+ utterance = null;
246
+ /**
247
+ * Report whether this runtime exposes the browser Web Speech synthesis APIs.
248
+ */
249
+ isAvailable() {
250
+ return Boolean(getSpeechSynthesis() && getSpeechSynthesisUtterance());
251
+ }
252
+ /**
253
+ * Speak a single text segment in the browser.
254
+ *
255
+ * Each queue item owns its own utterance. We only stop an existing utterance
256
+ * when this service still has one in flight, so streamed playback does not
257
+ * spam global `speechSynthesis.cancel()` between already-completed segments.
258
+ */
259
+ async speak(text, signal) {
260
+ const speechSynthesis = getSpeechSynthesis();
261
+ const SpeechSynthesisUtteranceCtor = getSpeechSynthesisUtterance();
262
+ if (!speechSynthesis || !SpeechSynthesisUtteranceCtor) throw new Error("Browser speech is not supported");
263
+ if (this.hasActiveSpeech()) this.stop();
264
+ const normalizedText = normalizeSpeechText(text);
265
+ if (!normalizedText || signal?.aborted) return;
266
+ const utterance = new SpeechSynthesisUtteranceCtor(normalizedText);
267
+ utterance.lang = resolveBrowserLanguage();
268
+ this.utterance = utterance;
269
+ return new Promise((resolve, reject) => {
270
+ let settled = false;
271
+ const settle = (outcome, error) => {
272
+ if (settled) return;
273
+ settled = true;
274
+ if (this.settleSpeech === settle) this.settleSpeech = null;
275
+ this.removeAbortListener?.();
276
+ this.removeAbortListener = null;
277
+ this.clearUtterance(utterance);
278
+ if (outcome === "resolve") {
279
+ resolve();
280
+ return;
281
+ }
282
+ reject(error ?? /* @__PURE__ */ new Error("Browser speech failed"));
283
+ };
284
+ this.settleSpeech = settle;
285
+ const abortHandler = () => {
286
+ try {
287
+ speechSynthesis.cancel();
288
+ } catch {}
289
+ settle("resolve");
290
+ };
291
+ if (signal) {
292
+ signal.addEventListener("abort", abortHandler, { once: true });
293
+ this.removeAbortListener = () => {
294
+ signal.removeEventListener("abort", abortHandler);
295
+ };
296
+ }
297
+ utterance.onend = () => {
298
+ settle("resolve");
299
+ };
300
+ utterance.onerror = (event) => {
301
+ if (signal?.aborted) {
302
+ settle("resolve");
303
+ return;
304
+ }
305
+ settle("reject", toSpeechError(event));
306
+ };
307
+ try {
308
+ speechSynthesis.speak(utterance);
309
+ } catch (error) {
310
+ settle("reject", toError(error, "Browser speech failed to start"));
311
+ }
312
+ });
313
+ }
314
+ /**
315
+ * Stop the current utterance owned by this service, if one is active.
316
+ *
317
+ * We intentionally do nothing when the service is idle so we do not cancel
318
+ * unrelated speech synthesis work that host apps may be doing elsewhere.
319
+ */
320
+ stop() {
321
+ if (!this.hasActiveSpeech()) return;
322
+ const speechSynthesis = getSpeechSynthesis();
323
+ if (speechSynthesis) try {
324
+ speechSynthesis.cancel();
325
+ } catch {}
326
+ if (this.settleSpeech) {
327
+ const settleSpeech = this.settleSpeech;
328
+ this.settleSpeech = null;
329
+ settleSpeech("resolve");
330
+ return;
331
+ }
332
+ this.removeAbortListener?.();
333
+ this.removeAbortListener = null;
334
+ this.clearUtterance(this.utterance);
335
+ }
336
+ hasActiveSpeech() {
337
+ return Boolean(this.utterance || this.settleSpeech);
338
+ }
339
+ clearUtterance(utterance) {
340
+ if (!utterance) return;
341
+ utterance.onend = null;
342
+ utterance.onerror = null;
343
+ if (this.utterance === utterance) this.utterance = null;
344
+ }
345
+ };
346
+ //#endregion
347
+ //#region src/core/services/live-transcription.ts
348
+ function getSpeechRecognitionConstructor() {
349
+ const globalScope = globalThis;
350
+ return globalScope.SpeechRecognition ?? globalScope.webkitSpeechRecognition;
351
+ }
352
+ function toRecognitionError(event) {
353
+ const errorCode = event?.error;
354
+ const message = event?.message || (errorCode ? `Browser transcription failed: ${errorCode}` : "Browser transcription failed");
355
+ return new Error(message);
356
+ }
357
+ function buildTranscripts(results) {
358
+ let finalTranscript = "";
359
+ let interimTranscript = "";
360
+ for (let index = 0; index < results.length; index += 1) {
361
+ const result = results[index];
362
+ const transcript = (result?.[0])?.transcript ?? "";
363
+ if (!transcript) continue;
364
+ if (result.isFinal) finalTranscript += `${transcript} `;
365
+ else interimTranscript += `${transcript} `;
366
+ }
367
+ const normalizedFinal = normalizeSpeechText(finalTranscript);
368
+ return {
369
+ finalTranscript: normalizedFinal,
370
+ liveTranscript: normalizeSpeechText([normalizedFinal, normalizeSpeechText(interimTranscript)].filter(Boolean).join(" "))
371
+ };
372
+ }
373
+ /**
374
+ * Browser-backed live transcription using the Web Speech API.
375
+ */
376
+ var LiveTranscriptionService = class {
377
+ finalTranscript = "";
378
+ hasStarted = false;
379
+ hasEnded = false;
380
+ lastError = null;
381
+ partialCallback = null;
382
+ recognition = null;
383
+ startReject = null;
384
+ startResolve = null;
385
+ stopReject = null;
386
+ stopResolve = null;
387
+ isAvailable() {
388
+ return Boolean(getSpeechRecognitionConstructor());
389
+ }
390
+ /**
391
+ * Register a callback for the latest browser transcript while the user is
392
+ * still speaking.
393
+ */
394
+ onPartial(callback) {
395
+ this.partialCallback = callback;
396
+ }
397
+ /**
398
+ * Start a new Web Speech recognition session.
399
+ */
400
+ async start() {
401
+ const SpeechRecognitionCtor = getSpeechRecognitionConstructor();
402
+ if (!SpeechRecognitionCtor) throw new Error("Browser transcription is not supported");
403
+ this.dispose();
404
+ const recognition = new SpeechRecognitionCtor();
405
+ this.recognition = recognition;
406
+ recognition.continuous = true;
407
+ recognition.interimResults = true;
408
+ recognition.maxAlternatives = 1;
409
+ recognition.lang = resolveBrowserLanguage();
410
+ recognition.onstart = () => {
411
+ this.hasStarted = true;
412
+ this.startResolve?.();
413
+ this.startResolve = null;
414
+ this.startReject = null;
415
+ };
416
+ recognition.onresult = (event) => {
417
+ const transcripts = buildTranscripts(event.results);
418
+ this.finalTranscript = transcripts.finalTranscript;
419
+ this.partialCallback?.(transcripts.liveTranscript);
420
+ };
421
+ recognition.onerror = (event) => {
422
+ this.lastError = toRecognitionError(event);
423
+ if (!this.hasStarted) {
424
+ this.startReject?.(this.lastError);
425
+ this.startResolve = null;
426
+ this.startReject = null;
427
+ }
428
+ };
429
+ recognition.onend = () => {
430
+ this.hasEnded = true;
431
+ if (!this.hasStarted) {
432
+ const error = this.lastError ?? /* @__PURE__ */ new Error("Browser transcription ended before it could start");
433
+ this.startReject?.(error);
434
+ this.startResolve = null;
435
+ this.startReject = null;
436
+ }
437
+ if (this.stopResolve || this.stopReject) {
438
+ if (this.lastError) this.stopReject?.(this.lastError);
439
+ else this.stopResolve?.(normalizeSpeechText(this.finalTranscript));
440
+ this.stopResolve = null;
441
+ this.stopReject = null;
442
+ }
443
+ };
444
+ const started = new Promise((resolve, reject) => {
445
+ this.startResolve = resolve;
446
+ this.startReject = reject;
447
+ });
448
+ try {
449
+ recognition.start();
450
+ } catch (error) {
451
+ this.clearRecognition();
452
+ throw toError(error, "Browser transcription failed to start");
453
+ }
454
+ try {
455
+ await started;
456
+ } catch (error) {
457
+ this.clearRecognition();
458
+ throw toError(error, "Browser transcription failed to start");
459
+ }
460
+ }
461
+ /**
462
+ * Stop the current recognition session and resolve with the final transcript.
463
+ */
464
+ async stop() {
465
+ if (!this.recognition) {
466
+ if (this.lastError) throw this.lastError;
467
+ return normalizeSpeechText(this.finalTranscript);
468
+ }
469
+ if (this.hasEnded) {
470
+ const transcript = normalizeSpeechText(this.finalTranscript);
471
+ const error = this.lastError;
472
+ this.clearRecognition();
473
+ if (error) throw error;
474
+ return transcript;
475
+ }
476
+ const recognition = this.recognition;
477
+ return normalizeSpeechText(await new Promise((resolve, reject) => {
478
+ this.stopResolve = resolve;
479
+ this.stopReject = reject;
480
+ try {
481
+ recognition.stop();
482
+ } catch (error) {
483
+ reject(toError(error, "Browser transcription failed to stop"));
484
+ }
485
+ }).finally(() => {
486
+ this.clearRecognition();
487
+ }));
488
+ }
489
+ /**
490
+ * Abort the current recognition session and reset the service for reuse.
491
+ */
492
+ dispose() {
493
+ if (this.recognition) try {
494
+ this.recognition.abort();
495
+ } catch {}
496
+ this.startReject?.(/* @__PURE__ */ new Error("Browser transcription aborted"));
497
+ this.stopResolve?.(normalizeSpeechText(this.finalTranscript));
498
+ this.startResolve = null;
499
+ this.startReject = null;
500
+ this.stopResolve = null;
501
+ this.stopReject = null;
502
+ this.clearRecognition();
503
+ this.resetSessionState();
504
+ }
505
+ clearRecognition() {
506
+ if (!this.recognition) return;
507
+ this.recognition.onstart = null;
508
+ this.recognition.onresult = null;
509
+ this.recognition.onerror = null;
510
+ this.recognition.onend = null;
511
+ this.recognition = null;
512
+ }
513
+ resetSessionState() {
514
+ this.finalTranscript = "";
515
+ this.hasStarted = false;
516
+ this.hasEnded = false;
517
+ this.lastError = null;
518
+ this.partialCallback?.("");
519
+ }
520
+ };
521
+ //#endregion
522
+ //#region src/core/bezier.ts
523
+ /**
524
+ * Bezier flight animation for cursor pointing.
525
+ */
526
+ /**
527
+ * Quadratic bezier curve: B(t) = (1-t)²P₀ + 2(1-t)t·P₁ + t²P₂
528
+ */
529
+ function quadraticBezier(p0, p1, p2, t) {
530
+ const oneMinusT = 1 - t;
531
+ return {
532
+ x: oneMinusT * oneMinusT * p0.x + 2 * oneMinusT * t * p1.x + t * t * p2.x,
533
+ y: oneMinusT * oneMinusT * p0.y + 2 * oneMinusT * t * p1.y + t * t * p2.y
534
+ };
535
+ }
536
+ /**
537
+ * Bezier tangent (derivative): B'(t) = 2(1-t)(P₁-P₀) + 2t(P₂-P₁)
538
+ */
539
+ function bezierTangent(p0, p1, p2, t) {
540
+ const oneMinusT = 1 - t;
541
+ return {
542
+ x: 2 * oneMinusT * (p1.x - p0.x) + 2 * t * (p2.x - p1.x),
543
+ y: 2 * oneMinusT * (p1.y - p0.y) + 2 * t * (p2.y - p1.y)
544
+ };
545
+ }
546
+ /**
547
+ * Ease-in-out cubic for smooth acceleration/deceleration
548
+ */
549
+ function easeInOutCubic(t) {
550
+ return t < .5 ? 4 * t * t * t : 1 - (-2 * t + 2) ** 3 / 2;
551
+ }
552
+ /**
553
+ * Animate cursor along a parabolic bezier arc from start to end.
554
+ * Used when the AI points at a UI element.
555
+ *
556
+ * @param from - Starting position
557
+ * @param to - Target position
558
+ * @param durationMs - Flight duration in milliseconds
559
+ * @param callbacks - Frame and completion callbacks
560
+ * @returns Cancel function to stop the animation
561
+ */
562
+ function animateBezierFlight(from, to, durationMs, callbacks) {
563
+ const startTime = performance.now();
564
+ const distance = Math.hypot(to.x - from.x, to.y - from.y);
565
+ const controlPoint = {
566
+ x: (from.x + to.x) / 2,
567
+ y: Math.min(from.y, to.y) - distance * .2
568
+ };
569
+ let animationFrameId;
570
+ function animate(now) {
571
+ const elapsed = now - startTime;
572
+ const linearProgress = Math.min(elapsed / durationMs, 1);
573
+ const easedProgress = easeInOutCubic(linearProgress);
574
+ const position = quadraticBezier(from, controlPoint, to, easedProgress);
575
+ const tangent = bezierTangent(from, controlPoint, to, easedProgress);
576
+ const rotation = Math.atan2(tangent.y, tangent.x);
577
+ const scale = 1 + Math.sin(linearProgress * Math.PI) * .3;
578
+ callbacks.onFrame(position, rotation, scale);
579
+ if (linearProgress < 1) animationFrameId = requestAnimationFrame(animate);
580
+ else callbacks.onComplete();
581
+ }
582
+ animationFrameId = requestAnimationFrame(animate);
583
+ return () => cancelAnimationFrame(animationFrameId);
584
+ }
585
+ //#endregion
586
+ //#region src/core/services/pointer-controller.ts
587
+ const POINTING_LOCK_TIMEOUT_MS = 1e4;
588
+ /**
589
+ * Controller for cursor pointing behavior.
590
+ * Manages the pointer state machine (follow -> flying -> anchored -> follow)
591
+ * and cursor animation.
592
+ */
593
+ var PointerController = class {
594
+ mode = "follow";
595
+ cancelAnimation = null;
596
+ releaseTimeout = null;
597
+ listeners = /* @__PURE__ */ new Set();
598
+ /**
599
+ * Animate cursor to point at a target.
600
+ */
601
+ pointAt(target) {
602
+ this.release();
603
+ this.mode = "flying";
604
+ $pointingTarget.set(target);
605
+ const startPos = $buddyPosition.get();
606
+ const endPos = {
607
+ x: target.x,
608
+ y: target.y
609
+ };
610
+ this.cancelAnimation = animateBezierFlight(startPos, endPos, 800, {
611
+ onFrame: (position, rotation, scale) => {
612
+ $buddyPosition.set(position);
613
+ $buddyRotation.set(rotation);
614
+ $buddyScale.set(scale);
615
+ },
616
+ onComplete: () => {
617
+ this.cancelAnimation = null;
618
+ this.mode = "anchored";
619
+ $buddyPosition.set(endPos);
620
+ $buddyRotation.set(0);
621
+ $buddyScale.set(1);
622
+ this.scheduleRelease();
623
+ this.notify();
624
+ }
625
+ });
626
+ this.notify();
627
+ }
628
+ /**
629
+ * Release the cursor from pointing mode back to follow mode.
630
+ */
631
+ release() {
632
+ if (this.cancelAnimation) {
633
+ this.cancelAnimation();
634
+ this.cancelAnimation = null;
635
+ }
636
+ if (this.releaseTimeout) {
637
+ clearTimeout(this.releaseTimeout);
638
+ this.releaseTimeout = null;
639
+ }
640
+ this.mode = "follow";
641
+ $pointingTarget.set(null);
642
+ $buddyPosition.set($cursorPosition.get());
643
+ $buddyRotation.set(0);
644
+ $buddyScale.set(1);
645
+ this.notify();
646
+ }
647
+ /**
648
+ * Check if cursor is currently pointing (flying or anchored).
649
+ */
650
+ isPointing() {
651
+ return this.mode !== "follow";
652
+ }
653
+ /**
654
+ * Get current pointer mode.
655
+ */
656
+ getMode() {
657
+ return this.mode;
658
+ }
659
+ /**
660
+ * Subscribe to pointer state changes.
661
+ */
662
+ subscribe(listener) {
663
+ this.listeners.add(listener);
664
+ return () => this.listeners.delete(listener);
665
+ }
666
+ /**
667
+ * Update buddy position to follow cursor when in follow mode.
668
+ * Call this on cursor position changes.
669
+ */
670
+ updateFollowPosition() {
671
+ if (this.mode === "follow") {
672
+ $buddyPosition.set($cursorPosition.get());
673
+ $buddyRotation.set(0);
674
+ $buddyScale.set(1);
675
+ }
676
+ }
677
+ scheduleRelease() {
678
+ this.releaseTimeout = setTimeout(() => {
679
+ this.releaseTimeout = null;
680
+ this.release();
681
+ }, POINTING_LOCK_TIMEOUT_MS);
682
+ }
683
+ notify() {
684
+ this.listeners.forEach((listener) => listener());
685
+ }
686
+ };
687
+ //#endregion
688
+ //#region src/core/utils/annotations.ts
689
+ const DEFAULT_STYLE = {
690
+ borderColor: "rgba(255, 0, 0, 0.8)",
691
+ labelBackground: "rgba(255, 0, 0, 0.9)",
692
+ labelColor: "#ffffff",
693
+ borderWidth: 2,
694
+ fontSize: 11,
695
+ labelPadding: 4
696
+ };
697
+ /**
698
+ * Draw annotation markers onto a canvas.
699
+ * Modifies the canvas in place.
700
+ *
701
+ * @param ctx Canvas 2D context to draw on
702
+ * @param markers Marker map from element discovery
703
+ * @param style Optional style overrides
704
+ */
705
+ function drawAnnotations(ctx, markers, style = {}) {
706
+ const s = {
707
+ ...DEFAULT_STYLE,
708
+ ...style
709
+ };
710
+ ctx.save();
711
+ for (const marker of markers.values()) {
712
+ const { rect, id } = marker;
713
+ ctx.strokeStyle = s.borderColor;
714
+ ctx.lineWidth = s.borderWidth;
715
+ ctx.strokeRect(rect.left, rect.top, rect.width, rect.height);
716
+ const label = String(id);
717
+ ctx.font = `bold ${s.fontSize}px monospace`;
718
+ const textWidth = ctx.measureText(label).width;
719
+ const textHeight = s.fontSize;
720
+ const labelWidth = textWidth + s.labelPadding * 2;
721
+ const labelHeight = textHeight + s.labelPadding;
722
+ const labelX = rect.left - s.borderWidth;
723
+ const labelY = rect.top < labelHeight + 4 ? rect.top + 2 : rect.top - labelHeight;
724
+ ctx.fillStyle = s.labelBackground;
725
+ ctx.beginPath();
726
+ ctx.roundRect(labelX, labelY, labelWidth, labelHeight, 2);
727
+ ctx.fill();
728
+ ctx.fillStyle = s.labelColor;
729
+ ctx.textBaseline = "top";
730
+ ctx.fillText(label, labelX + s.labelPadding, labelY + s.labelPadding / 2);
731
+ }
732
+ ctx.restore();
733
+ }
734
+ /**
735
+ * Create an annotated copy of a canvas.
736
+ * Does not modify the original canvas.
737
+ *
738
+ * @param sourceCanvas Original screenshot canvas
739
+ * @param markers Marker map from element discovery
740
+ * @returns New canvas with annotations drawn
741
+ */
742
+ function createAnnotatedCanvas(sourceCanvas, markers) {
743
+ const canvas = document.createElement("canvas");
744
+ canvas.width = sourceCanvas.width;
745
+ canvas.height = sourceCanvas.height;
746
+ const ctx = canvas.getContext("2d");
747
+ if (!ctx) throw new Error("Failed to get canvas 2D context");
748
+ ctx.drawImage(sourceCanvas, 0, 0);
749
+ drawAnnotations(ctx, markers);
750
+ return canvas;
751
+ }
752
+ /**
753
+ * Generate marker context string for AI prompt.
754
+ * Lists available markers with their descriptions.
755
+ *
756
+ * @param markers Marker map from element discovery
757
+ * @returns Formatted string listing markers
758
+ */
759
+ function generateMarkerContext(markers) {
760
+ if (markers.size === 0) return "No interactive elements detected.";
761
+ const lines = ["Interactive elements (use marker number to point):"];
762
+ for (const marker of markers.values()) lines.push(` ${marker.id}: ${marker.description}`);
763
+ return lines.join("\n");
764
+ }
765
+ //#endregion
766
+ //#region src/core/utils/elements.ts
767
+ /**
768
+ * Element discovery for annotated screenshots.
769
+ * Finds visible interactive elements and assigns marker IDs.
770
+ */
771
+ /** Max characters for element descriptions passed to the model. */
772
+ const MAX_DESCRIPTION_LENGTH = 50;
773
+ /** Pixels tolerance for grouping elements into the same visual row. */
774
+ const ROW_TOLERANCE_PX = 20;
775
+ /**
776
+ * Interactive element selectors - elements users would want to click/interact with.
777
+ * Mirrors accessibility roles from agent-browser but using CSS selectors.
778
+ */
779
+ const INTERACTIVE_SELECTORS = [
780
+ "button",
781
+ "[role=\"button\"]",
782
+ "input[type=\"button\"]",
783
+ "input[type=\"submit\"]",
784
+ "input[type=\"reset\"]",
785
+ "a[href]",
786
+ "[role=\"link\"]",
787
+ "input:not([type=\"hidden\"])",
788
+ "textarea",
789
+ "select",
790
+ "[role=\"textbox\"]",
791
+ "[role=\"searchbox\"]",
792
+ "[role=\"combobox\"]",
793
+ "[role=\"listbox\"]",
794
+ "[role=\"slider\"]",
795
+ "[role=\"spinbutton\"]",
796
+ "[role=\"checkbox\"]",
797
+ "[role=\"radio\"]",
798
+ "[role=\"switch\"]",
799
+ "[role=\"menuitem\"]",
800
+ "[role=\"menuitemcheckbox\"]",
801
+ "[role=\"menuitemradio\"]",
802
+ "[role=\"option\"]",
803
+ "[role=\"tab\"]",
804
+ "[role=\"treeitem\"]",
805
+ "video",
806
+ "audio",
807
+ "[data-cursor-buddy-interactive]"
808
+ ];
809
+ /**
810
+ * Check if an element is visible in the viewport.
811
+ */
812
+ function isElementVisible(element, rect = element.getBoundingClientRect()) {
813
+ if (rect.width <= 0 || rect.height <= 0) return false;
814
+ if (rect.bottom < 0 || rect.top > window.innerHeight || rect.right < 0 || rect.left > window.innerWidth) return false;
815
+ const style = window.getComputedStyle(element);
816
+ if (style.visibility === "hidden" || style.display === "none") return false;
817
+ if (Number.parseFloat(style.opacity) === 0) return false;
818
+ return true;
819
+ }
820
+ function truncateDescription(value) {
821
+ return value.slice(0, MAX_DESCRIPTION_LENGTH);
822
+ }
823
+ /**
824
+ * Generate a brief description for an element.
825
+ */
826
+ function describeElement(element) {
827
+ const tag = element.tagName.toLowerCase();
828
+ const ariaLabel = element.getAttribute("aria-label");
829
+ if (ariaLabel) return truncateDescription(ariaLabel);
830
+ if (tag === "button" || tag === "a") {
831
+ const text = element.textContent?.trim();
832
+ if (text) return truncateDescription(text);
833
+ }
834
+ if (tag === "input" || tag === "textarea") {
835
+ const placeholder = element.getAttribute("placeholder");
836
+ if (placeholder) return truncateDescription(placeholder);
837
+ return `${element.getAttribute("type") || "text"} input`;
838
+ }
839
+ if (tag === "img") {
840
+ const alt = element.getAttribute("alt");
841
+ if (alt) return truncateDescription(alt);
842
+ return "image";
843
+ }
844
+ const role = element.getAttribute("role");
845
+ if (role) return role;
846
+ return tag;
847
+ }
848
+ function collectVisibleInteractiveElements() {
849
+ const selector = INTERACTIVE_SELECTORS.join(",");
850
+ const allElements = document.querySelectorAll(selector);
851
+ const visible = [];
852
+ for (const element of allElements) {
853
+ const rect = element.getBoundingClientRect();
854
+ if (!isElementVisible(element, rect)) continue;
855
+ visible.push({
856
+ element,
857
+ rect
858
+ });
859
+ }
860
+ visible.sort((a, b) => {
861
+ const rowDiff = Math.floor(a.rect.top / ROW_TOLERANCE_PX) - Math.floor(b.rect.top / ROW_TOLERANCE_PX);
862
+ if (rowDiff !== 0) return rowDiff;
863
+ return a.rect.left - b.rect.left;
864
+ });
865
+ return visible;
866
+ }
867
+ /**
868
+ * Create marker map from visible interactive elements.
869
+ * Assigns sequential IDs starting from 1.
870
+ */
871
+ function createMarkerMap() {
872
+ const elements = collectVisibleInteractiveElements();
873
+ const map = /* @__PURE__ */ new Map();
874
+ elements.forEach(({ element, rect }, index) => {
875
+ const id = index + 1;
876
+ map.set(id, {
877
+ id,
878
+ element,
879
+ rect,
880
+ description: describeElement(element)
881
+ });
882
+ });
883
+ return map;
884
+ }
885
+ /**
886
+ * Get the center point of an element in viewport coordinates.
887
+ */
888
+ function getElementCenter(element) {
889
+ const rect = element.getBoundingClientRect();
890
+ return {
891
+ x: Math.round(rect.left + rect.width / 2),
892
+ y: Math.round(rect.top + rect.height / 2)
893
+ };
894
+ }
895
+ /**
896
+ * Resolve a marker ID to viewport coordinates.
897
+ * Returns null if marker not found or element no longer visible.
898
+ */
899
+ function resolveMarkerToCoordinates(markerMap, markerId) {
900
+ const marker = markerMap.get(markerId);
901
+ if (!marker) return null;
902
+ if (!document.contains(marker.element)) return null;
903
+ if (!isElementVisible(marker.element)) return null;
904
+ return getElementCenter(marker.element);
905
+ }
906
+ //#endregion
907
+ //#region src/core/utils/screenshot.ts
908
+ const CLONE_RESOURCE_TIMEOUT_MS = 3e3;
909
+ function getCaptureMetrics() {
910
+ return {
911
+ viewportWidth: window.innerWidth,
912
+ viewportHeight: window.innerHeight
913
+ };
914
+ }
915
+ function waitForNextPaint(doc) {
916
+ const view = doc.defaultView;
917
+ if (!view?.requestAnimationFrame) return Promise.resolve();
918
+ return new Promise((resolve) => {
919
+ view.requestAnimationFrame(() => {
920
+ view.requestAnimationFrame(() => resolve());
921
+ });
922
+ });
923
+ }
924
/**
 * Whether a stylesheet <link> has finished loading. Reading cssRules on a
 * cross-origin sheet throws a SecurityError; that still counts as ready
 * because the sheet itself has been fetched and applied.
 */
function isStylesheetReady(link) {
  const { sheet } = link;
  if (!sheet) return false;
  try {
    void sheet.cssRules; // throws while the sheet is not accessible yet
    return true;
  } catch (error) {
    return error instanceof DOMException && error.name === "SecurityError";
  }
}
934
/**
 * Wait until a stylesheet <link> is ready (see isStylesheetReady), bounded
 * by CLONE_RESOURCE_TIMEOUT_MS so a stalled resource cannot block capture
 * forever. Always resolves; never rejects.
 */
function waitForStylesheetLink(link) {
  if (isStylesheetReady(link)) return Promise.resolve();
  return new Promise((resolve) => {
    let settled = false;
    let timeoutId = 0;
    const finish = () => {
      if (settled) return;
      settled = true;
      window.clearTimeout(timeoutId);
      link.removeEventListener("load", handleReady);
      // BUGFIX: the "error" listener registered below is `finish`, not
      // `handleReady`; removing the wrong handler was a no-op and leaked
      // the listener whenever we settled via timeout or "load".
      link.removeEventListener("error", finish);
      resolve();
    };
    const handleReady = () => {
      if (isStylesheetReady(link)) {
        finish();
        return;
      }
      // The sheet may only become readable a frame after "load" fires.
      window.requestAnimationFrame(() => {
        if (isStylesheetReady(link)) finish();
      });
    };
    timeoutId = window.setTimeout(finish, CLONE_RESOURCE_TIMEOUT_MS);
    link.addEventListener("load", handleReady, { once: true });
    link.addEventListener("error", finish, { once: true });
    // Covers a sheet that became ready between the initial check and
    // listener registration.
    handleReady();
  });
}
962
/**
 * Block until the cloned document's external stylesheets, fonts, and first
 * paint have settled so html2canvas renders the final layout.
 */
async function waitForClonedDocumentStyles(doc) {
  const links = [...doc.querySelectorAll('link[rel="stylesheet"][href]')];
  await Promise.all(links.map(waitForStylesheetLink));
  if (doc.fonts?.ready) await doc.fonts.ready;
  await waitForNextPaint(doc);
}
968
/**
 * Assemble html2canvas options for a 1:1 capture of the current viewport,
 * waiting for the cloned document's styles before rendering.
 */
function getHtml2CanvasOptions(captureMetrics) {
  const { viewportWidth, viewportHeight } = captureMetrics;
  const { scrollX, scrollY } = window;
  return {
    scale: 1,
    useCORS: true,
    logging: false,
    width: viewportWidth,
    height: viewportHeight,
    windowWidth: viewportWidth,
    windowHeight: viewportHeight,
    x: scrollX,
    y: scrollY,
    scrollX,
    scrollY,
    // Hold rendering until the clone's styles/fonts have settled.
    onclone: (doc) => waitForClonedDocumentStyles(doc)
  };
}
986
/**
 * Build a placeholder canvas used when html2canvas fails: a light gray,
 * viewport-sized surface with a centered notice.
 */
function createFallbackCanvas() {
  const canvas = document.createElement("canvas");
  canvas.width = window.innerWidth;
  canvas.height = window.innerHeight;
  const context = canvas.getContext("2d");
  if (context) {
    context.fillStyle = "#f0f0f0";
    context.fillRect(0, 0, canvas.width, canvas.height);
    context.fillStyle = "#666";
    context.font = "16px sans-serif";
    context.textAlign = "center";
    context.fillText("Screenshot unavailable", canvas.width / 2, canvas.height / 2);
  }
  return canvas;
}
1005
/**
 * Capture the current viewport as a PNG data URL.
 * Renders the DOM with html2canvas; if that fails (e.g. unsupported CSS),
 * a placeholder canvas is used so callers always receive an image.
 */
async function captureViewport() {
  const captureMetrics = getCaptureMetrics();
  let canvas;
  try {
    canvas = await html2canvas(document.body, getHtml2CanvasOptions(captureMetrics));
  } catch {
    canvas = createFallbackCanvas();
  }
  const { viewportWidth, viewportHeight } = captureMetrics;
  return {
    imageData: canvas.toDataURL("image/png"),
    width: canvas.width,
    height: canvas.height,
    viewportWidth,
    viewportHeight
  };
}
1026
/**
 * Capture the viewport with numbered marker overlays on interactive
 * elements. Returns the annotated PNG plus the marker map and a textual
 * marker context for later ID resolution.
 */
async function captureAnnotatedViewport() {
  const captureMetrics = getCaptureMetrics();
  const markerMap = createMarkerMap();
  let sourceCanvas;
  try {
    sourceCanvas = await html2canvas(document.body, getHtml2CanvasOptions(captureMetrics));
  } catch {
    sourceCanvas = createFallbackCanvas();
  }
  // Skip the overlay pass entirely when there is nothing to annotate.
  const annotated = markerMap.size > 0 ? createAnnotatedCanvas(sourceCanvas, markerMap) : sourceCanvas;
  const markerContext = generateMarkerContext(markerMap);
  const { viewportWidth, viewportHeight } = captureMetrics;
  return {
    imageData: annotated.toDataURL("image/png"),
    width: annotated.width,
    height: annotated.height,
    viewportWidth,
    viewportHeight,
    markerMap,
    markerContext
  };
}
1052
+ //#endregion
1053
+ //#region src/core/services/screen-capture.ts
1054
/**
 * Framework-agnostic service wrapping viewport screenshot capture.
 */
var ScreenCaptureService = class {
  /**
   * Take a plain screenshot of the current viewport.
   * @returns Promise of a screenshot result with image data and dimensions
   */
  capture() {
    return captureViewport();
  }
  /**
   * Take a screenshot with numbered marker overlays on interactive
   * elements.
   * @returns Promise of an annotated screenshot result with marker map
   */
  captureAnnotated() {
    return captureAnnotatedViewport();
  }
};
1074
+ //#endregion
1075
+ //#region src/core/services/tts-playback-queue.ts
1076
/**
 * Orders TTS playback while letting speech preparation run ahead.
 *
 * Each enqueued sentence starts preparing (e.g. server synthesis)
 * immediately, but the prepared playback callbacks are chained so audio
 * always plays in enqueue order. The first playback fires onPlaybackStart
 * once; any failure latches into `error` and stops further enqueues.
 */
var TTSPlaybackQueue = class {
  error = null;
  hasStartedPlayback = false;
  onError;
  onPlaybackStart;
  playbackChain = Promise.resolve();
  prepare;
  signal;
  constructor(options) {
    this.onError = options.onError;
    this.onPlaybackStart = options.onPlaybackStart;
    this.prepare = options.prepare;
    this.signal = options.signal;
  }
  /**
   * Queue a text segment for speech. No-ops on blank text, after a
   * failure, or once the abort signal has fired.
   */
  enqueue(text) {
    const segment = text.trim();
    if (!segment || this.error || this.signal?.aborted) return;
    // Start preparation now so synthesis overlaps the current playback.
    const prepared = this.prepare(segment, this.signal);
    prepared.catch((cause) => this.fail(toError(cause)));
    this.playbackChain = this.playbackChain
      .then(async () => {
        if (this.signal?.aborted) return;
        const play = await prepared;
        if (this.signal?.aborted) return;
        if (!this.hasStartedPlayback) {
          this.hasStartedPlayback = true;
          this.onPlaybackStart?.();
        }
        await play();
      })
      .catch((cause) => this.fail(toError(cause)));
  }
  /**
   * Resolve once every queued segment has played; rethrows the first
   * recorded error if the queue failed.
   */
  async waitForCompletion() {
    await this.playbackChain;
    if (this.error) throw this.error;
  }
  // Latch the first error and notify at most once.
  fail(error) {
    if (this.error) return;
    this.error = error;
    this.onError?.(error);
  }
};
1134
+ //#endregion
1135
+ //#region src/core/utils/audio.ts
1136
+ /**
1137
+ * Audio conversion utilities for voice capture.
1138
+ * Converts Float32 audio data to WAV format for server transcription.
1139
+ */
1140
/**
 * Concatenate Float32Array chunks into one contiguous Float32Array,
 * preserving order.
 */
function mergeAudioChunks(chunks) {
  let totalLength = 0;
  for (const chunk of chunks) totalLength += chunk.length;
  const merged = new Float32Array(totalLength);
  let writeIndex = 0;
  for (const chunk of chunks) {
    merged.set(chunk, writeIndex);
    writeIndex += chunk.length;
  }
  return merged;
}
1153
/**
 * Write Float32 samples into a DataView as little-endian 16-bit PCM,
 * clamping to [-1, 1]. Negative samples scale by 32768 and positive by
 * 32767 so both extremes map onto the full int16 range.
 */
function floatTo16BitPCM(output, offset, input) {
  let writeOffset = offset;
  for (let index = 0; index < input.length; index += 1) {
    const sample = Math.max(-1, Math.min(1, input[index]));
    const scaled = sample < 0 ? sample * 32768 : sample * 32767;
    output.setInt16(writeOffset, scaled, true);
    writeOffset += 2;
  }
}
1162
/**
 * Write an ASCII string into a DataView byte-by-byte starting at offset.
 */
function writeString(view, offset, string) {
  let index = 0;
  while (index < string.length) {
    view.setUint8(offset + index, string.charCodeAt(index));
    index += 1;
  }
}
1168
/**
 * Wrap Float32 samples in a 44-byte RIFF/WAVE header and return the
 * result as a mono 16-bit PCM "audio/wav" Blob.
 */
function encodeWAV(samples, sampleRate) {
  const numChannels = 1;
  const bitsPerSample = 16;
  const bytesPerSample = bitsPerSample / 8;
  const blockAlign = numChannels * bytesPerSample;
  const dataLength = samples.length * bytesPerSample;
  const headerLength = 44;
  const view = new DataView(new ArrayBuffer(headerLength + dataLength));
  // RIFF chunk descriptor
  writeString(view, 0, "RIFF");
  view.setUint32(4, 36 + dataLength, true); // bytes remaining after this field
  writeString(view, 8, "WAVE");
  // "fmt " sub-chunk: PCM, mono, 16-bit
  writeString(view, 12, "fmt ");
  view.setUint32(16, 16, true); // fmt chunk size
  view.setUint16(20, 1, true); // audio format 1 = PCM
  view.setUint16(22, numChannels, true);
  view.setUint32(24, sampleRate, true);
  view.setUint32(28, sampleRate * blockAlign, true); // byte rate
  view.setUint16(32, blockAlign, true);
  view.setUint16(34, bitsPerSample, true);
  // "data" sub-chunk
  writeString(view, 36, "data");
  view.setUint32(40, dataLength, true);
  floatTo16BitPCM(view, headerLength, samples);
  return new Blob([view.buffer], { type: "audio/wav" });
}
1195
+ //#endregion
1196
+ //#region src/core/utils/audio-worklet.ts
1197
+ /**
1198
+ * AudioWorklet processor code for voice capture.
1199
+ * Inlined as a blob URL to avoid separate file serving requirements.
1200
+ */
1201
// Source for the AudioWorkletProcessor that captures mic input. It batches
// samples into 2048-sample "audio" messages, aggregates RMS/peak stats
// every 4 render quanta into "level" messages, and answers a "flush"
// request by emitting any buffered data followed by "flush-complete".
const workletCode = `
class AudioCaptureProcessor extends AudioWorkletProcessor {
  constructor() {
    super()
    this.isRecording = true
    this.audioChunkSize = 2048
    this.audioBuffer = new Float32Array(this.audioChunkSize)
    this.audioBufferIndex = 0
    this.levelFramesPerUpdate = 4
    this.levelFrameCount = 0
    this.levelRmsSum = 0
    this.levelPeak = 0

    this.port.onmessage = (event) => {
      if (event.data?.type === "flush") {
        this.flushAudio()
        this.flushLevel()
        this.port.postMessage({ type: "flush-complete" })
      }
    }
  }

  flushAudio() {
    if (this.audioBufferIndex === 0) return

    const chunk = this.audioBuffer.slice(0, this.audioBufferIndex)
    this.port.postMessage({
      type: "audio",
      data: chunk
    })
    this.audioBufferIndex = 0
  }

  flushLevel() {
    if (this.levelFrameCount === 0) return

    this.port.postMessage({
      type: "level",
      rms: this.levelRmsSum / this.levelFrameCount,
      peak: this.levelPeak
    })

    this.levelFrameCount = 0
    this.levelRmsSum = 0
    this.levelPeak = 0
  }

  process(inputs) {
    if (!this.isRecording) return false

    const input = inputs[0]
    if (input && input.length > 0) {
      const channelData = input[0]
      let sum = 0
      let peak = 0
      for (let i = 0; i < channelData.length; i++) {
        const sample = channelData[i]
        sum += sample * sample
        const absolute = Math.abs(sample)
        if (absolute > peak) peak = absolute
      }

      this.levelRmsSum += Math.sqrt(sum / channelData.length)
      this.levelPeak = Math.max(this.levelPeak, peak)
      this.levelFrameCount += 1

      if (this.levelFrameCount >= this.levelFramesPerUpdate) {
        this.flushLevel()
      }

      let readOffset = 0
      while (readOffset < channelData.length) {
        const remaining = this.audioBuffer.length - this.audioBufferIndex
        const copyLength = Math.min(remaining, channelData.length - readOffset)

        this.audioBuffer.set(
          channelData.subarray(readOffset, readOffset + copyLength),
          this.audioBufferIndex
        )

        this.audioBufferIndex += copyLength
        readOffset += copyLength

        if (this.audioBufferIndex >= this.audioBuffer.length) {
          this.flushAudio()
        }
      }
    }

    return true
  }
}

registerProcessor("audio-capture-processor", AudioCaptureProcessor)
`;
1296
// Memoized blob: URL for the worklet source; created at most once.
let cachedBlobURL = null;
/**
 * Return a blob URL serving the audio worklet processor source. The URL is
 * created lazily and cached so repeated captures reuse the same blob.
 */
function createWorkletBlobURL() {
  if (cachedBlobURL === null) {
    const blob = new Blob([workletCode], { type: "application/javascript" });
    cachedBlobURL = URL.createObjectURL(blob);
  }
  return cachedBlobURL;
}
1308
+ //#endregion
1309
+ //#region src/core/services/voice-capture.ts
1310
// Capture settings: mono 16 kHz input with a perceptual level curve tuned
// for waveform visualization.
const SAMPLE_RATE = 16e3;
const AUDIO_LEVEL_NOISE_GATE = 5e-4;
const AUDIO_LEVEL_INPUT_GAIN = 600;
const AUDIO_LEVEL_ATTACK = .7;
const AUDIO_LEVEL_RELEASE = .25;
// Clamp a number into [min, max].
function clamp$1(value, min, max) {
  const atLeastMin = Math.max(value, min);
  return Math.min(atLeastMin, max);
}
/**
 * Map a raw RMS reading onto a 0-1 display level: gate out the noise
 * floor, then apply a log curve normalized so that heavily amplified
 * input saturates at 1.
 */
function normalizeAudioLevel(rms) {
  const gatedRms = Math.max(0, rms - AUDIO_LEVEL_NOISE_GATE);
  const curved = Math.log1p(gatedRms * AUDIO_LEVEL_INPUT_GAIN) / Math.log1p(AUDIO_LEVEL_INPUT_GAIN);
  return clamp$1(curved, 0, 1);
}
/**
 * Exponential smoothing with a fast attack and slower release so the
 * waveform rises quickly and decays gently.
 */
function smoothAudioLevel(current, target) {
  const smoothing = target > current ? AUDIO_LEVEL_ATTACK : AUDIO_LEVEL_RELEASE;
  return current + (target - current) * smoothing;
}
1326
/**
 * Framework-agnostic service for voice capture using AudioWorkletNode.
 *
 * Lifecycle: start() acquires the microphone and wires
 * MediaStreamSource -> AudioWorkletNode -> muted gain -> destination;
 * stop() flushes buffered samples and returns a WAV blob; dispose() tears
 * everything down without producing a result.
 */
var VoiceCaptureService = class {
  // Web Audio graph nodes; all null while not recording.
  audioContext = null;
  workletNode = null;
  sourceNode = null;
  silentGainNode = null;
  // Active microphone MediaStream, if any.
  stream = null;
  // Float32Array chunks received from the worklet, in arrival order.
  chunks = [];
  // Subscriber for smoothed 0-1 level updates (kept across turns).
  levelCallback = null;
  // Smoothed level most recently fed to levelCallback.
  visualLevel = 0;
  // Resolver for an in-flight "flush" round-trip with the worklet.
  flushResolve = null;
  /**
   * Register a callback to receive audio level updates (0-1).
   * Called at ~60fps during recording for waveform visualization.
   */
  onLevel(callback) {
    this.levelCallback = callback;
  }
  /**
   * Start recording audio from the microphone.
   * @throws Error if microphone access is denied
   */
  async start() {
    this.chunks = [];
    this.visualLevel = 0;
    // Mono 16 kHz capture with browser echo/noise processing enabled.
    const stream = await navigator.mediaDevices.getUserMedia({ audio: {
      sampleRate: SAMPLE_RATE,
      channelCount: 1,
      echoCancellation: true,
      noiseSuppression: true
    } });
    this.stream = stream;
    const audioContext = new AudioContext({ sampleRate: SAMPLE_RATE });
    this.audioContext = audioContext;
    await audioContext.resume();
    const workletURL = createWorkletBlobURL();
    await audioContext.audioWorklet.addModule(workletURL);
    const source = audioContext.createMediaStreamSource(stream);
    this.sourceNode = source;
    const workletNode = new AudioWorkletNode(audioContext, "audio-capture-processor");
    this.workletNode = workletNode;
    // Zero-gain sink keeps the worklet processing without audible output.
    const silentGainNode = audioContext.createGain();
    silentGainNode.gain.value = 0;
    this.silentGainNode = silentGainNode;
    workletNode.port.onmessage = (event) => {
      const { type, data, rms, peak } = event.data;
      if (type === "audio") this.chunks.push(data);
      else if (type === "level" && this.levelCallback) {
        // Blend RMS with a weighted peak so transients register visually.
        const targetLevel = normalizeAudioLevel(Math.max(rms ?? 0, (peak ?? 0) * .6));
        this.visualLevel = smoothAudioLevel(this.visualLevel, targetLevel);
        this.levelCallback(this.visualLevel);
      } else if (type === "flush-complete") {
        this.flushResolve?.();
        this.flushResolve = null;
      }
    };
    source.connect(workletNode);
    workletNode.connect(silentGainNode);
    silentGainNode.connect(audioContext.destination);
  }
  /**
   * Stop recording and return the captured audio as a WAV blob.
   */
  async stop() {
    // Drain the worklet's partially-filled buffer before tearing down.
    await this.flushPendingAudio();
    if (this.stream) {
      this.stream.getTracks().forEach((track) => track.stop());
      this.stream = null;
    }
    if (this.sourceNode) {
      this.sourceNode.disconnect();
      this.sourceNode = null;
    }
    if (this.workletNode) {
      this.workletNode.disconnect();
      this.workletNode = null;
    }
    if (this.silentGainNode) {
      this.silentGainNode.disconnect();
      this.silentGainNode = null;
    }
    if (this.audioContext) {
      await this.audioContext.close();
      this.audioContext = null;
    }
    // Reset the visualization to silence for the UI.
    this.visualLevel = 0;
    this.levelCallback?.(0);
    const wavBlob = encodeWAV(mergeAudioChunks(this.chunks), SAMPLE_RATE);
    this.chunks = [];
    return wavBlob;
  }
  /**
   * Clean up all resources.
   *
   * The level callback is intentionally preserved so the same service instance
   * can be reused across multiple push-to-talk turns without re-registering
   * the waveform subscription from the client.
   */
  dispose() {
    if (this.stream) {
      this.stream.getTracks().forEach((track) => track.stop());
      this.stream = null;
    }
    if (this.sourceNode) {
      this.sourceNode.disconnect();
      this.sourceNode = null;
    }
    if (this.workletNode) {
      this.workletNode.disconnect();
      this.workletNode = null;
    }
    if (this.silentGainNode) {
      this.silentGainNode.disconnect();
      this.silentGainNode = null;
    }
    if (this.audioContext) {
      // Fire-and-forget close: dispose() is synchronous by design.
      this.audioContext.close();
      this.audioContext = null;
    }
    this.chunks = [];
    this.visualLevel = 0;
    this.levelCallback?.(0);
    this.flushResolve = null;
  }
  // Ask the worklet to emit buffered audio, resolving on its
  // "flush-complete" reply or after a 50 ms timeout so shutdown cannot
  // hang on a dead worklet.
  async flushPendingAudio() {
    if (!this.workletNode) return;
    await new Promise((resolve) => {
      const timeoutId = setTimeout(() => {
        this.flushResolve = null;
        resolve();
      }, 50);
      this.flushResolve = () => {
        clearTimeout(timeoutId);
        resolve();
      };
      this.workletNode?.port.postMessage({ type: "flush" });
    });
  }
};
1467
+ //#endregion
1468
+ //#region src/core/state-machine.ts
1469
+ /**
1470
+ * State transition table for the voice interaction flow.
1471
+ * Maps current state + event type to next state.
1472
+ */
1473
/**
 * Allowed transitions for the voice interaction flow, keyed by current
 * state then event type. Events with no entry are ignored.
 */
const transitions = {
  idle: { HOTKEY_PRESSED: "listening" },
  listening: {
    HOTKEY_RELEASED: "processing",
    ERROR: "idle"
  },
  processing: {
    RESPONSE_STARTED: "responding",
    TTS_COMPLETE: "idle",
    HOTKEY_PRESSED: "listening",
    ERROR: "idle"
  },
  responding: {
    TTS_COMPLETE: "idle",
    HOTKEY_PRESSED: "listening",
    ERROR: "idle"
  }
};
/**
 * Minimal observable state machine for the voice flow
 * (idle -> listening -> processing -> responding -> idle).
 *
 * Pressing the hotkey while processing/responding interrupts straight
 * back to listening. transition() returns whether the event was accepted;
 * subscribers are notified on every accepted transition and on reset().
 */
function createStateMachine(initial = "idle") {
  let currentState = initial;
  const subscribers = new Set();
  const emit = () => {
    for (const subscriber of subscribers) subscriber();
  };
  return {
    getState: () => currentState,
    transition: (event) => {
      const target = transitions[currentState][event.type];
      if (!target) return false;
      currentState = target;
      emit();
      return true;
    },
    subscribe: (subscriber) => {
      subscribers.add(subscriber);
      return () => subscribers.delete(subscriber);
    },
    reset: () => {
      currentState = "idle";
      emit();
    }
  };
}
1524
+ //#endregion
1525
+ //#region src/core/utils/response-processor.ts
1526
// Abbreviations whose trailing "." must not be treated as end of sentence.
const COMMON_ABBREVIATIONS = [
  "mr.",
  "mrs.",
  "ms.",
  "dr.",
  "prof.",
  "sr.",
  "jr.",
  "e.g.",
  "i.e."
];
// Quote/bracket characters that may trail sentence-ending punctuation.
const CLOSING_PUNCTUATION = new Set([
  "\"",
  "'",
  "”",
  "’",
  ")",
  "]",
  "}"
]);
// Segments shorter than this are buffered and merged with the next one.
const SHORT_SEGMENT_THRESHOLD = 24;
/**
 * Heuristic: does the character at `index` end a sentence? "!", "?", "…"
 * and newlines always do; "." does unless it sits inside a number
 * (e.g. 3.5) or terminates a known abbreviation.
 */
function isLikelySentenceBoundary(text, index) {
  const char = text[index];
  if (char === "!" || char === "?" || char === "…" || char === "\n") return true;
  if (char !== ".") return false;
  const before = text[index - 1] ?? "";
  const after = text[index + 1] ?? "";
  // Decimal point inside a number.
  if (/\d/.test(before) && /\d/.test(after)) return false;
  const tail = text.slice(Math.max(0, index - 10), index + 1).toLowerCase();
  return !COMMON_ABBREVIATIONS.some((abbreviation) => tail.endsWith(abbreviation));
}
/**
 * Find the first index just past a completed sentence at or after `start`
 * (including trailing closing punctuation and whitespace), or null when no
 * stable boundary exists yet.
 */
function findBoundaryEnd(text, start) {
  const length = text.length;
  const skipWhitespace = (from) => {
    let position = from;
    while (position < length && /\s/.test(text[position] ?? "")) position += 1;
    return position;
  };
  for (let index = start; index < length; index += 1) {
    // A newline is always a hard boundary.
    if (text[index] === "\n") return skipWhitespace(index + 1);
    if (!isLikelySentenceBoundary(text, index)) continue;
    // Swallow closing quotes/brackets after the punctuation mark.
    let end = index + 1;
    while (end < length && CLOSING_PUNCTUATION.has(text[end] ?? "")) end += 1;
    if (end < length) {
      const following = text[end] ?? "";
      // Require whitespace or an uppercase/digit continuation; otherwise
      // this "." is probably mid-token (e.g. a URL or filename).
      if (!/\s/.test(following) && !/[A-Z0-9]/.test(following)) continue;
    }
    return skipWhitespace(end);
  }
  return null;
}
/**
 * Split every fully-terminated sentence off the front of `text`.
 * Returns the trimmed segments plus how many characters were consumed.
 */
function extractCompletedSegments(text) {
  const segments = [];
  let consumedLength = 0;
  for (;;) {
    if (consumedLength >= text.length) break;
    const boundaryEnd = findBoundaryEnd(text, consumedLength);
    if (boundaryEnd === null) break;
    const segment = text.slice(consumedLength, boundaryEnd).trim();
    if (segment) segments.push(segment);
    consumedLength = boundaryEnd;
  }
  return {
    consumedLength,
    segments
  };
}
1592
/**
 * Incrementally consumes a streaming assistant response. Exposes the
 * running tag-free text for the UI and emits speech segments once
 * sentence boundaries are stable, merging segments shorter than
 * SHORT_SEGMENT_THRESHOLD into their successor.
 */
var ProgressiveResponseProcessor = class {
  consumedVisibleTextLength = 0;
  pendingShortSegment = "";
  rawResponse = "";
  /**
   * Append a streamed chunk; returns the updated visible text and any
   * newly completed speech segments.
   */
  push(chunk) {
    this.rawResponse += chunk;
    const visibleText = stripTrailingPointingSyntax(this.rawResponse);
    const unconsumed = visibleText.slice(this.consumedVisibleTextLength);
    const { consumedLength, segments } = extractCompletedSegments(unconsumed);
    this.consumedVisibleTextLength += consumedLength;
    return {
      visibleText,
      speechSegments: this.coalesceSegments(segments)
    };
  }
  /**
   * Finalize the stream: flush any buffered short segment plus trailing
   * text, and return both the raw and tag-stripped response.
   */
  finish() {
    const finalResponseText = stripPointingTag(this.rawResponse);
    const trailingText = finalResponseText.slice(this.consumedVisibleTextLength).trim();
    const parts = [];
    if (this.pendingShortSegment) parts.push(this.pendingShortSegment);
    if (trailingText) parts.push(trailingText);
    this.pendingShortSegment = "";
    return {
      fullResponse: this.rawResponse,
      finalResponseText,
      speechSegments: parts.length > 0 ? [parts.join(" ").trim()] : []
    };
  }
  // Merge too-short segments into the next one so TTS doesn't stutter.
  coalesceSegments(segments) {
    const ready = [];
    for (const rawSegment of segments) {
      const segment = rawSegment.trim();
      if (!segment) continue;
      const candidate = this.pendingShortSegment ? `${this.pendingShortSegment} ${segment}` : segment;
      if (candidate.length < SHORT_SEGMENT_THRESHOLD) {
        this.pendingShortSegment = candidate;
      } else {
        this.pendingShortSegment = "";
        ready.push(candidate);
      }
    }
    return ready;
  }
};
1637
+ //#endregion
1638
+ //#region src/core/client.ts
1639
// Clamp a number into [min, max].
function clamp(value, min, max) {
  const atLeastMin = Math.max(value, min);
  return Math.min(atLeastMin, max);
}
/**
 * Extract a human-readable error message from a failed fetch Response.
 * Prefers a JSON `error` field, then the raw body text, then the fallback.
 */
async function readErrorMessage(response, fallbackMessage) {
  try {
    const contentType = response.headers.get("Content-Type") ?? "";
    if (contentType.includes("application/json")) {
      const body = await response.json();
      if (body?.error) return body.error;
    }
    const text = await response.text();
    if (text) return text;
  } catch {}
  return fallbackMessage;
}
/**
 * Map a point from screenshot pixel space into viewport space, clamping
 * to the viewport bounds. Returns the input unchanged when the screenshot
 * has no usable dimensions.
 */
function mapCoordinatesToViewport(x, y, screenshot) {
  const { width, height, viewportWidth, viewportHeight } = screenshot;
  if (width <= 0 || height <= 0) return { x, y };
  const scaleX = viewportWidth / width;
  const scaleY = viewportHeight / height;
  return {
    x: clamp(Math.round(x * scaleX), 0, Math.max(viewportWidth - 1, 0)),
    y: clamp(Math.round(y * scaleY), 0, Math.max(viewportHeight - 1, 0))
  };
}
1668
+ /**
1669
+ * Framework-agnostic client for cursor buddy voice interactions.
1670
+ *
1671
+ * Manages the complete voice interaction flow:
1672
+ * idle -> listening -> processing -> responding -> idle
1673
+ *
1674
+ * Supports interruption: pressing hotkey during any state aborts
1675
+ * in-flight work and immediately transitions to listening.
1676
+ */
1677
+ var CursorBuddyClient = class {
1678
+ endpoint;
1679
+ options;
1680
+ voiceCapture;
1681
+ audioPlayback;
1682
+ browserSpeech;
1683
+ liveTranscription;
1684
+ screenCapture;
1685
+ pointerController;
1686
+ stateMachine;
1687
+ liveTranscript = "";
1688
+ transcript = "";
1689
+ response = "";
1690
+ error = null;
1691
+ abortController = null;
1692
+ historyCommittedForTurn = false;
1693
+ speechProviderForTurn = null;
1694
+ cachedSnapshot;
1695
+ listeners = /* @__PURE__ */ new Set();
1696
+ constructor(endpoint, options = {}, services = {}) {
1697
+ this.endpoint = endpoint;
1698
+ this.options = options;
1699
+ this.voiceCapture = services.voiceCapture ?? new VoiceCaptureService();
1700
+ this.audioPlayback = services.audioPlayback ?? new AudioPlaybackService();
1701
+ this.browserSpeech = services.browserSpeech ?? new BrowserSpeechService();
1702
+ this.liveTranscription = services.liveTranscription ?? new LiveTranscriptionService();
1703
+ this.screenCapture = services.screenCapture ?? new ScreenCaptureService();
1704
+ this.pointerController = services.pointerController ?? new PointerController();
1705
+ this.stateMachine = createStateMachine();
1706
+ this.cachedSnapshot = this.buildSnapshot();
1707
+ this.voiceCapture.onLevel((level) => $audioLevel.set(level));
1708
+ this.liveTranscription.onPartial((text) => {
1709
+ if (this.liveTranscript === text) return;
1710
+ this.liveTranscript = text;
1711
+ this.notify();
1712
+ });
1713
+ this.stateMachine.subscribe(() => {
1714
+ this.options.onStateChange?.(this.stateMachine.getState());
1715
+ this.notify();
1716
+ });
1717
+ this.pointerController.subscribe(() => this.notify());
1718
+ }
1719
+ /**
1720
+ * Start listening for voice input.
1721
+ * Aborts any in-flight work from previous session.
1722
+ */
1723
+ startListening() {
1724
+ this.abort();
1725
+ this.liveTranscript = "";
1726
+ this.transcript = "";
1727
+ this.response = "";
1728
+ this.error = null;
1729
+ this.historyCommittedForTurn = false;
1730
+ this.speechProviderForTurn = null;
1731
+ this.pointerController.release();
1732
+ this.stateMachine.transition({ type: "HOTKEY_PRESSED" });
1733
+ this.notify();
1734
+ this.abortController = new AbortController();
1735
+ const signal = this.abortController.signal;
1736
+ this.beginListeningSession(signal).catch((error) => {
1737
+ if (signal.aborted) return;
1738
+ this.voiceCapture.dispose();
1739
+ this.liveTranscription.dispose();
1740
+ this.handleError(toError(error, "Failed to start listening"));
1741
+ });
1742
+ }
1743
+ /**
1744
+ * Stop listening and process the voice input.
1745
+ */
1746
+ async stopListening() {
1747
+ if (this.stateMachine.getState() !== "listening") return;
1748
+ this.stateMachine.transition({ type: "HOTKEY_RELEASED" });
1749
+ const signal = this.abortController?.signal;
1750
+ let turnFailure = null;
1751
+ const failTurn = (error) => {
1752
+ if (turnFailure || signal?.aborted) return;
1753
+ turnFailure = error;
1754
+ this.audioPlayback.stop();
1755
+ this.browserSpeech.stop();
1756
+ this.abortController?.abort();
1757
+ };
1758
+ try {
1759
+ const [audioBlob, screenshot, browserTranscript] = await Promise.all([
1760
+ this.voiceCapture.stop(),
1761
+ this.screenCapture.captureAnnotated(),
1762
+ this.stopLiveTranscription()
1763
+ ]);
1764
+ if (turnFailure) throw turnFailure;
1765
+ if (signal?.aborted) return;
1766
+ const transcript = await this.resolveTranscript(browserTranscript, audioBlob, signal);
1767
+ if (turnFailure) throw turnFailure;
1768
+ if (signal?.aborted) return;
1769
+ this.liveTranscript = "";
1770
+ this.transcript = transcript;
1771
+ this.options.onTranscript?.(transcript);
1772
+ this.notify();
1773
+ this.prepareSpeechMode();
1774
+ const { cleanResponse, fullResponse, playbackQueue } = await this.chatAndSpeak(transcript, screenshot, signal, {
1775
+ onFailure: failTurn,
1776
+ onPlaybackStart: () => {
1777
+ this.stateMachine.transition({ type: "RESPONSE_STARTED" });
1778
+ }
1779
+ });
1780
+ if (turnFailure) throw turnFailure;
1781
+ if (signal?.aborted) return;
1782
+ const parsed = parsePointingTagRaw(fullResponse);
1783
+ this.options.onResponse?.(cleanResponse);
1784
+ let pointTarget = null;
1785
+ if (parsed) if (parsed.type === "marker") {
1786
+ const coords = resolveMarkerToCoordinates(screenshot.markerMap, parsed.markerId);
1787
+ if (coords) pointTarget = {
1788
+ ...coords,
1789
+ label: parsed.label
1790
+ };
1791
+ } else pointTarget = {
1792
+ ...mapCoordinatesToViewport(parsed.x, parsed.y, screenshot),
1793
+ label: parsed.label
1794
+ };
1795
+ if (pointTarget) {
1796
+ this.options.onPoint?.(pointTarget);
1797
+ this.pointerController.pointAt(pointTarget);
1798
+ }
1799
+ await playbackQueue.waitForCompletion();
1800
+ if (turnFailure) throw turnFailure;
1801
+ if (signal?.aborted) return;
1802
+ const newHistory = [
1803
+ ...$conversationHistory.get(),
1804
+ {
1805
+ role: "user",
1806
+ content: transcript
1807
+ },
1808
+ {
1809
+ role: "assistant",
1810
+ content: cleanResponse
1811
+ }
1812
+ ];
1813
+ $conversationHistory.set(newHistory);
1814
+ this.historyCommittedForTurn = true;
1815
+ this.stateMachine.transition({ type: "TTS_COMPLETE" });
1816
+ } catch (err) {
1817
+ if (turnFailure) {
1818
+ this.handleError(turnFailure);
1819
+ return;
1820
+ }
1821
+ if (signal?.aborted) return;
1822
+ this.handleError(toError(err));
1823
+ }
1824
+ }
1825
+ /**
1826
+ * Enable or disable the buddy.
1827
+ */
1828
+ setEnabled(enabled) {
1829
+ $isEnabled.set(enabled);
1830
+ this.notify();
1831
+ }
1832
+ /**
1833
+ * Manually point at coordinates.
1834
+ */
1835
+ pointAt(x, y, label) {
1836
+ this.pointerController.pointAt({
1837
+ x,
1838
+ y,
1839
+ label
1840
+ });
1841
+ }
1842
+ /**
1843
+ * Dismiss the current pointing target.
1844
+ */
1845
+ dismissPointing() {
1846
+ this.pointerController.release();
1847
+ }
1848
+ /**
1849
+ * Reset to idle state and stop any in-progress work.
1850
+ */
1851
+ reset() {
1852
+ this.abort();
1853
+ this.liveTranscript = "";
1854
+ this.transcript = "";
1855
+ this.response = "";
1856
+ this.error = null;
1857
+ this.historyCommittedForTurn = false;
1858
+ this.pointerController.release();
1859
+ this.stateMachine.reset();
1860
+ this.notify();
1861
+ }
1862
+ /**
1863
+ * Update buddy position to follow cursor.
1864
+ * Call this on cursor position changes.
1865
+ */
1866
+ updateCursorPosition() {
1867
+ this.pointerController.updateFollowPosition();
1868
+ }
1869
+ /**
1870
+ * Subscribe to state changes.
1871
+ */
1872
+ subscribe(listener) {
1873
+ this.listeners.add(listener);
1874
+ return () => this.listeners.delete(listener);
1875
+ }
1876
+ /**
1877
+ * Get current state snapshot for React's useSyncExternalStore.
1878
+ * Returns a cached object to ensure referential stability.
1879
+ */
1880
+ getSnapshot() {
1881
+ return this.cachedSnapshot;
1882
+ }
1883
+ /**
1884
+ * Build a new snapshot object.
1885
+ */
1886
+ buildSnapshot() {
1887
+ return {
1888
+ state: this.stateMachine.getState(),
1889
+ liveTranscript: this.liveTranscript,
1890
+ transcript: this.transcript,
1891
+ response: this.response,
1892
+ error: this.error,
1893
+ isPointing: this.pointerController.isPointing(),
1894
+ isEnabled: $isEnabled.get()
1895
+ };
1896
+ }
1897
+ abort() {
1898
+ this.commitPartialHistory();
1899
+ this.abortController?.abort();
1900
+ this.abortController = null;
1901
+ this.voiceCapture.dispose();
1902
+ this.liveTranscription.dispose();
1903
+ this.audioPlayback.stop();
1904
+ this.browserSpeech.stop();
1905
+ this.speechProviderForTurn = null;
1906
+ $audioLevel.set(0);
1907
+ }
1908
+ /**
1909
+ * Commit partial turn to history when interrupted.
1910
+ * Only commits if we have both transcript and response,
1911
+ * and haven't already committed for this turn.
1912
+ */
1913
+ commitPartialHistory() {
1914
+ if (this.historyCommittedForTurn) return;
1915
+ if (!this.transcript || !this.response) return;
1916
+ const newHistory = [
1917
+ ...$conversationHistory.get(),
1918
+ {
1919
+ role: "user",
1920
+ content: this.transcript
1921
+ },
1922
+ {
1923
+ role: "assistant",
1924
+ content: this.response
1925
+ }
1926
+ ];
1927
+ $conversationHistory.set(newHistory);
1928
+ this.historyCommittedForTurn = true;
1929
+ }
1930
+ async transcribe(blob, signal) {
1931
+ const formData = new FormData();
1932
+ formData.append("audio", blob, "recording.wav");
1933
+ const response = await fetch(`${this.endpoint}/transcribe`, {
1934
+ method: "POST",
1935
+ body: formData,
1936
+ signal
1937
+ });
1938
+ if (!response.ok) throw new Error(await readErrorMessage(response, "Transcription failed"));
1939
+ const { text } = await response.json();
1940
+ return text;
1941
+ }
1942
+ /**
1943
+ * Stream the chat response, keep the visible text updated, and feed complete
1944
+ * speech segments into the TTS queue as soon as they are ready.
1945
+ */
1946
+ async chatAndSpeak(transcript, screenshot, signal, options) {
1947
+ const history = $conversationHistory.get();
1948
+ const response = await fetch(`${this.endpoint}/chat`, {
1949
+ method: "POST",
1950
+ headers: { "Content-Type": "application/json" },
1951
+ body: JSON.stringify({
1952
+ screenshot: screenshot.imageData,
1953
+ capture: {
1954
+ width: screenshot.width,
1955
+ height: screenshot.height
1956
+ },
1957
+ transcript,
1958
+ history,
1959
+ markerContext: screenshot.markerContext
1960
+ }),
1961
+ signal
1962
+ });
1963
+ if (!response.ok) throw new Error("Chat request failed");
1964
+ const reader = response.body?.getReader();
1965
+ if (!reader) throw new Error("No response body");
1966
+ const decoder = new TextDecoder();
1967
+ const responseProcessor = new ProgressiveResponseProcessor();
1968
+ const playbackQueue = new TTSPlaybackQueue({
1969
+ onError: options.onFailure,
1970
+ onPlaybackStart: options.onPlaybackStart,
1971
+ prepare: (text, currentSignal) => this.prepareSpeechSegment(text, currentSignal),
1972
+ signal
1973
+ });
1974
+ const shouldStreamSpeech = this.isSpeechStreamingEnabled();
1975
+ while (true) {
1976
+ const { done, value } = await reader.read();
1977
+ if (done) break;
1978
+ const chunk = decoder.decode(value, { stream: true });
1979
+ const { speechSegments, visibleText } = responseProcessor.push(chunk);
1980
+ if (shouldStreamSpeech) for (const speechSegment of speechSegments) playbackQueue.enqueue(speechSegment);
1981
+ this.updateResponse(visibleText);
1982
+ }
1983
+ const trailingChunk = decoder.decode();
1984
+ if (trailingChunk) {
1985
+ const { speechSegments, visibleText } = responseProcessor.push(trailingChunk);
1986
+ if (shouldStreamSpeech) for (const speechSegment of speechSegments) playbackQueue.enqueue(speechSegment);
1987
+ this.updateResponse(visibleText);
1988
+ }
1989
+ const finalizedResponse = responseProcessor.finish();
1990
+ if (shouldStreamSpeech) for (const speechSegment of finalizedResponse.speechSegments) playbackQueue.enqueue(speechSegment);
1991
+ else playbackQueue.enqueue(finalizedResponse.finalResponseText);
1992
+ this.updateResponse(finalizedResponse.finalResponseText);
1993
+ return {
1994
+ cleanResponse: finalizedResponse.finalResponseText,
1995
+ fullResponse: finalizedResponse.fullResponse,
1996
+ playbackQueue
1997
+ };
1998
+ }
1999
+ /**
2000
+ * Request server-side TTS audio for one text segment.
2001
+ */
2002
+ async synthesizeSpeech(text, signal) {
2003
+ const response = await fetch(`${this.endpoint}/tts`, {
2004
+ method: "POST",
2005
+ headers: { "Content-Type": "application/json" },
2006
+ body: JSON.stringify({ text }),
2007
+ signal
2008
+ });
2009
+ if (!response.ok) throw new Error(await readErrorMessage(response, "TTS request failed"));
2010
+ return response.blob();
2011
+ }
2012
+ /**
2013
+ * Resolve the initial speech provider for this turn.
2014
+ *
2015
+ * Decision tree:
2016
+ * 1. In `server` mode, always synthesize on the server.
2017
+ * 2. In `browser` mode, require browser speech support up front.
2018
+ * 3. In `auto` mode, prefer browser speech when available and keep that
2019
+ * choice cached so later segments stay on the same provider unless a
2020
+ * browser failure forces a one-way fallback to the server.
2021
+ */
2022
+ prepareSpeechMode() {
2023
+ const speechMode = this.getSpeechMode();
2024
+ if (speechMode === "browser" && !this.browserSpeech.isAvailable()) throw new Error("Browser speech is not supported");
2025
+ if (speechMode === "server") {
2026
+ this.speechProviderForTurn = "server";
2027
+ return;
2028
+ }
2029
+ if (speechMode === "browser") {
2030
+ this.speechProviderForTurn = "browser";
2031
+ return;
2032
+ }
2033
+ this.speechProviderForTurn = this.browserSpeech.isAvailable() ? "browser" : "server";
2034
+ }
2035
+ /**
2036
+ * Prepare a playback task for one text segment.
2037
+ *
2038
+ * The queue calls this eagerly so server synthesis can overlap with the
2039
+ * currently playing segment, but the returned task is still executed in the
2040
+ * original enqueue order.
2041
+ */
2042
+ async prepareSpeechSegment(text, signal) {
2043
+ switch (this.getSpeechMode()) {
2044
+ case "server": return this.prepareServerSpeechTask(text, signal);
2045
+ case "browser": return this.prepareBrowserSpeechTask(text, signal);
2046
+ default: return this.prepareAutoSpeechTask(text, signal);
2047
+ }
2048
+ }
2049
+ /**
2050
+ * Synthesize server audio immediately and return a playback task that reuses
2051
+ * the prepared blob later.
2052
+ */
2053
+ async prepareServerSpeechTask(text, signal) {
2054
+ const blob = await this.synthesizeSpeech(text, signal);
2055
+ return () => this.audioPlayback.play(blob, signal);
2056
+ }
2057
+ /**
2058
+ * Return a browser playback task for one text segment.
2059
+ */
2060
+ async prepareBrowserSpeechTask(text, signal) {
2061
+ return () => this.browserSpeech.speak(text, signal);
2062
+ }
2063
+ /**
2064
+ * Prepare a playback task for `auto` mode.
2065
+ *
2066
+ * We prefer the browser for low latency, but if browser speech fails for any
2067
+ * segment we permanently switch the remainder of the turn to server TTS so
2068
+ * later segments do not keep retrying the failing browser path.
2069
+ */
2070
+ async prepareAutoSpeechTask(text, signal) {
2071
+ if (this.getAutoSpeechProvider() === "server") return this.prepareServerSpeechTask(text, signal);
2072
+ return async () => {
2073
+ if (this.getAutoSpeechProvider() === "server") {
2074
+ await (await this.prepareServerSpeechTask(text, signal))();
2075
+ return;
2076
+ }
2077
+ try {
2078
+ await this.browserSpeech.speak(text, signal);
2079
+ } catch (error) {
2080
+ if (signal?.aborted) return;
2081
+ this.speechProviderForTurn = "server";
2082
+ await (await this.prepareServerSpeechTask(text, signal))();
2083
+ }
2084
+ };
2085
+ }
2086
+ /**
2087
+ * Read the current provider choice for `auto` mode, lazily defaulting to the
2088
+ * browser when supported and the server otherwise.
2089
+ */
2090
+ getAutoSpeechProvider() {
2091
+ if (this.speechProviderForTurn) return this.speechProviderForTurn;
2092
+ this.speechProviderForTurn = this.browserSpeech.isAvailable() ? "browser" : "server";
2093
+ return this.speechProviderForTurn;
2094
+ }
2095
+ handleError(err) {
2096
+ this.liveTranscript = "";
2097
+ this.error = err;
2098
+ this.stateMachine.transition({
2099
+ type: "ERROR",
2100
+ error: err
2101
+ });
2102
+ this.options.onError?.(err);
2103
+ this.notify();
2104
+ }
2105
+ /**
2106
+ * Resolve the effective transcription mode for the current client.
2107
+ */
2108
+ getTranscriptionMode() {
2109
+ return this.options.transcription?.mode ?? "auto";
2110
+ }
2111
+ /**
2112
+ * Resolve the effective speech mode for the current client.
2113
+ */
2114
+ getSpeechMode() {
2115
+ return this.options.speech?.mode ?? "server";
2116
+ }
2117
+ /**
2118
+ * Decide whether speech should start before the full chat response is ready.
2119
+ */
2120
+ isSpeechStreamingEnabled() {
2121
+ return this.options.speech?.allowStreaming ?? false;
2122
+ }
2123
+ /**
2124
+ * Decide whether this turn should attempt browser speech recognition.
2125
+ */
2126
+ shouldAttemptBrowserTranscription() {
2127
+ return this.getTranscriptionMode() !== "server";
2128
+ }
2129
+ /**
2130
+ * Decide whether browser speech recognition is mandatory for this turn.
2131
+ */
2132
+ isBrowserTranscriptionRequired() {
2133
+ return this.getTranscriptionMode() === "browser";
2134
+ }
2135
+ /**
2136
+ * Start the recorder and browser speech recognition together.
2137
+ *
2138
+ * The recorder always runs so we keep waveform updates and preserve a raw
2139
+ * audio backup for server fallback in `auto` mode.
2140
+ */
2141
+ async beginListeningSession(signal) {
2142
+ const shouldAttemptBrowser = this.shouldAttemptBrowserTranscription();
2143
+ const isBrowserTranscriptionAvailable = shouldAttemptBrowser && this.liveTranscription.isAvailable();
2144
+ if (shouldAttemptBrowser && !isBrowserTranscriptionAvailable) {
2145
+ if (this.isBrowserTranscriptionRequired()) throw new Error("Browser transcription is not supported");
2146
+ }
2147
+ const [voiceCaptureResult, browserTranscriptionResult] = await Promise.allSettled([this.voiceCapture.start(), isBrowserTranscriptionAvailable ? this.liveTranscription.start() : Promise.resolve(void 0)]);
2148
+ if (signal.aborted) return;
2149
+ if (voiceCaptureResult.status === "rejected") throw toError(voiceCaptureResult.reason, "Failed to start microphone");
2150
+ if (browserTranscriptionResult.status === "rejected" && this.isBrowserTranscriptionRequired()) throw toError(browserTranscriptionResult.reason, "Browser transcription failed to start");
2151
+ if (browserTranscriptionResult.status === "rejected") this.liveTranscription.dispose();
2152
+ }
2153
+ /**
2154
+ * Stop browser speech recognition and return the best final transcript it
2155
+ * produced for this turn.
2156
+ */
2157
+ async stopLiveTranscription() {
2158
+ if (!this.shouldAttemptBrowserTranscription() || !this.liveTranscription.isAvailable()) return "";
2159
+ try {
2160
+ return await this.liveTranscription.stop();
2161
+ } catch (error) {
2162
+ if (this.isBrowserTranscriptionRequired()) throw toError(error, "Browser transcription failed");
2163
+ return "";
2164
+ }
2165
+ }
2166
+ /**
2167
+ * Choose the transcript that should drive the turn.
2168
+ *
2169
+ * Decision tree:
2170
+ * 1. Use the browser transcript when it is available.
2171
+ * 2. In browser-only mode, fail if the browser produced nothing usable.
2172
+ * 3. In auto/server modes, fall back to the recorded audio upload.
2173
+ */
2174
+ async resolveTranscript(browserTranscript, audioBlob, signal) {
2175
+ const normalizedBrowserTranscript = browserTranscript.trim();
2176
+ if (normalizedBrowserTranscript) return normalizedBrowserTranscript;
2177
+ if (this.getTranscriptionMode() === "browser") throw new Error("Browser transcription did not produce a final transcript");
2178
+ return this.transcribe(audioBlob, signal);
2179
+ }
2180
+ updateResponse(text) {
2181
+ if (this.response === text) return;
2182
+ this.response = text;
2183
+ this.notify();
2184
+ }
2185
+ notify() {
2186
+ this.cachedSnapshot = this.buildSnapshot();
2187
+ this.listeners.forEach((listener) => listener());
2188
+ }
2189
+ };
2190
+ //#endregion
2191
+ export { $buddyScale as a, $buddyRotation as i, $audioLevel as n, $cursorPosition as o, $buddyPosition as r, $pointingTarget as s, CursorBuddyClient as t };
2192
+
2193
+ //# sourceMappingURL=client-UXGQt-7f.mjs.map