@speechos/client 0.2.5 → 0.2.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/form-detector.d.ts.map +1 -1
- package/dist/index.cjs +645 -122
- package/dist/index.cjs.map +1 -1
- package/dist/index.iife.js +670 -134
- package/dist/index.iife.js.map +1 -1
- package/dist/index.iife.min.js +248 -107
- package/dist/index.iife.min.js.map +1 -1
- package/dist/index.js +646 -123
- package/dist/index.js.map +1 -1
- package/dist/ui/dictation-output-modal.d.ts +5 -0
- package/dist/ui/dictation-output-modal.d.ts.map +1 -1
- package/dist/ui/mic-button.d.ts +5 -2
- package/dist/ui/mic-button.d.ts.map +1 -1
- package/dist/ui/mic-button.test.d.ts +1 -1
- package/dist/ui/widget.d.ts +41 -4
- package/dist/ui/widget.d.ts.map +1 -1
- package/package.json +3 -2
package/dist/index.cjs
CHANGED
@@ -145,6 +145,10 @@ class FormDetector {
         this.focusHandler = (event) => {
             const target = event.target;
             if (isFormField(target)) {
+                console.log("[SpeechOS] FormDetector: focus on form field", {
+                    element: target,
+                    tagName: target?.tagName,
+                });
                 core.state.setFocusedElement(target);
                 core.state.show();
                 core.events.emit("form:focus", { element: target });
@@ -1400,6 +1404,71 @@ const transcriptStore = {
     deleteTranscript: deleteTranscript,
 };
 
+function isNativeField(field) {
+    return field instanceof HTMLInputElement || field instanceof HTMLTextAreaElement;
+}
+/** Call a function after focusing a field and then restore the previous focus afterwards if necessary */
+function withFocus(field, callback) {
+    const document = field.ownerDocument;
+    const initialFocus = document.activeElement;
+    if (initialFocus === field) {
+        return callback();
+    }
+    try {
+        field.focus();
+        return callback();
+    }
+    finally {
+        field.blur(); // Supports `intialFocus === body`
+        if (initialFocus instanceof HTMLElement) {
+            initialFocus.focus();
+        }
+    }
+}
+// This will insert into the focused field. It shouild always be called inside withFocus.
+// Use this one locally if there are multiple `insertTextIntoField` or `document.execCommand` calls
+function insertTextWhereverTheFocusIs(document, text) {
+    if (text === '') {
+        // https://github.com/fregante/text-field-edit/issues/16
+        document.execCommand('delete');
+    }
+    else {
+        document.execCommand('insertText', false, text);
+    }
+}
+/** Inserts `text` at the cursor’s position, replacing any selection, with **undo** support and by firing the `input` event. */
+function insertTextIntoField(field, text) {
+    withFocus(field, () => {
+        insertTextWhereverTheFocusIs(field.ownerDocument, text);
+    });
+}
+/** Replaces the entire content, equivalent to `field.value = text` but with **undo** support and by firing the `input` event. */
+function setFieldText(field, text) {
+    if (isNativeField(field)) {
+        field.select();
+        insertTextIntoField(field, text);
+    }
+    else {
+        const document = field.ownerDocument;
+        withFocus(field, () => {
+            document.execCommand('selectAll', false, text);
+            insertTextWhereverTheFocusIs(document, text);
+        });
+    }
+}
+/** Get the selected text in a field or an empty string if nothing is selected. */
+function getFieldSelection(field) {
+    if (isNativeField(field)) {
+        return field.value.slice(field.selectionStart, field.selectionEnd);
+    }
+    const selection = field.ownerDocument.getSelection();
+    if (selection && field.contains(selection.anchorNode)) {
+        // The selection is inside the field
+        return selection.toString();
+    }
+    return '';
+}
+
 /**
  * @license
  * Copyright 2017 Google LLC
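For orientation, a minimal usage sketch (not part of the package) of how these newly bundled field helpers behave against a plain textarea; it assumes only standard DOM APIs plus the three functions added in the hunk above, and relies on document.execCommand the same way the helpers do.

    // Sketch only: exercises the helpers added above against a plain <textarea>.
    const field = document.createElement("textarea");
    document.body.append(field);

    setFieldText(field, "hello world");   // replace all content, fires `input`, keeps undo history
    field.setSelectionRange(0, 5);         // select "hello"
    console.log(getFieldSelection(field)); // -> "hello"
    insertTextIntoField(field, "goodbye"); // replaces the current selection
    console.log(field.value);              // -> "goodbye world"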
@@ -1760,7 +1829,9 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
         this.activeAction = null;
         this.editPreviewText = "";
         this.errorMessage = null;
-        this.
+        this.showRetryButton = true;
+        this.actionFeedback = null;
+        this.showNoAudioWarning = false;
     }
     static { this.styles = [
         themeStyles,
@@ -2350,8 +2421,9 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
       background-position: center;
     }
 
-    /* Command feedback badge - no match state (neutral gray) */
-    .status-label.command-none
+    /* Command/edit feedback badge - no match/empty state (neutral gray) */
+    .status-label.command-none,
+    .status-label.edit-empty {
       background: #4b5563;
       box-shadow: 0 4px 12px rgba(75, 85, 99, 0.3);
       animation: command-feedback-in 0.3s cubic-bezier(0.34, 1.56, 0.64, 1)
@@ -2467,10 +2539,14 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
      bottom: 72px; /* Above button */
      left: 50%;
      transform: translateX(-50%) translateY(8px);
+      min-width: 200px;
      max-width: 280px;
+      width: max-content;
      font-size: 13px;
      color: white;
      white-space: normal;
+      word-wrap: break-word;
+      overflow-wrap: break-word;
      text-align: center;
      padding: 12px 16px;
      border-radius: 12px;
@@ -2508,6 +2584,60 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
      border-color: rgba(255, 255, 255, 0.5);
    }
 
+    /* No audio warning banner */
+    .no-audio-warning {
+      position: absolute;
+      bottom: 120px; /* Above button and waveform visualizer */
+      left: 50%;
+      transform: translateX(-50%) translateY(8px);
+      display: flex;
+      align-items: center;
+      gap: 8px;
+      padding: 10px 14px;
+      border-radius: 12px;
+      background: linear-gradient(135deg, #f59e0b 0%, #d97706 100%);
+      box-shadow: 0 4px 12px rgba(245, 158, 11, 0.3);
+      transition: all 0.2s cubic-bezier(0.34, 1.56, 0.64, 1);
+      pointer-events: none;
+      opacity: 0;
+      white-space: nowrap;
+    }
+
+    .no-audio-warning.visible {
+      opacity: 1;
+      transform: translateX(-50%) translateY(0);
+      pointer-events: auto;
+    }
+
+    .no-audio-warning .warning-icon {
+      flex-shrink: 0;
+      color: white;
+    }
+
+    .no-audio-warning .warning-text {
+      font-size: 13px;
+      font-weight: 500;
+      color: white;
+    }
+
+    .no-audio-warning .settings-link {
+      background: rgba(255, 255, 255, 0.2);
+      border: 1px solid rgba(255, 255, 255, 0.3);
+      border-radius: 6px;
+      padding: 4px 10px;
+      font-size: 12px;
+      font-weight: 600;
+      color: white;
+      cursor: pointer;
+      transition: all 0.15s;
+      white-space: nowrap;
+    }
+
+    .no-audio-warning .settings-link:hover {
+      background: rgba(255, 255, 255, 0.3);
+      border-color: rgba(255, 255, 255, 0.5);
+    }
+
    /* Mobile styles - 30% larger */
    @media (max-width: 768px) and (hover: none) {
      .mic-button {
@@ -2629,6 +2759,7 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
      .error-message {
        font-size: 15px;
        padding: 14px 18px;
+        min-width: 220px;
        max-width: 300px;
        bottom: 94px;
      }
@@ -2637,6 +2768,21 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
        padding: 8px 14px;
        font-size: 14px;
      }
+
+      .no-audio-warning {
+        padding: 12px 16px;
+        gap: 10px;
+        bottom: 145px; /* Above button and waveform on mobile */
+      }
+
+      .no-audio-warning .warning-text {
+        font-size: 15px;
+      }
+
+      .no-audio-warning .settings-link {
+        padding: 6px 12px;
+        font-size: 14px;
+      }
    }
  `,
    ]; }
@@ -2694,6 +2840,14 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
            composed: true,
        }));
    }
+    handleOpenSettings(e) {
+        e.stopPropagation();
+        e.preventDefault();
+        this.dispatchEvent(new CustomEvent("open-settings", {
+            bubbles: true,
+            composed: true,
+        }));
+    }
    getButtonClass() {
        const classes = ["mic-button"];
        if (this.expanded && this.recordingState === "idle") {
@@ -2778,13 +2932,16 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
        }
        return this.recordingState;
    }
-
-        if (this.
+    getActionFeedbackLabel() {
+        if (this.actionFeedback === "command-success") {
            return "Got it!";
        }
-        if (this.
+        if (this.actionFeedback === "command-none") {
            return "No command matched";
        }
+        if (this.actionFeedback === "edit-empty") {
+            return "Couldn't understand edit";
+        }
        return "";
    }
    render() {
@@ -2794,9 +2951,9 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
        const showSiriEdit = this.recordingState === "processing" && this.activeAction === "edit";
        const statusLabel = this.getStatusLabel();
        const showVisualizer = this.shouldShowVisualizer();
-        // Show status label during recording (either visualizer or edit text) OR
-        const
-        const showStatus = this.recordingState === "recording" ||
+        // Show status label during recording (either visualizer or edit text) OR action feedback
+        const showActionFeedback = this.recordingState === "idle" && this.actionFeedback !== null;
+        const showStatus = this.recordingState === "recording" || showActionFeedback;
        const showCancel = this.recordingState === "connecting" ||
            this.recordingState === "recording" ||
            this.recordingState === "processing";
@@ -2828,13 +2985,46 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
            ? b `
          <div class="error-message ${showError ? "visible" : ""}">
            ${this.errorMessage}
-
-
-
+            ${this.showRetryButton
+            ? b `
+                  <button class="retry-button" @click="${this.handleRetry}">
+                    Retry Connection
+                  </button>
+                `
+            : ""}
          </div>
        `
            : ""}
 
+      <div
+        class="no-audio-warning ${this.showNoAudioWarning &&
+            this.recordingState === "recording"
+            ? "visible"
+            : ""}"
+      >
+        <svg
+          class="warning-icon"
+          width="16"
+          height="16"
+          viewBox="0 0 24 24"
+          fill="none"
+          stroke="currentColor"
+          stroke-width="2"
+          stroke-linecap="round"
+          stroke-linejoin="round"
+        >
+          <path
+            d="M10.29 3.86L1.82 18a2 2 0 0 0 1.71 3h16.94a2 2 0 0 0 1.71-3L13.71 3.86a2 2 0 0 0-3.42 0z"
+          />
+          <line x1="12" y1="9" x2="12" y2="13" />
+          <line x1="12" y1="17" x2="12.01" y2="17" />
+        </svg>
+        <span class="warning-text">We're not hearing anything</span>
+        <button class="settings-link" @click="${this.handleOpenSettings}">
+          Check Settings
+        </button>
+      </div>
+
      <button
        class="${this.getButtonClass()}"
        @click="${this.handleClick}"
@@ -2847,14 +3037,14 @@ let SpeechOSMicButton = class SpeechOSMicButton extends i$1 {
      </button>
 
      <span
-        class="status-label ${showStatus ? "visible" : ""} ${
-        ?
+        class="status-label ${showStatus ? "visible" : ""} ${showActionFeedback
+            ? this.actionFeedback
            : showVisualizer
                ? "visualizer"
                : this.getStatusClass()}"
      >
-        ${
-        ? this.
+        ${showActionFeedback
+            ? this.getActionFeedbackLabel()
            : showVisualizer
                ? b `<speechos-audio-visualizer
                ?active="${showVisualizer}"
@@ -2900,9 +3090,15 @@ __decorate([
 __decorate([
    n({ type: String })
 ], SpeechOSMicButton.prototype, "errorMessage", void 0);
+__decorate([
+    n({ type: Boolean })
+], SpeechOSMicButton.prototype, "showRetryButton", void 0);
 __decorate([
    n({ type: String })
-], SpeechOSMicButton.prototype, "
+], SpeechOSMicButton.prototype, "actionFeedback", void 0);
+__decorate([
+    n({ type: Boolean })
+], SpeechOSMicButton.prototype, "showNoAudioWarning", void 0);
 SpeechOSMicButton = __decorate([
    t$1("speechos-mic-button")
 ], SpeechOSMicButton);
@@ -5932,6 +6128,7 @@ let SpeechOSDictationOutputModal = class SpeechOSDictationOutputModal extends i$
        super(...arguments);
        this.open = false;
        this.text = "";
+        this.mode = "dictation";
        this.copied = false;
        this.copyTimeout = null;
    }
@@ -6011,6 +6208,41 @@ let SpeechOSDictationOutputModal = class SpeechOSDictationOutputModal extends i$
      color: #10b981;
      flex-shrink: 0;
    }
+
+    /* Edit mode styles */
+    :host([mode="edit"]) .logo-icon {
+      background: linear-gradient(135deg, #8b5cf6 0%, #6366f1 100%);
+    }
+
+    :host([mode="edit"]) .modal-title {
+      background: linear-gradient(135deg, #a78bfa 0%, #818cf8 100%);
+      -webkit-background-clip: text;
+      -webkit-text-fill-color: transparent;
+      background-clip: text;
+    }
+
+    :host([mode="edit"]) .hint {
+      background: rgba(139, 92, 246, 0.08);
+    }
+
+    :host([mode="edit"]) .hint-icon {
+      color: #8b5cf6;
+    }
+
+    :host([mode="edit"]) .btn-primary {
+      background: linear-gradient(135deg, #8b5cf6 0%, #7c3aed 100%);
+      box-shadow: 0 4px 12px rgba(139, 92, 246, 0.3);
+    }
+
+    :host([mode="edit"]) .btn-primary:hover {
+      background: linear-gradient(135deg, #a78bfa 0%, #8b5cf6 100%);
+      box-shadow: 0 6px 16px rgba(139, 92, 246, 0.4);
+    }
+
+    :host([mode="edit"]) .btn-success {
+      background: linear-gradient(135deg, #a78bfa 0%, #8b5cf6 100%);
+      box-shadow: 0 4px 12px rgba(167, 139, 250, 0.3);
+    }
  `,
    ]; }
    disconnectedCallback() {
@@ -6063,6 +6295,17 @@ let SpeechOSDictationOutputModal = class SpeechOSDictationOutputModal extends i$
            console.error("[SpeechOS] Failed to copy text:", err);
        }
    }
+    get modalTitle() {
+        return this.mode === "edit" ? "Edit Complete" : "Dictation Complete";
+    }
+    get modalIcon() {
+        return this.mode === "edit" ? editIcon(18) : micIcon(18);
+    }
+    get hintText() {
+        return this.mode === "edit"
+            ? "Tip: The editor didn't accept the edit. Copy and paste manually."
+            : "Tip: Focus a text field first to auto-insert next time";
+    }
    render() {
        return b `
      <div
@@ -6072,8 +6315,8 @@ let SpeechOSDictationOutputModal = class SpeechOSDictationOutputModal extends i$
        <div class="modal-card">
          <div class="modal-header">
            <div class="header-content">
-              <div class="logo-icon">${
-              <h2 class="modal-title"
+              <div class="logo-icon">${this.modalIcon}</div>
+              <h2 class="modal-title">${this.modalTitle}</h2>
            </div>
            <button
              class="close-button"
@@ -6090,7 +6333,7 @@ let SpeechOSDictationOutputModal = class SpeechOSDictationOutputModal extends i$
            <svg class="hint-icon" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
              <circle cx="12" cy="12" r="10"/><path d="M12 16v-4M12 8h.01"/>
            </svg>
-            <span
+            <span>${this.hintText}</span>
          </div>
        </div>
 
@@ -6117,6 +6360,9 @@ __decorate([
 __decorate([
    n({ type: String })
 ], SpeechOSDictationOutputModal.prototype, "text", void 0);
+__decorate([
+    n({ type: String, reflect: true })
+], SpeechOSDictationOutputModal.prototype, "mode", void 0);
 __decorate([
    r()
 ], SpeechOSDictationOutputModal.prototype, "copied", void 0);
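Because the new `mode` property is declared with `reflect: true`, setting the property also writes the attribute, which is what lets the `:host([mode="edit"])` rules above take effect. A hedged sketch of driving it from outside the component follows; the tag name used here is an assumption, since it does not appear in this hunk.

    // Sketch: tag name "speechos-dictation-output-modal" is assumed, not shown in this diff.
    const modal = document.querySelector("speechos-dictation-output-modal");
    if (modal) {
      modal.mode = "edit"; // reflected to the mode="edit" attribute via { reflect: true }
      modal.open = true;   // :host([mode="edit"]) styles above now apply
    }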
@@ -6276,15 +6522,29 @@ var SpeechOSWidget_1;
  * duration so users can see the visual feedback before transitioning to recording.
  */
 const MIN_CONNECTING_ANIMATION_MS = 200;
+/**
+ * Time to wait for a transcription event before showing the "no audio" warning (in milliseconds).
+ * If no transcription:interim event is received within this time during recording,
+ * it indicates the server isn't receiving/processing audio.
+ */
+const NO_AUDIO_WARNING_TIMEOUT_MS = 5000;
+/**
+ * Number of consecutive actions with empty results before showing warning on next action.
+ */
+const CONSECUTIVE_NO_AUDIO_THRESHOLD = 2;
 let SpeechOSWidget = class SpeechOSWidget extends i$1 {
    constructor() {
        super(...arguments);
        this.widgetState = core.state.getState();
        this.settingsOpen = false;
+        this.settingsOpenFromWarning = false;
        this.dictationModalOpen = false;
        this.dictationModalText = "";
+        this.dictationModalMode = "dictation";
        this.editHelpModalOpen = false;
-        this.
+        this.actionFeedback = null;
+        this.showNoAudioWarning = false;
+        this.isErrorRetryable = true;
        this.dictationTargetElement = null;
        this.editTargetElement = null;
        this.dictationCursorStart = null;
@@ -6296,7 +6556,7 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        this.modalElement = null;
        this.dictationModalElement = null;
        this.editHelpModalElement = null;
-        this.
+        this.actionFeedbackTimeout = null;
        this.customPosition = null;
        this.isDragging = false;
        this.dragStartPos = null;
@@ -6306,6 +6566,11 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        this.suppressNextClick = false;
        this.boundViewportResizeHandler = null;
        this.boundScrollHandler = null;
+        // No-audio warning state tracking
+        this.consecutiveNoAudioActions = 0;
+        this.transcriptionReceived = false;
+        this.noAudioWarningTimeout = null;
+        this.transcriptionInterimUnsubscribe = null;
    }
    static { SpeechOSWidget_1 = this; }
    static { this.styles = [
@@ -6388,6 +6653,7 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        this.modalElement = document.createElement("speechos-settings-modal");
        this.modalElement.addEventListener("modal-close", () => {
            this.settingsOpen = false;
+            this.settingsOpenFromWarning = false;
        });
        document.body.appendChild(this.modalElement);
        // Mount dictation output modal
@@ -6403,7 +6669,17 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        });
        document.body.appendChild(this.editHelpModalElement);
        this.stateUnsubscribe = core.state.subscribe((newState) => {
-            if (!newState.isVisible
+            if (!newState.isVisible) {
+                if (core.getConfig().debug && this.settingsOpen) {
+                    console.log("[SpeechOS] Closing settings modal: widget hidden");
+                }
+                this.settingsOpen = false;
+                this.settingsOpenFromWarning = false;
+            }
+            else if (!newState.isExpanded && !this.settingsOpenFromWarning) {
+                if (core.getConfig().debug && this.settingsOpen) {
+                    console.log("[SpeechOS] Closing settings modal: widget collapsed");
+                }
                this.settingsOpen = false;
            }
            // Clear custom position when focused element changes (re-anchor to new element)
@@ -6417,6 +6693,8 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        this.errorEventUnsubscribe = core.events.on("error", (payload) => {
            if (this.widgetState.recordingState !== "idle" &&
                this.widgetState.recordingState !== "error") {
+                // Check if this is a non-retryable error (e.g., CSP blocked connection)
+                this.isErrorRetryable = payload.code !== "connection_blocked";
                core.state.setError(payload.message);
                core.getBackend().disconnect().catch(() => { });
            }
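The widget now treats errors with code "connection_blocked" as non-retryable and hides the retry button for them. A minimal sketch of the same check from host code, assuming the event bus and the { code, message } payload shape used in the hunk above:

    // Sketch: payload shape is inferred from the hunk above, not a documented API.
    core.events.on("error", (payload) => {
      const retryable = payload.code !== "connection_blocked";
      console.log(`[host] SpeechOS error: ${payload.message} (retryable: ${retryable})`);
    });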
@@ -6449,9 +6727,9 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
            this.editHelpModalElement.remove();
            this.editHelpModalElement = null;
        }
-        if (this.
-            clearTimeout(this.
-            this.
+        if (this.actionFeedbackTimeout) {
+            clearTimeout(this.actionFeedbackTimeout);
+            this.actionFeedbackTimeout = null;
        }
        if (this.stateUnsubscribe) {
            this.stateUnsubscribe();
@@ -6479,6 +6757,7 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
            window.removeEventListener("scroll", this.boundScrollHandler);
            this.boundScrollHandler = null;
        }
+        this.cleanupNoAudioWarningTracking();
    }
    updated(changedProperties) {
        if (changedProperties.has("settingsOpen") && this.modalElement) {
@@ -6490,6 +6769,9 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        if (changedProperties.has("dictationModalText") && this.dictationModalElement) {
            this.dictationModalElement.text = this.dictationModalText;
        }
+        if (changedProperties.has("dictationModalMode") && this.dictationModalElement) {
+            this.dictationModalElement.mode = this.dictationModalMode;
+        }
        if (changedProperties.has("editHelpModalOpen") && this.editHelpModalElement) {
            this.editHelpModalElement.open = this.editHelpModalOpen;
        }
@@ -6656,7 +6938,7 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        }
        if (this.widgetState.recordingState === "idle") {
            // Clear command feedback on any mic click
-            this.
+            this.clearActionFeedback();
            // If we're expanding, prefetch the token to reduce latency when user selects an action
            if (!this.widgetState.isExpanded) {
                // Fire and forget - we don't need to wait for this (LiveKit only)
@@ -6677,6 +6959,8 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        }
    }
    async handleStopRecording() {
+        // Clean up no-audio warning tracking
+        this.cleanupNoAudioWarningTracking();
        if (this.widgetState.activeAction === "edit") {
            await this.handleStopEdit();
        }
@@ -6688,14 +6972,27 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        const backend = core.getBackend();
        try {
            const transcription = await this.withMinDisplayTime(backend.stopVoiceSession(), 300);
+            // Track result for consecutive failure detection
+            this.trackActionResult(!!transcription);
            if (transcription) {
+                if (core.getConfig().debug) {
+                    console.log("[SpeechOS] Transcription received:", {
+                        transcription,
+                        dictationTargetElement: this.dictationTargetElement,
+                        tagName: this.dictationTargetElement?.tagName,
+                    });
+                }
                // Check if we have a target element to insert into
                if (this.dictationTargetElement) {
                    this.insertTranscription(transcription);
                }
                else {
                    // No target element - show dictation output modal
+                    if (core.getConfig().debug) {
+                        console.log("[SpeechOS] No target element, showing dictation modal");
+                    }
                    this.dictationModalText = transcription;
+                    this.dictationModalMode = "dictation";
                    this.dictationModalOpen = true;
                }
                transcriptStore.saveTranscript(transcription, "dictate");
@@ -6707,6 +7004,8 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
            backend.startAutoRefresh?.();
        }
        catch (error) {
+            // Track as failed result
+            this.trackActionResult(false);
            const errorMessage = error instanceof Error ? error.message : "Failed to transcribe audio";
            if (errorMessage !== "Disconnected") {
                core.state.setError(errorMessage);
@@ -6716,6 +7015,8 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        }
    }
    async handleCancelOperation() {
+        // Clean up no-audio warning tracking
+        this.cleanupNoAudioWarningTracking();
        await core.getBackend().disconnect();
        if (this.widgetState.recordingState === "error") {
            core.state.clearError();
@@ -6745,7 +7046,7 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        }
    }
    handleCloseWidget() {
-        this.
+        this.clearActionFeedback();
        core.getBackend().stopAutoRefresh?.();
        core.state.hide();
    }
@@ -6836,45 +7137,70 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
            return;
        }
        const tagName = target.tagName.toLowerCase();
+        const originalContent = this.getElementContent(target) || "";
        if (tagName === "input" || tagName === "textarea") {
            const inputEl = target;
+            // Restore cursor position before inserting
            const start = this.dictationCursorStart ?? inputEl.value.length;
            const end = this.dictationCursorEnd ?? inputEl.value.length;
-
-
-            inputEl
-            if (this.supportsSelection(inputEl)) {
-                const newCursorPos = start + text.length;
-                inputEl.setSelectionRange(newCursorPos, newCursorPos);
-            }
-            inputEl.dispatchEvent(new Event("input", { bubbles: true }));
-            inputEl.focus();
+            inputEl.setSelectionRange(start, end);
+            // Use text-field-edit to insert text (handles undo, events, etc.)
+            insertTextIntoField(inputEl, text);
            core.state.setFocusedElement(inputEl);
        }
        else if (target.isContentEditable) {
            target.focus();
            core.state.setFocusedElement(target);
-
-            target
-            const selection = window.getSelection();
-            if (selection) {
-                const range = document.createRange();
-                range.selectNodeContents(textNode);
-                range.collapse(false);
-                selection.removeAllRanges();
-                selection.addRange(range);
-            }
-            target.dispatchEvent(new Event("input", { bubbles: true }));
+            // Use text-field-edit for contentEditable elements
+            insertTextIntoField(target, text);
        }
        core.events.emit("transcription:inserted", { text, element: target });
+        // Verify insertion was applied after DOM updates
+        this.verifyInsertionApplied(target, text, originalContent);
        this.dictationTargetElement = null;
        this.dictationCursorStart = null;
        this.dictationCursorEnd = null;
    }
+    /**
+     * Verify that a dictation insertion was actually applied to the target element.
+     * Some custom editors (CodeMirror, Monaco, Slate, etc.) don't respond to
+     * standard DOM editing methods. If the insertion fails, show a fallback modal.
+     */
+    verifyInsertionApplied(target, insertedText, originalContent) {
+        // Use requestAnimationFrame to check after DOM updates
+        requestAnimationFrame(() => {
+            const tagName = target.tagName.toLowerCase();
+            let currentContent = "";
+            if (tagName === "input" || tagName === "textarea") {
+                currentContent = target.value;
+            }
+            else if (target.isContentEditable) {
+                currentContent = target.textContent || "";
+            }
+            // Check if the insertion was applied:
+            // - Content should contain the inserted text
+            // - Or content should be different from original (for empty fields)
+            const insertionApplied = currentContent.includes(insertedText) ||
+                (originalContent === "" && currentContent !== "");
+            if (!insertionApplied) {
+                if (core.getConfig().debug) {
+                    console.log("[SpeechOS] Dictation failed to insert, showing fallback modal", {
+                        insertedText,
+                        currentContent,
+                        originalContent,
+                    });
+                }
+                // Show fallback modal with dictation mode styling
+                this.dictationModalText = insertedText;
+                this.dictationModalMode = "dictation";
+                this.dictationModalOpen = true;
+            }
+        });
+    }
    handleActionSelect(event) {
        const { action } = event.detail;
        // Clear any existing command feedback when a new action is selected
-        this.
+        this.clearActionFeedback();
        core.state.setActiveAction(action);
        if (action === "dictate") {
            this.startDictation();
@@ -6909,6 +7235,13 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        this.dictationTargetElement = this.widgetState.focusedElement;
        this.dictationCursorStart = null;
        this.dictationCursorEnd = null;
+        if (core.getConfig().debug) {
+            console.log("[SpeechOS] startDictation:", {
+                focusedElement: this.widgetState.focusedElement,
+                dictationTargetElement: this.dictationTargetElement,
+                tagName: this.dictationTargetElement?.tagName,
+            });
+        }
        if (this.dictationTargetElement) {
            const tagName = this.dictationTargetElement.tagName.toLowerCase();
            if (tagName === "input" || tagName === "textarea") {
@@ -6934,13 +7267,18 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
                // Ensure minimum animation duration before transitioning to recording
                const elapsed = Date.now() - connectingStartTime;
                const remainingDelay = MIN_CONNECTING_ANIMATION_MS - elapsed;
+                const startRecording = () => {
+                    if (core.state.getState().recordingState === "error") {
+                        return;
+                    }
+                    core.state.setRecordingState("recording");
+                    this.startNoAudioWarningTracking();
+                };
                if (remainingDelay > 0) {
-                    setTimeout(
-                        core.state.setRecordingState("recording");
-                    }, remainingDelay);
+                    setTimeout(startRecording, remainingDelay);
                }
                else {
-
+                    startRecording();
                }
            },
        });
@@ -6948,7 +7286,10 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : "Connection failed";
            if (errorMessage !== "Disconnected") {
-
+                // Only set error if not already in error state (error event may have already set it)
+                if (this.widgetState.recordingState !== "error") {
+                    core.state.setError(`Failed to connect: ${errorMessage}`);
+                }
                await backend.disconnect();
            }
        }
@@ -6958,6 +7299,13 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        this.editSelectionStart = null;
        this.editSelectionEnd = null;
        this.editSelectedText = "";
+        if (core.getConfig().debug) {
+            console.log("[SpeechOS] startEdit:", {
+                focusedElement: this.widgetState.focusedElement,
+                editTargetElement: this.editTargetElement,
+                tagName: this.editTargetElement?.tagName,
+            });
+        }
        if (this.editTargetElement) {
            const tagName = this.editTargetElement.tagName.toLowerCase();
            if (tagName === "input" || tagName === "textarea") {
@@ -6968,7 +7316,8 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
                const start = this.editSelectionStart ?? 0;
                const end = this.editSelectionEnd ?? 0;
                if (start !== end) {
-
+                    // Use getFieldSelection from text-field-edit
+                    this.editSelectedText = getFieldSelection(inputEl);
                }
            }
            else {
@@ -6977,13 +7326,11 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
            }
        }
        else if (this.editTargetElement.isContentEditable) {
-
-
-
-
-
-            this.editSelectedText = selectedText;
-        }
+            // Use getFieldSelection from text-field-edit for contentEditable too
+            const selectedText = getFieldSelection(this.editTargetElement);
+            this.editSelectionStart = 0;
+            this.editSelectionEnd = selectedText.length;
+            this.editSelectedText = selectedText;
        }
    }
    // Capture the content to edit at start time (sent with auth message)
@@ -7000,13 +7347,18 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
                // Ensure minimum animation duration before transitioning to recording
                const elapsed = Date.now() - connectingStartTime;
                const remainingDelay = MIN_CONNECTING_ANIMATION_MS - elapsed;
+                const startRecording = () => {
+                    if (core.state.getState().recordingState === "error") {
+                        return;
+                    }
+                    core.state.setRecordingState("recording");
+                    this.startNoAudioWarningTracking();
+                };
                if (remainingDelay > 0) {
-                    setTimeout(
-                        core.state.setRecordingState("recording");
-                    }, remainingDelay);
+                    setTimeout(startRecording, remainingDelay);
                }
                else {
-
+                    startRecording();
                }
            },
        });
@@ -7014,7 +7366,10 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : "Connection failed";
            if (errorMessage !== "Disconnected") {
-
+                // Only set error if not already in error state (error event may have already set it)
+                if (this.widgetState.recordingState !== "error") {
+                    core.state.setError(`Failed to connect: ${errorMessage}`);
+                }
                await backend.disconnect();
            }
        }
@@ -7025,12 +7380,30 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        const backend = core.getBackend();
        try {
            const editedText = await this.withMinDisplayTime(backend.requestEditText(originalContent), 300);
+            // Check if server returned no change (couldn't understand edit)
+            const noChange = editedText.trim() === originalContent.trim();
+            if (noChange) {
+                this.trackActionResult(false);
+                this.showActionFeedback("edit-empty");
+                core.state.completeRecording();
+                this.editTargetElement = null;
+                this.editSelectionStart = null;
+                this.editSelectionEnd = null;
+                this.editSelectedText = "";
+                backend.disconnect().catch(() => { });
+                backend.startAutoRefresh?.();
+                return;
+            }
+            // Track result - got a meaningful change
+            this.trackActionResult(true);
            this.applyEdit(editedText);
            backend.disconnect().catch(() => { });
            // Start auto-refresh to keep token fresh for subsequent commands (LiveKit only)
            backend.startAutoRefresh?.();
        }
        catch (error) {
+            // Track as failed result
+            this.trackActionResult(false);
            const errorMessage = error instanceof Error ? error.message : "Failed to apply edit";
            if (errorMessage !== "Disconnected") {
                core.state.setError(errorMessage);
@@ -7053,13 +7426,18 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
                // Ensure minimum animation duration before transitioning to recording
                const elapsed = Date.now() - connectingStartTime;
                const remainingDelay = MIN_CONNECTING_ANIMATION_MS - elapsed;
+                const startRecording = () => {
+                    if (core.state.getState().recordingState === "error") {
+                        return;
+                    }
+                    core.state.setRecordingState("recording");
+                    this.startNoAudioWarningTracking();
+                };
                if (remainingDelay > 0) {
-                    setTimeout(
-                        core.state.setRecordingState("recording");
-                    }, remainingDelay);
+                    setTimeout(startRecording, remainingDelay);
                }
                else {
-
+                    startRecording();
                }
            },
        });
@@ -7067,7 +7445,10 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : "Connection failed";
            if (errorMessage !== "Disconnected") {
-
+                // Only set error if not already in error state (error event may have already set it)
+                if (this.widgetState.recordingState !== "error") {
+                    core.state.setError(`Failed to connect: ${errorMessage}`);
+                }
                await backend.disconnect();
            }
        }
@@ -7079,6 +7460,8 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        const backend = core.getBackend();
        try {
            const result = await this.withMinDisplayTime(backend.requestCommand(commands), 300);
+            // Track result - null result means no command matched (possibly no audio)
+            this.trackActionResult(result !== null);
            // Get input text from the backend if available
            const inputText = backend.getLastInputText?.();
            // Save to transcript store
@@ -7096,12 +7479,14 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
            // Keep widget visible but collapsed (just mic button, no action bubbles)
            core.state.setState({ isExpanded: false });
            // Show command feedback
-            this.
+            this.showActionFeedback(result ? "command-success" : "command-none");
            backend.disconnect().catch(() => { });
            // Start auto-refresh to keep token fresh for subsequent commands (LiveKit only)
            backend.startAutoRefresh?.();
        }
        catch (error) {
+            // Track as failed result
+            this.trackActionResult(false);
            const errorMessage = error instanceof Error ? error.message : "Failed to process command";
            if (errorMessage !== "Disconnected") {
                core.state.setError(errorMessage);
@@ -7109,24 +7494,110 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
            }
        }
    }
-
-    this.
+    showActionFeedback(feedback) {
+        this.actionFeedback = feedback;
        // Clear any existing timeout
-        if (this.
-            clearTimeout(this.
+        if (this.actionFeedbackTimeout) {
+            clearTimeout(this.actionFeedbackTimeout);
        }
        // Auto-dismiss after 4 seconds
-        this.
-        this.
-        this.
+        this.actionFeedbackTimeout = window.setTimeout(() => {
+            this.actionFeedback = null;
+            this.actionFeedbackTimeout = null;
        }, 4000);
    }
-
-        if (this.
-            clearTimeout(this.
-            this.
+    clearActionFeedback() {
+        if (this.actionFeedbackTimeout) {
+            clearTimeout(this.actionFeedbackTimeout);
+            this.actionFeedbackTimeout = null;
+        }
+        this.actionFeedback = null;
+    }
+    /**
+     * Start tracking for no-audio warning when recording begins.
+     */
+    startNoAudioWarningTracking() {
+        this.transcriptionReceived = false;
+        this.showNoAudioWarning = false;
+        // If we had consecutive failures, show warning immediately
+        if (this.consecutiveNoAudioActions >= CONSECUTIVE_NO_AUDIO_THRESHOLD) {
+            this.showNoAudioWarning = true;
+        }
+        // Start timeout - if no transcription within 5s, show warning
+        this.noAudioWarningTimeout = window.setTimeout(() => {
+            if (!this.transcriptionReceived &&
+                this.widgetState.recordingState === "recording") {
+                this.showNoAudioWarning = true;
+            }
+        }, NO_AUDIO_WARNING_TIMEOUT_MS);
+        // Subscribe to transcription:interim events
+        this.transcriptionInterimUnsubscribe = core.events.on("transcription:interim", () => {
+            this.transcriptionReceived = true;
+            if (this.showNoAudioWarning) {
+                this.showNoAudioWarning = false;
+            }
+        });
+    }
+    /**
+     * Clean up no-audio warning tracking when recording stops.
+     */
+    cleanupNoAudioWarningTracking() {
+        if (this.noAudioWarningTimeout !== null) {
+            clearTimeout(this.noAudioWarningTimeout);
+            this.noAudioWarningTimeout = null;
        }
-        this.
+        if (this.transcriptionInterimUnsubscribe) {
+            this.transcriptionInterimUnsubscribe();
+            this.transcriptionInterimUnsubscribe = null;
+        }
+        this.showNoAudioWarning = false;
+    }
+    /**
+     * Track the result of an action for consecutive failure detection.
+     */
+    trackActionResult(hasContent) {
+        if (hasContent) {
+            this.consecutiveNoAudioActions = 0;
+        }
+        else {
+            this.consecutiveNoAudioActions++;
+        }
+    }
+    /**
+     * Handle opening settings from the no-audio warning.
+     * Stops the current dictation session immediately, then opens settings.
+     */
+    async handleOpenSettingsFromWarning() {
+        if (core.getConfig().debug) {
+            console.log("[SpeechOS] No-audio settings link clicked");
+        }
+        // Clean up no-audio warning tracking first
+        this.cleanupNoAudioWarningTracking();
+        // Keep settings open even if widget collapses
+        this.settingsOpenFromWarning = true;
+        // Stop audio capture and disconnect immediately (don't wait for transcription)
+        // Kick this off before opening settings so audio stops fast, but don't block UI.
+        const disconnectPromise = core.getBackend().disconnect().catch((error) => {
+            if (core.getConfig().debug) {
+                console.log("[SpeechOS] Disconnect failed while opening settings", error);
+            }
+        });
+        // Update UI state to idle
+        core.state.cancelRecording();
+        // Clear target elements
+        this.dictationTargetElement = null;
+        this.editTargetElement = null;
+        this.dictationCursorStart = null;
+        this.dictationCursorEnd = null;
+        this.editSelectionStart = null;
+        this.editSelectionEnd = null;
+        this.editSelectedText = "";
+        // Open settings modal
+        this.settingsOpen = true;
+        if (core.getConfig().debug) {
+            console.log("[SpeechOS] Settings modal opened from no-audio warning");
+        }
+        await disconnectPromise;
    }
    supportsSelection(element) {
        if (element.tagName.toLowerCase() === "textarea") {
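The warning logic above combines a per-recording timeout with a consecutive-failure counter. A condensed, self-contained sketch of that timing model in plain JavaScript (independent of the widget class; names other than the two constants are invented for illustration):

    // Sketch: the same timing model, reduced to a standalone tracker.
    const NO_AUDIO_WARNING_TIMEOUT_MS = 5000;
    const CONSECUTIVE_NO_AUDIO_THRESHOLD = 2;

    function createNoAudioTracker(onWarn, onClear) {
      let consecutiveFailures = 0;
      let timeoutId = null;
      return {
        startRecording() {
          // Warn immediately if the last two actions produced nothing.
          if (consecutiveFailures >= CONSECUTIVE_NO_AUDIO_THRESHOLD) onWarn();
          // Otherwise warn after 5s with no interim transcription.
          timeoutId = setTimeout(onWarn, NO_AUDIO_WARNING_TIMEOUT_MS);
        },
        interimReceived() {
          if (timeoutId) clearTimeout(timeoutId);
          onClear();
        },
        stopRecording(hasContent) {
          if (timeoutId) clearTimeout(timeoutId);
          consecutiveFailures = hasContent ? 0 : consecutiveFailures + 1;
          onClear();
        },
      };
    }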
@@ -7142,21 +7613,14 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        const tagName = element.tagName.toLowerCase();
        if (tagName === "input" || tagName === "textarea") {
            const inputEl = element;
-            const
-
-
-            const hasSelection = start !== end;
-            if (hasSelection) {
-                return fullContent.substring(start, end);
-            }
-            return fullContent;
+            const selectedText = getFieldSelection(inputEl);
+            // If there's selected text, return it; otherwise return full content
+            return selectedText || inputEl.value;
        }
        else if (element.isContentEditable) {
-            const
-
-
-            }
-            return element.textContent || "";
+            const selectedText = getFieldSelection(element);
+            // If there's selected text, return it; otherwise return full content
+            return selectedText || element.textContent || "";
        }
        return "";
    }
@@ -7171,40 +7635,44 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        if (tagName === "input" || tagName === "textarea") {
            const inputEl = target;
            originalContent = inputEl.value;
-
-
-
-
-
-
-
-
-            else {
-                inputEl.setSelectionRange(0, inputEl.value.length);
-            }
-            document.execCommand("insertText", false, editedText);
+            // Restore the original selection/cursor position
+            const selectionStart = this.editSelectionStart ?? 0;
+            const selectionEnd = this.editSelectionEnd ?? inputEl.value.length;
+            const hasSelection = selectionStart !== selectionEnd;
+            if (hasSelection) {
+                // Restore selection, then use insertTextIntoField() to replace it
+                inputEl.setSelectionRange(selectionStart, selectionEnd);
+                insertTextIntoField(inputEl, editedText);
            }
            else {
-
-                inputEl
+                // No selection - replace entire content using setFieldText()
+                setFieldText(inputEl, editedText);
            }
            core.state.setFocusedElement(inputEl);
        }
        else if (target.isContentEditable) {
            originalContent = target.textContent || "";
-            target.focus();
-            core.state.setFocusedElement(target);
            const hasSelection = this.editSelectionStart !== null &&
                this.editSelectionEnd !== null &&
                this.editSelectionStart !== this.editSelectionEnd;
-            if (
+            if (hasSelection) {
+                // Selection exists - focus and insert (assumes selection is still active or we restore it)
+                target.focus();
+                insertTextIntoField(target, editedText);
+            }
+            else {
+                // No selection - select all content first, then replace with insertTextIntoField()
+                target.focus();
                const selection = window.getSelection();
-
-
-
-
+                if (selection) {
+                    const range = document.createRange();
+                    range.selectNodeContents(target);
+                    selection.removeAllRanges();
+                    selection.addRange(range);
+                }
+                insertTextIntoField(target, editedText);
            }
-
+            core.state.setFocusedElement(target);
        }
        transcriptStore.saveTranscript(editedText, "edit", originalContent);
        core.events.emit("edit:applied", {
@@ -7213,11 +7681,54 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
            element: target,
        });
        core.state.completeRecording();
+        // Verify edit was applied after DOM updates
+        this.verifyEditApplied(target, editedText, originalContent);
        this.editTargetElement = null;
        this.editSelectionStart = null;
        this.editSelectionEnd = null;
        this.editSelectedText = "";
    }
+    /**
+     * Verify that an edit was actually applied to the target element.
+     * Some custom editors (CodeMirror, Monaco, Slate, etc.) don't respond to
+     * standard DOM editing methods. If the edit fails, show a fallback modal.
+     */
+    verifyEditApplied(target, editedText, originalContent) {
+        // Use requestAnimationFrame to check after DOM updates
+        requestAnimationFrame(() => {
+            const tagName = target.tagName.toLowerCase();
+            let currentContent = "";
+            if (tagName === "input" || tagName === "textarea") {
+                currentContent = target.value;
+            }
+            else if (target.isContentEditable) {
+                currentContent = target.textContent || "";
+            }
+            // Normalize whitespace for comparison
+            const normalizedCurrent = currentContent.trim();
+            const normalizedEdited = editedText.trim();
+            const normalizedOriginal = originalContent.trim();
+            // Check if the edit was applied:
+            // - Content should be different from original (unless edit was no-op)
+            // - Content should contain or match the edited text
+            const editApplied = normalizedCurrent !== normalizedOriginal ||
+                normalizedCurrent === normalizedEdited ||
+                normalizedCurrent.includes(normalizedEdited);
+            if (!editApplied) {
+                if (core.getConfig().debug) {
+                    console.log("[SpeechOS] Edit failed to apply, showing fallback modal", {
+                        expected: editedText,
+                        actual: currentContent,
+                        original: originalContent,
+                    });
+                }
+                // Show fallback modal with edit mode styling
+                this.dictationModalText = editedText;
+                this.dictationModalMode = "edit";
+                this.dictationModalOpen = true;
+            }
+        });
+    }
    render() {
        if (!this.widgetState.isVisible) {
            this.setAttribute("hidden", "");
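The verification step above is a general pattern: mutate the field, then check on the next animation frame whether the DOM actually changed, and fall back if a framework-controlled editor swallowed the edit. A stripped-down sketch of the same idea (helper names reuse the functions bundled earlier in this file; everything else is illustrative):

    // Sketch: generic "did the edit stick?" check, mirroring verifyEditApplied above.
    function applyAndVerify(field, newText, onFallback) {
      const before = field.value ?? field.textContent ?? "";
      insertTextIntoField(field, newText); // helper added earlier in this diff
      requestAnimationFrame(() => {
        const after = field.value ?? field.textContent ?? "";
        const applied = after.includes(newText) || after !== before;
        if (!applied) onFallback(newText); // e.g. open the copy/paste fallback modal
      });
    }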
@@ -7245,12 +7756,15 @@ let SpeechOSWidget = class SpeechOSWidget extends i$1 {
        activeAction="${this.widgetState.activeAction || ""}"
        editPreviewText="${this.editSelectedText}"
        errorMessage="${this.widgetState.errorMessage || ""}"
-
+        ?showRetryButton="${this.isErrorRetryable}"
+        .actionFeedback="${this.actionFeedback}"
+        ?showNoAudioWarning="${this.showNoAudioWarning}"
        @mic-click="${this.handleMicClick}"
        @stop-recording="${this.handleStopRecording}"
        @cancel-operation="${this.handleCancelOperation}"
        @retry-connection="${this.handleRetryConnection}"
        @close-widget="${this.handleCloseWidget}"
+        @open-settings="${this.handleOpenSettingsFromWarning}"
      ></speechos-mic-button>
    </div>
  </div>
@@ -7269,12 +7783,21 @@ __decorate([
 __decorate([
    r()
 ], SpeechOSWidget.prototype, "dictationModalText", void 0);
+__decorate([
+    r()
+], SpeechOSWidget.prototype, "dictationModalMode", void 0);
 __decorate([
    r()
 ], SpeechOSWidget.prototype, "editHelpModalOpen", void 0);
 __decorate([
    r()
-], SpeechOSWidget.prototype, "
+], SpeechOSWidget.prototype, "actionFeedback", void 0);
+__decorate([
+    r()
+], SpeechOSWidget.prototype, "showNoAudioWarning", void 0);
+__decorate([
+    r()
+], SpeechOSWidget.prototype, "isErrorRetryable", void 0);
 SpeechOSWidget = SpeechOSWidget_1 = __decorate([
    t$1("speechos-widget")
 ], SpeechOSWidget);