ui-soxo-bootstrap-core 2.6.25 → 2.6.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,51 +1,62 @@
1
- .action-buttons-layout {
2
- display: grid;
3
- grid-template-rows: minmax(0, 1fr) auto;
1
+ .action-buttons-shell {
2
+ display: flex;
3
+ flex-direction: column;
4
4
  flex: 1;
5
- min-height: 0;
6
- overflow: hidden;
5
+ min-height: 100%;
7
6
  }
8
7
 
9
- .action-buttons-content {
8
+ .action-body {
9
+ flex: 1 1 auto;
10
10
  min-height: 0;
11
- overflow-y: auto;
12
- overflow-x: hidden;
13
- overscroll-behavior: contain;
14
-
15
11
  padding-bottom: 8px;
12
+ overflow: auto;
13
+ }
14
+
15
+ .action-footer {
16
+ position: sticky;
17
+ bottom: 0;
18
+ margin-top: auto;
19
+ z-index: 8;
20
+ background: linear-gradient(180deg, rgba(251, 253, 255, 0) 0%, #fbfdff 32%, #fbfdff 100%);
21
+ padding-top: 12px;
16
22
  }
17
23
 
18
24
  .action-buttons-container {
19
- margin-top: 0;
20
- flex-shrink: 0;
21
- // display: flex;
25
+ display: flex;
22
26
  justify-content: flex-start;
23
27
  gap: 10px;
24
- // position: relative;
25
- z-index: 2;
26
- background: #fff;
27
- padding: 12px 0 10px;
28
- border-top: 1px solid #f0f0f0;
29
- box-shadow: 0 -2px 10px rgba(15, 23, 42, 0.04);
30
-
31
- width: 61%;
32
- padding: 10px;
33
- position: fixed;
34
- bottom: 10px;
35
- border: 1px solid #f1f1f1;
36
- margin: flx;
37
- display: flex;
38
- border-radius: 4px;
39
- box-shadow: -1px -4px 10px 2px #f7f7f76e;
28
+ padding: 12px 0 14px;
29
+ border-top: 1px solid #edf2f8;
40
30
  flex-wrap: wrap;
41
31
 
42
32
  .ant-btn {
43
- border-radius: 4px;
33
+ border-radius: 8px;
34
+ min-width: 110px;
35
+ font-weight: 600;
44
36
  }
45
37
  }
46
38
 
47
- .process-steps-page.is-fullscreen .action-buttons-container {
48
- width: calc(100% - 24px);
49
- left: 12px;
50
- right: 12px;
39
+ @media (max-width: 992px) {
40
+ .action-body {
41
+ min-height: 180px;
42
+ }
43
+
44
+ .action-footer {
45
+ position: static;
46
+ padding-top: 4px;
47
+ }
48
+
49
+ .action-buttons-container {
50
+ padding: 10px 0 0;
51
+ border-top: 0;
52
+ }
53
+ }
54
+
55
+ @media (max-width: 576px) {
56
+ .action-buttons-container {
57
+ .ant-btn {
58
+ flex: 1 1 calc(50% - 10px);
59
+ min-width: 0;
60
+ }
61
+ }
51
62
  }
@@ -0,0 +1,141 @@
1
+ const GEMINI_CHAT_MODEL = process.env.GEMINI_CHAT_MODEL || process.env.REACT_APP_GEMINI_CHAT_MODEL || 'gemini-2.5-flash';
2
+
3
+ const GEMINI_API_BASE_URL =
4
+ process.env.GEMINI_API_BASE_URL || process.env.REACT_APP_GEMINI_API_BASE_URL || 'https://generativelanguage.googleapis.com/v1beta';
5
+
6
+ function getGeminiApiKey() {
7
+ if (process.env.GEMINI_API_KEY) {
8
+ return process.env.GEMINI_API_KEY;
9
+ }
10
+
11
+ if (process.env.REACT_APP_GEMINI_API_KEY) {
12
+ return process.env.REACT_APP_GEMINI_API_KEY;
13
+ }
14
+
15
+ if (typeof window !== 'undefined') {
16
+ try {
17
+ if (window.localStorage) {
18
+ return window.localStorage.getItem('gemini_api_key');
19
+ }
20
+ } catch (error) {
21
+ return null;
22
+ }
23
+ }
24
+
25
+ return null;
26
+ }
27
+
28
+ function normalizeHistory(history = []) {
29
+ return history.slice(-10).map((item) => ({
30
+ role: item.role === 'assistant' ? 'model' : 'user',
31
+ parts: [{ text: item.text }],
32
+ }));
33
+ }
34
+
35
+ function extractText(payload) {
36
+ const candidates = payload && payload.candidates ? payload.candidates : [];
37
+
38
+ for (const candidate of candidates) {
39
+ const parts = candidate && candidate.content && candidate.content.parts ? candidate.content.parts : [];
40
+
41
+ const text = parts
42
+ .map((part) => (part && typeof part.text === 'string' ? part.text : ''))
43
+ .join(' ')
44
+ .trim();
45
+
46
+ if (text) {
47
+ return text;
48
+ }
49
+ }
50
+
51
+ return '';
52
+ }
53
+
54
+ async function generateText({ prompt, history = [], temperature = 0.65, maxOutputTokens = 360 }) {
55
+ const apiKey = getGeminiApiKey();
56
+
57
+ if (!apiKey) {
58
+ throw new Error('Gemini API key is missing');
59
+ }
60
+
61
+ const endpoint = `${GEMINI_API_BASE_URL}/models/${GEMINI_CHAT_MODEL}:generateContent?key=${encodeURIComponent(apiKey)}`;
62
+ const contents = [...normalizeHistory(history), { role: 'user', parts: [{ text: prompt }] }];
63
+
64
+ const response = await fetch(endpoint, {
65
+ method: 'POST',
66
+ headers: {
67
+ 'Content-Type': 'application/json',
68
+ },
69
+ body: JSON.stringify({
70
+ contents,
71
+ generationConfig: {
72
+ temperature,
73
+ maxOutputTokens,
74
+ },
75
+ }),
76
+ });
77
+
78
+ if (!response.ok) {
79
+ throw new Error(`Gemini chat request failed with status ${response.status}`);
80
+ }
81
+
82
+ const payload = await response.json();
83
+ const text = extractText(payload);
84
+
85
+ if (!text) {
86
+ throw new Error('Gemini chat response did not include text');
87
+ }
88
+
89
+ return text;
90
+ }
91
+
92
+ export async function generateStepAssistantMessage({ step, index, total, fallbackText, history = [] }) {
93
+ const stepName = (step && step.step_name) || `Step ${index + 1}`;
94
+ const stepDescription = (step && step.step_description) || '';
95
+
96
+ const prompt = [
97
+ 'You are a warm healthcare concierge assistant inside a step-by-step consultation app.',
98
+ 'Write a short, patient-friendly message in 3-4 sentences.',
99
+ 'It must be reassuring, clear, and engaging.',
100
+ `Current step: ${index + 1} of ${total}.`,
101
+ `Step title: ${stepName}.`,
102
+ `Step details: ${stepDescription || 'No additional description'}.`,
103
+ 'Explain what the guest can expect in this step and what happens next.',
104
+ 'Avoid medical claims and avoid markdown bullets.',
105
+ ].join(' ');
106
+
107
+ try {
108
+ return await generateText({
109
+ prompt,
110
+ history,
111
+ temperature: 0.7,
112
+ maxOutputTokens: 260,
113
+ });
114
+ } catch (error) {
115
+ return fallbackText;
116
+ }
117
+ }
118
+
119
+ export async function generateChatAssistantReply({ step, index, total, userMessage, history = [] }) {
120
+ const stepName = (step && step.step_name) || `Step ${index + 1}`;
121
+ const stepDescription = (step && step.step_description) || '';
122
+
123
+ const prompt = [
124
+ 'You are a conversational healthcare onboarding assistant inside a guided consultation workflow.',
125
+ 'Respond in 2-5 short sentences.',
126
+ 'Stay patient-friendly, practical, and calm.',
127
+ `Current step: ${index + 1} of ${total}.`,
128
+ `Current step title: ${stepName}.`,
129
+ `Current step description: ${stepDescription || 'No additional description'}.`,
130
+ `Guest message: "${userMessage}".`,
131
+ 'If the user asks about next or previous steps, explain the flow simply.',
132
+ 'Do not provide diagnosis or treatment advice.',
133
+ ].join(' ');
134
+
135
+ return generateText({
136
+ prompt,
137
+ history,
138
+ temperature: 0.6,
139
+ maxOutputTokens: 320,
140
+ });
141
+ }
@@ -0,0 +1,275 @@
1
+ const DEFAULT_MODEL =
2
+ process.env.OPENAI_REALTIME_MODEL ||
3
+ process.env.REACT_APP_OPENAI_REALTIME_MODEL ||
4
+ 'gpt-realtime';
5
+ const DEFAULT_VOICE =
6
+ process.env.OPENAI_REALTIME_VOICE ||
7
+ process.env.REACT_APP_OPENAI_REALTIME_VOICE ||
8
+ 'alloy';
9
+ const DEFAULT_CALLS_ENDPOINT =
10
+ process.env.OPENAI_REALTIME_ENDPOINT ||
11
+ process.env.REACT_APP_OPENAI_REALTIME_ENDPOINT ||
12
+ 'https://api.openai.com/v1/realtime/calls';
13
+ const DEFAULT_TOKEN_ENDPOINT =
14
+ process.env.OPENAI_REALTIME_TOKEN_ENDPOINT ||
15
+ process.env.REACT_APP_OPENAI_REALTIME_TOKEN_ENDPOINT ||
16
+ '';
17
+ const DEFAULT_INSTRUCTIONS =
18
+ process.env.OPENAI_REALTIME_INSTRUCTIONS ||
19
+ process.env.REACT_APP_OPENAI_REALTIME_INSTRUCTIONS ||
20
+ 'You are a warm, concise healthcare concierge assisting a guest during a guided process.';
21
+
22
+ function getFromStorage(storageKey) {
23
+ if (typeof window === 'undefined' || !window.localStorage) {
24
+ return null;
25
+ }
26
+
27
+ try {
28
+ return window.localStorage.getItem(storageKey);
29
+ } catch (error) {
30
+ return null;
31
+ }
32
+ }
33
+
34
+ function resolveOpenAIKey() {
35
+ return (
36
+ process.env.OPENAI_API_KEY ||
37
+ process.env.OPEN_AI_KEY ||
38
+ process.env.REACT_APP_OPENAI_API_KEY ||
39
+ process.env.REACT_APP_OPEN_AI_KEY ||
40
+ getFromStorage('openai_api_key') ||
41
+ getFromStorage('open_ai_key') ||
42
+ getFromStorage('OPENAI_API_KEY') ||
43
+ getFromStorage('OPEN_AI_KEY') ||
44
+ getFromStorage('REACT_APP_OPENAI_API_KEY') ||
45
+ getFromStorage('REACT_APP_OPEN_AI_KEY') ||
46
+ null
47
+ );
48
+ }
49
+
50
+ async function fetchEphemeralKey(tokenEndpoint) {
51
+ const response = await fetch(tokenEndpoint, {
52
+ method: 'GET',
53
+ headers: { 'Content-Type': 'application/json' },
54
+ });
55
+
56
+ if (!response.ok) {
57
+ throw new Error(`OpenAI token endpoint failed with status ${response.status}`);
58
+ }
59
+
60
+ const payload = await response.json().catch(() => null);
61
+
62
+ return (
63
+ payload?.client_secret?.value ||
64
+ payload?.session?.client_secret?.value ||
65
+ payload?.value ||
66
+ null
67
+ );
68
+ }
69
+
70
+ async function resolveRealtimeKey(tokenEndpoint) {
71
+ if (tokenEndpoint) {
72
+ const key = await fetchEphemeralKey(tokenEndpoint);
73
+ if (key) {
74
+ return key;
75
+ }
76
+
77
+ throw new Error('OpenAI token endpoint did not return a client secret');
78
+ }
79
+
80
+ const key = resolveOpenAIKey();
81
+ if (key) {
82
+ return key;
83
+ }
84
+
85
+ throw new Error('OpenAI Realtime credentials are missing. Set OPEN_AI_KEY or configure OPENAI_REALTIME_TOKEN_ENDPOINT.');
86
+ }
87
+
88
+ export function hasOpenAIRealtimeCredentials(tokenEndpoint = DEFAULT_TOKEN_ENDPOINT) {
89
+ if (tokenEndpoint) {
90
+ return true;
91
+ }
92
+
93
+ return Boolean(resolveOpenAIKey());
94
+ }
95
+
96
+ export function createOpenAIRealtimeSession({
97
+ model = DEFAULT_MODEL,
98
+ voice = DEFAULT_VOICE,
99
+ instructions = DEFAULT_INSTRUCTIONS,
100
+ callsEndpoint = DEFAULT_CALLS_ENDPOINT,
101
+ tokenEndpoint = DEFAULT_TOKEN_ENDPOINT,
102
+ onEvent = () => {},
103
+ onStatus = () => {},
104
+ onError = () => {},
105
+ } = {}) {
106
+ let peerConnection = null;
107
+ let dataChannel = null;
108
+ let localStream = null;
109
+ let audioElement = null;
110
+
111
+ const state = {
112
+ status: 'idle',
113
+ };
114
+
115
+ function updateStatus(status) {
116
+ state.status = status;
117
+ onStatus(status);
118
+ }
119
+
120
+ function sendEvent(eventPayload) {
121
+ if (!dataChannel || dataChannel.readyState !== 'open') {
122
+ return;
123
+ }
124
+
125
+ dataChannel.send(JSON.stringify(eventPayload));
126
+ }
127
+
128
+ function configureSession() {
129
+ sendEvent({
130
+ type: 'session.update',
131
+ session: {
132
+ instructions,
133
+ output_modalities: ['audio'],
134
+ audio: {
135
+ output: {
136
+ voice,
137
+ },
138
+ input: {
139
+ turn_detection: {
140
+ type: 'semantic_vad',
141
+ create_response: true,
142
+ interrupt_response: true,
143
+ },
144
+ },
145
+ },
146
+ },
147
+ });
148
+ }
149
+
150
+ async function connect() {
151
+ if (peerConnection) {
152
+ return;
153
+ }
154
+
155
+ updateStatus('connecting');
156
+
157
+ try {
158
+ if (typeof window === 'undefined' || typeof navigator === 'undefined') {
159
+ throw new Error('OpenAI Realtime requires a browser environment');
160
+ }
161
+
162
+ if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
163
+ throw new Error('Microphone access is not available in this browser');
164
+ }
165
+
166
+ const token = await resolveRealtimeKey(tokenEndpoint);
167
+
168
+ peerConnection = new RTCPeerConnection();
169
+ dataChannel = peerConnection.createDataChannel('oai-events');
170
+
171
+ dataChannel.addEventListener('message', (event) => {
172
+ try {
173
+ const payload = JSON.parse(event.data);
174
+ onEvent(payload);
175
+ } catch (error) {
176
+ onError(error);
177
+ }
178
+ });
179
+
180
+ dataChannel.addEventListener('open', () => {
181
+ configureSession();
182
+ });
183
+
184
+ localStream = await navigator.mediaDevices.getUserMedia({ audio: true });
185
+ localStream.getTracks().forEach((track) => peerConnection.addTrack(track, localStream));
186
+
187
+ audioElement = document.createElement('audio');
188
+ audioElement.autoplay = true;
189
+
190
+ peerConnection.addEventListener('track', (event) => {
191
+ const stream = event.streams?.[0];
192
+ if (stream && audioElement.srcObject !== stream) {
193
+ audioElement.srcObject = stream;
194
+ }
195
+ });
196
+
197
+ const offer = await peerConnection.createOffer();
198
+ await peerConnection.setLocalDescription(offer);
199
+
200
+ const response = await fetch(callsEndpoint, {
201
+ method: 'POST',
202
+ headers: {
203
+ Authorization: `Bearer ${token}`,
204
+ 'Content-Type': 'application/sdp',
205
+ },
206
+ body: offer.sdp,
207
+ });
208
+
209
+ if (!response.ok) {
210
+ throw new Error(`OpenAI Realtime SDP exchange failed with status ${response.status}`);
211
+ }
212
+
213
+ const answer = await response.text();
214
+ await peerConnection.setRemoteDescription({ type: 'answer', sdp: answer });
215
+
216
+ updateStatus('connected');
217
+ } catch (error) {
218
+ updateStatus('error');
219
+ onError(error);
220
+ disconnect();
221
+ throw error;
222
+ }
223
+ }
224
+
225
+ function disconnect() {
226
+ updateStatus('idle');
227
+
228
+ if (dataChannel) {
229
+ dataChannel.close();
230
+ dataChannel = null;
231
+ }
232
+
233
+ if (peerConnection) {
234
+ peerConnection.close();
235
+ peerConnection = null;
236
+ }
237
+
238
+ if (localStream) {
239
+ localStream.getTracks().forEach((track) => track.stop());
240
+ localStream = null;
241
+ }
242
+
243
+ if (audioElement) {
244
+ audioElement.srcObject = null;
245
+ audioElement = null;
246
+ }
247
+ }
248
+
249
+ function sendText(text) {
250
+ if (!text) {
251
+ return;
252
+ }
253
+
254
+ sendEvent({
255
+ type: 'conversation.item.create',
256
+ item: {
257
+ type: 'message',
258
+ role: 'user',
259
+ content: [{ type: 'input_text', text }],
260
+ },
261
+ });
262
+
263
+ sendEvent({ type: 'response.create' });
264
+ }
265
+
266
+ return {
267
+ connect,
268
+ disconnect,
269
+ sendText,
270
+ sendEvent,
271
+ get status() {
272
+ return state.status;
273
+ },
274
+ };
275
+ }
@@ -0,0 +1,167 @@
1
+ ## ProcessStepsPage
2
+
3
+ `ProcessStepsPage` is a generic multi-step process runner used to render and track workflow steps.
4
+
5
+ File: `core/modules/steps/steps.js`
6
+
7
+ ## What It Does
8
+
9
+ - Loads process steps from `Dashboard.loadProcess(processId)`.
10
+ - Dynamically renders step components using `related_page`.
11
+ - Tracks step and process timings.
12
+ - Supports `Next`, `Back`, `Skip`, `Finish`, and optional next-process start.
13
+ - Supports timeline click navigation.
14
+ - Supports keyboard navigation with `ArrowLeft` (previous) and `ArrowRight` (next).
15
+ - Supports voice-controlled navigation through Gemini Live API command interpretation.
16
+ - Supports step-level text-to-speech narration with optional auto narration.
17
+ - Optionally opens process content in an external window.
18
+
19
+ ## Export
20
+
21
+ `ProcessStepsPage` is exported from:
22
+
23
+ ```js
24
+ core/modules/index.js
25
+ ```
26
+
27
+ and can be imported as:
28
+
29
+ ```jsx
30
+ import { ProcessStepsPage } from '.../core/modules';
31
+ ```
32
+
33
+ ## Props
34
+
35
+ - `match`: Router match object.
36
+ - `CustomComponents`: Map of custom step components keyed by component name.
37
+ - `showExternalWindow`: Boolean that enables rendering the process content in both a popup window and the main window when chaining to the next process.
38
+ - `ExternalWindowWidth`: Popup width.
39
+ - `ExternalWindowHeight`: Popup height.
40
+ - `history`: Router history (used for goBack on completion).
41
+
42
+ All additional props are forwarded to dynamic step components.
43
+
44
+ ## URL Params Used
45
+
46
+ `Location.search()` is used to read:
47
+
48
+ - `processId`: Initial process to load.
49
+ - `opb_id` or `reference_id`: Reference id for process log.
50
+ - `opno` or `reference_number`: Reference number for process log.
51
+ - `mode`: Passed to process log payload.
52
+
53
+ ## Step Contract (Expected Data Shape)
54
+
55
+ Each step returned by backend is expected to include:
56
+
57
+ - `step_id`: Unique step id.
58
+ - `step_name`: Step title shown in UI.
59
+ - `step_description`: Step subtitle shown in UI.
60
+ - `related_page`: Component key to resolve from `CustomComponents` + generic components.
61
+ - `config`: Optional props object spread into the dynamic step component.
62
+ - `is_mandatory`: If `true`, step requires `onStepComplete()` before `Next` is enabled.
63
+ - `allow_skip`: `'Y'` enables Skip button.
64
+ - `order_seqtype`: `'E'` marks end-step and shows `Finish`.
65
+
66
+ ## Dynamic Step Rendering
67
+
68
+ The active step component is resolved by:
69
+
70
+ - `allComponents[step.related_page]`
71
+
72
+ and rendered with:
73
+
74
+ - `step`
75
+ - `params` (parsed URL params)
76
+ - `onStepComplete` callback
77
+ - forwarded parent props
78
+ - `step.config` spread values
79
+
80
+ If no step exists, an empty state is shown.
81
+ If component resolution fails, an empty state showing the missing component name is shown.
82
+
83
+ ## Timing Persistence
84
+
85
+ Timings are stored in localStorage per process:
86
+
87
+ - key: `processTimings_<processId>`
88
+
89
+ Each timing entry:
90
+
91
+ - `step_id`
92
+ - `start_time`
93
+ - `end_time`
94
+ - `duration` (ms)
95
+ - `status` (`completed` or `skipped`)
96
+
97
+ On successful `Dashboard.processLog(payload)`, timings are cleared.
98
+ On failure, timings are retained in localStorage.
99
+
100
+ ## Voice Control
101
+
102
+ Voice commands supported:
103
+
104
+ - `next`
105
+ - `previous` / `back`
106
+ - `skip`
107
+ - `finish`
108
+ - `repeat`
109
+ - `go to step <number>`
110
+
111
+ Voice flow:
112
+
113
+ - Browser speech recognition captures spoken command.
114
+ - Transcript is sent to Gemini Live API over WebSocket.
115
+ - Gemini returns normalized navigation action.
116
+ - Step navigation executes in `ProcessStepsPage`.
117
+
118
+ Step narration:
119
+
120
+ - Uses AI TTS providers (Gemini / ElevenLabs).
121
+ - Can be triggered with `Read Step`.
122
+ - Auto narration can be toggled on/off.
123
+
124
+ ### Voice Configuration
125
+
126
+ - `REACT_APP_GEMINI_API_KEY`: Gemini API key (required for Gemini Live).
127
+ - `REACT_APP_GEMINI_LIVE_MODEL`: Live model name (default: `gemini-live-2.5-flash-preview`).
128
+ - `REACT_APP_GEMINI_LIVE_WS_ENDPOINT`: Optional custom Live WebSocket endpoint.
129
+ - `REACT_APP_VOICE_COMMAND_LANG`: Speech recognition language (default: `en-US`).
130
+ - `ELEVEN_LABS_KEY` or `ELEVENLABS_API_KEY`: ElevenLabs API key for narration provider `elevenlabs`.
131
+ - `REACT_APP_ELEVENLABS_API_KEY`: Browser-safe ElevenLabs API key alternative.
132
+ - `ELEVENLABS_VOICE_ID` / `ELEVEN_LABS_VOICE_ID` / `REACT_APP_ELEVENLABS_VOICE_ID`: Optional ElevenLabs voice id.
133
+ - `REACT_APP_STEP_TTS_LANG`: Narration language (default: `en-US`).
134
+ - `REACT_APP_STEP_TTS_RATE`: Narration speed (default: `1`).
135
+ - `REACT_APP_STEP_TTS_PITCH`: Narration pitch (default: `1`).
136
+ - `REACT_APP_OPENAI_REALTIME_TOKEN_ENDPOINT`: Backend endpoint that returns an OpenAI Realtime client secret for WebRTC.
137
+ - `REACT_APP_OPENAI_REALTIME_MODEL`: Realtime model name (default: `gpt-realtime`).
138
+ - `REACT_APP_OPENAI_REALTIME_VOICE`: Realtime voice name (default: `alloy`).
139
+ - `REACT_APP_OPENAI_REALTIME_INSTRUCTIONS`: Optional system instructions for the conversation session.
140
+ - `OPEN_AI_KEY` / `OPENAI_API_KEY`: OpenAI API key used when token endpoint is not configured.
141
+
142
+ ## Minimal Usage Example
143
+
144
+ ```jsx
145
+ import React from 'react';
146
+ import { ProcessStepsPage } from '../../modules';
147
+ import SampleDetail from '../../modules/lab/components/sample-detail/sample-detail';
148
+ import ResultDetail from '../../modules/lab/components/result-detail/result-detail';
149
+
150
+ const CustomComponents = {
151
+ SampleDetail,
152
+ ResultDetail,
153
+ };
154
+
155
+ export default function ProcessPage(props) {
156
+ return <ProcessStepsPage {...props} CustomComponents={CustomComponents} showExternalWindow />;
157
+ }
158
+ ```
159
+
160
+ ## Related Files
161
+
162
+ - `core/modules/steps/steps.js`
163
+ - `core/modules/steps/timeline.js`
164
+ - `core/modules/steps/action-buttons.js`
165
+ - `core/modules/steps/voice-navigation.js`
166
+ - `core/modules/steps/openai-realtime.js`
167
+ - `core/modules/steps/steps.scss`