@tiny-codes/react-easy 1.4.6 → 1.4.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (138) hide show
  1. package/CHANGELOG.md +10 -0
  2. package/es/assets/request-camera-en.js +2 -0
  3. package/es/assets/request-camera-en.js.d.ts +2 -0
  4. package/es/assets/request-camera-en.js.map +1 -0
  5. package/es/assets/request-camera-en.png +0 -0
  6. package/es/assets/request-camera-zh.js +2 -0
  7. package/es/assets/request-camera-zh.js.d.ts +2 -0
  8. package/es/assets/request-camera-zh.js.map +1 -0
  9. package/es/assets/request-camera-zh.png +0 -0
  10. package/es/assets/request-microphone-en.js +2 -0
  11. package/es/assets/request-microphone-en.js.d.ts +2 -0
  12. package/es/assets/request-microphone-en.js.map +1 -0
  13. package/es/assets/request-microphone-en.png +0 -0
  14. package/es/assets/request-microphone-zh.js +2 -0
  15. package/es/assets/request-microphone-zh.js.d.ts +2 -0
  16. package/es/assets/request-microphone-zh.js.map +1 -0
  17. package/es/assets/request-microphone-zh.png +0 -0
  18. package/es/assets/reset-camera-en.js +2 -0
  19. package/es/assets/reset-camera-en.js.d.ts +2 -0
  20. package/es/assets/reset-camera-en.js.map +1 -0
  21. package/es/assets/reset-camera-en.png +0 -0
  22. package/es/assets/reset-camera-zh.js +2 -0
  23. package/es/assets/reset-camera-zh.js.d.ts +2 -0
  24. package/es/assets/reset-camera-zh.js.map +1 -0
  25. package/es/assets/reset-camera-zh.png +0 -0
  26. package/es/assets/reset-microphone-en.js +2 -0
  27. package/es/assets/reset-microphone-en.js.d.ts +2 -0
  28. package/es/assets/reset-microphone-en.js.map +1 -0
  29. package/es/assets/reset-microphone-en.png +0 -0
  30. package/es/assets/reset-microphone-zh.js +2 -0
  31. package/es/assets/reset-microphone-zh.js.d.ts +2 -0
  32. package/es/assets/reset-microphone-zh.js.map +1 -0
  33. package/es/assets/reset-microphone-zh.png +0 -0
  34. package/es/assets/save-default-audio1-en.js +2 -0
  35. package/es/assets/save-default-audio1-en.js.d.ts +2 -0
  36. package/es/assets/save-default-audio1-en.js.map +1 -0
  37. package/es/assets/save-default-audio1-en.png +0 -0
  38. package/es/assets/save-default-audio1-zh.js +2 -0
  39. package/es/assets/save-default-audio1-zh.js.d.ts +2 -0
  40. package/es/assets/save-default-audio1-zh.js.map +1 -0
  41. package/es/assets/save-default-audio1-zh.png +0 -0
  42. package/es/assets/save-default-audio2-en.js +2 -0
  43. package/es/assets/save-default-audio2-en.js.d.ts +2 -0
  44. package/es/assets/save-default-audio2-en.js.map +1 -0
  45. package/es/assets/save-default-audio2-en.png +0 -0
  46. package/es/assets/save-default-audio2-zh.js +2 -0
  47. package/es/assets/save-default-audio2-zh.js.d.ts +2 -0
  48. package/es/assets/save-default-audio2-zh.js.map +1 -0
  49. package/es/assets/save-default-audio2-zh.png +0 -0
  50. package/es/components/ConfigProvider/index.js +1 -0
  51. package/es/components/ConfigProvider/index.js.map +1 -1
  52. package/es/hooks/index.d.ts +1 -0
  53. package/es/hooks/index.js +1 -0
  54. package/es/hooks/index.js.map +1 -1
  55. package/es/hooks/useRefValue.d.ts +2 -2
  56. package/es/hooks/useRefValue.js.map +1 -1
  57. package/es/hooks/useUserMedia.d.ts +104 -0
  58. package/es/hooks/useUserMedia.js +799 -0
  59. package/es/hooks/useUserMedia.js.map +1 -0
  60. package/es/locales/index.d.ts +51 -0
  61. package/es/locales/langs/en-US.d.ts +17 -0
  62. package/es/locales/langs/en-US.js +18 -1
  63. package/es/locales/langs/en-US.js.map +1 -1
  64. package/es/locales/langs/zh-CN.d.ts +17 -0
  65. package/es/locales/langs/zh-CN.js +18 -1
  66. package/es/locales/langs/zh-CN.js.map +1 -1
  67. package/es/utils/stream.d.ts +68 -0
  68. package/es/utils/stream.js +122 -0
  69. package/es/utils/stream.js.map +1 -0
  70. package/lib/assets/request-camera-en.js +26 -0
  71. package/lib/assets/request-camera-en.js.d.ts +2 -0
  72. package/lib/assets/request-camera-en.js.map +7 -0
  73. package/lib/assets/request-camera-en.png +0 -0
  74. package/lib/assets/request-camera-zh.js +26 -0
  75. package/lib/assets/request-camera-zh.js.d.ts +2 -0
  76. package/lib/assets/request-camera-zh.js.map +7 -0
  77. package/lib/assets/request-camera-zh.png +0 -0
  78. package/lib/assets/request-microphone-en.js +26 -0
  79. package/lib/assets/request-microphone-en.js.d.ts +2 -0
  80. package/lib/assets/request-microphone-en.js.map +7 -0
  81. package/lib/assets/request-microphone-en.png +0 -0
  82. package/lib/assets/request-microphone-zh.js +26 -0
  83. package/lib/assets/request-microphone-zh.js.d.ts +2 -0
  84. package/lib/assets/request-microphone-zh.js.map +7 -0
  85. package/lib/assets/request-microphone-zh.png +0 -0
  86. package/lib/assets/reset-camera-en.js +26 -0
  87. package/lib/assets/reset-camera-en.js.d.ts +2 -0
  88. package/lib/assets/reset-camera-en.js.map +7 -0
  89. package/lib/assets/reset-camera-en.png +0 -0
  90. package/lib/assets/reset-camera-zh.js +26 -0
  91. package/lib/assets/reset-camera-zh.js.d.ts +2 -0
  92. package/lib/assets/reset-camera-zh.js.map +7 -0
  93. package/lib/assets/reset-camera-zh.png +0 -0
  94. package/lib/assets/reset-microphone-en.js +26 -0
  95. package/lib/assets/reset-microphone-en.js.d.ts +2 -0
  96. package/lib/assets/reset-microphone-en.js.map +7 -0
  97. package/lib/assets/reset-microphone-en.png +0 -0
  98. package/lib/assets/reset-microphone-zh.js +26 -0
  99. package/lib/assets/reset-microphone-zh.js.d.ts +2 -0
  100. package/lib/assets/reset-microphone-zh.js.map +7 -0
  101. package/lib/assets/reset-microphone-zh.png +0 -0
  102. package/lib/assets/save-default-audio1-en.js +26 -0
  103. package/lib/assets/save-default-audio1-en.js.d.ts +2 -0
  104. package/lib/assets/save-default-audio1-en.js.map +7 -0
  105. package/lib/assets/save-default-audio1-en.png +0 -0
  106. package/lib/assets/save-default-audio1-zh.js +26 -0
  107. package/lib/assets/save-default-audio1-zh.js.d.ts +2 -0
  108. package/lib/assets/save-default-audio1-zh.js.map +7 -0
  109. package/lib/assets/save-default-audio1-zh.png +0 -0
  110. package/lib/assets/save-default-audio2-en.js +26 -0
  111. package/lib/assets/save-default-audio2-en.js.d.ts +2 -0
  112. package/lib/assets/save-default-audio2-en.js.map +7 -0
  113. package/lib/assets/save-default-audio2-en.png +0 -0
  114. package/lib/assets/save-default-audio2-zh.js +26 -0
  115. package/lib/assets/save-default-audio2-zh.js.d.ts +2 -0
  116. package/lib/assets/save-default-audio2-zh.js.map +7 -0
  117. package/lib/assets/save-default-audio2-zh.png +0 -0
  118. package/lib/components/ConfigProvider/index.js +1 -0
  119. package/lib/components/ConfigProvider/index.js.map +2 -2
  120. package/lib/hooks/index.d.ts +1 -0
  121. package/lib/hooks/index.js +3 -0
  122. package/lib/hooks/index.js.map +2 -2
  123. package/lib/hooks/useRefValue.d.ts +2 -2
  124. package/lib/hooks/useRefValue.js.map +2 -2
  125. package/lib/hooks/useUserMedia.d.ts +104 -0
  126. package/lib/hooks/useUserMedia.js +553 -0
  127. package/lib/hooks/useUserMedia.js.map +7 -0
  128. package/lib/locales/index.d.ts +51 -0
  129. package/lib/locales/langs/en-US.d.ts +17 -0
  130. package/lib/locales/langs/en-US.js +18 -1
  131. package/lib/locales/langs/en-US.js.map +2 -2
  132. package/lib/locales/langs/zh-CN.d.ts +17 -0
  133. package/lib/locales/langs/zh-CN.js +18 -1
  134. package/lib/locales/langs/zh-CN.js.map +2 -2
  135. package/lib/utils/stream.d.ts +68 -0
  136. package/lib/utils/stream.js +101 -0
  137. package/lib/utils/stream.js.map +7 -0
  138. package/package.json +1 -1
@@ -1,4 +1,4 @@
1
- import type { RefObject } from 'react';
1
+ import type { MutableRefObject } from 'react';
2
2
  /**
3
3
  * - **EN:** Get a mutable ref object and automatically update the value change
4
4
  * - **CN:** 获取一个可变的ref对象,并自动更新值变化
@@ -7,5 +7,5 @@ import type { RefObject } from 'react';
7
7
  *
8
8
  * @returns A mutable ref object, but the reference is immutable | 可变的ref对象,但引用不可变
9
9
  */
10
- declare const useRefValue: <T>(value: T) => RefObject<T>;
10
+ declare const useRefValue: <T>(value: T) => MutableRefObject<T>;
11
11
  export default useRefValue;
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "version": 3,
3
3
  "sources": ["../../src/hooks/useRefValue.ts"],
4
- "sourcesContent": ["import type { RefObject } from 'react';\nimport { useRef } from 'react';\n\n/**\n * - **EN:** Get a mutable ref object and automatically update the value change\n * - **CN:** 获取一个可变的ref对象,并自动更新值变化\n *\n * @param value the wrapped value | 被包装的值\n *\n * @returns A mutable ref object, but the reference is immutable | 可变的ref对象,但引用不可变\n */\nconst useRefValue = <T>(value: T): RefObject<T> => {\n const ref = useRef<T>(value);\n ref.current = value;\n return ref;\n};\n\nexport default useRefValue;\n"],
5
- "mappings": ";;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,mBAAuB;AAUvB,IAAM,cAAc,CAAI,UAA2B;AACjD,QAAM,UAAM,qBAAU,KAAK;AAC3B,MAAI,UAAU;AACd,SAAO;AACT;AAEA,IAAO,sBAAQ;",
4
+ "sourcesContent": ["import type { MutableRefObject } from 'react';\nimport { useRef } from 'react';\n\n/**\n * - **EN:** Get a mutable ref object and automatically update the value change\n * - **CN:** 获取一个可变的ref对象,并自动更新值变化\n *\n * @param value the wrapped value | 被包装的值\n *\n * @returns A mutable ref object, but the reference is immutable | 可变的ref对象,但引用不可变\n */\nconst useRefValue = <T>(value: T): MutableRefObject<T> => {\n const ref = useRef<T>(value);\n ref.current = value;\n return ref;\n};\n\nexport default useRefValue;\n"],
5
+ "mappings": ";;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AACA,mBAAuB;AAUvB,IAAM,cAAc,CAAI,UAAkC;AACxD,QAAM,UAAM,qBAAU,KAAK;AAC3B,MAAI,UAAU;AACd,SAAO;AACT;AAEA,IAAO,sBAAQ;",
6
6
  "names": []
7
7
  }
@@ -0,0 +1,104 @@
1
+ export interface UseUserMediaProps {
2
+ /**
3
+ * - **EN:** The media constraints for the audio and video stream.
4
+ * - **CN:** 媒体流的媒体约束。
5
+ */
6
+ media: Pick<MediaStreamConstraints, 'audio' | 'video'>;
7
+ /**
8
+ * - **EN:** Whether to enable PCM output, only effective when recording audio. Please use
9
+ * `onPcmStreamChunk` callback to get PCM data. Default is `false`.
10
+ * - **CN:** 是否启用 PCM 输出,仅在录制音频时有效。请使用 `onPcmStreamChunk` 回调获取 PCM 数据。默认`false`
11
+ */
12
+ pcmAudioOptions?: {
13
+ /**
14
+ * - **EN:** The audio context options for the PCM output.
15
+ * - **CN:** PCM 输出的音频上下文选项。
16
+ */
17
+ audioContext?: AudioContextOptions;
18
+ /**
19
+ * - **EN:** The worklet options for the PCM output.
20
+ * - **CN:** PCM 输出的工作线程选项。
21
+ */
22
+ workletOptions?: AudioWorkletNodeOptions;
23
+ };
24
+ /**
25
+ * - **EN:** Callback function that is triggered when the recording starts, providing the media
26
+ * stream.
27
+ * - **CN:** 开始录制时触发的回调函数,提供媒体流。
28
+ *
29
+ * @param {MediaStream} stream - The media stream.
30
+ */
31
+ onStartRecording?: (stream: MediaStream) => void;
32
+ /**
33
+ * - **EN:** Callback function that is triggered when the recording stops.
34
+ * - **CN:** 停止录制时触发的回调函数。
35
+ */
36
+ onStopRecording?: () => void;
37
+ /**
38
+ * - **EN:** Callback function that is triggered when a new chunk of media data is available.
39
+ * - **CN:** 当录制媒体流时,每个时间分片会触发一次 `onStreamChunk` 回调,提供媒体数据块。
40
+ *
41
+ * > 注意音频流编码格式为:audio/webm;codecs=opus。如果希望获取 PCM 数据,请使用 `onPcmData` 回调。
42
+ *
43
+ * @param {Blob} chunk - The media data chunk (MIME: audio/webm;codecs=opus) | 媒体数据块 (MIME:
44
+ * audio/webm;codecs=opus)
45
+ */
46
+ onStreamChunk?: (chunk: Blob) => void;
47
+ /**
48
+ * - **EN:** Callback for raw PCM float data (per render quantum)
49
+ * - **CN:** 获取原始 PCM 浮点数据的回调(每个渲染量子)
50
+ *
51
+ * @param data Monophonic or polyphonic spliced data | 单声道或多声道拼接数据
52
+ * @param sampleRate Sample rate | 采样率
53
+ */
54
+ onPcmStreamChunk?: (channels: Float32Array[], sampleRate: number) => void;
55
+ /**
56
+ * - **EN:** Whether to disable this hook.
57
+ * - **CN:** 是否禁用此工具
58
+ */
59
+ disabled?: boolean;
60
+ /**
61
+ * - **EN:** The slicing time period (milliseconds) for each fragment of the audio and video stream,
62
+ * each time slice will trigger the `onStreamChunk` callback. Default is `500`.
63
+ * - **CN:** 媒体流每个分片的切片时间段(毫秒),每个时间分片会触发一次 `onStreamChunk` 回调,默认值为 `500`。
64
+ */
65
+ streamSliceMs?: number;
66
+ /**
67
+ * - **EN:** The silence detection threshold (0-1) for the audio stream, below which the audio is
68
+ * considered silent. Default is `0`.
69
+ * - **CN:** 音频流的静音检测阈值(0-1),低于该值音频被视为静音。默认值为 `0`。
70
+ */
71
+ soundDetectionThreshold?: number;
72
+ /**
73
+ * - **EN:** The timeout duration (milliseconds) for detecting sound input. If no sound is detected
74
+ * within this period, the user will be prompted to re-select the audio device. Default is
75
+ * `3000`.
76
+ * - **CN:** 检测是否有声音输入的超时时间(毫秒),如果在该时间段内没有检测到声音,则会提示用户重新选择音频设备。默认值为 `3000`。
77
+ */
78
+ soundDetectionTimeout?: number;
79
+ }
80
+ declare const useUserMedia: (props: UseUserMediaProps) => UseUserMediaResult;
81
+ export interface UseUserMediaResult {
82
+ /**
83
+ * - **EN** Whether the media stream is currently being recorded
84
+ * - **CN** 是否正在录制媒体流
85
+ */
86
+ isRecording: boolean;
87
+ /**
88
+ * - **EN** Start recording the media stream
89
+ * - **CN** 开始录制媒体流
90
+ */
91
+ startRecording: () => Promise<MediaRecorder>;
92
+ /**
93
+ * - **EN** Stop recording the media stream
94
+ * - **CN** 停止录制媒体流
95
+ */
96
+ stopRecording: () => void;
97
+ /**
98
+ * - **EN** Get the media stream being recorded, returns the stream if recording, otherwise returns
99
+ * `undefined`
100
+ * - **CN** 获取正在录制的媒体流,如果正在录制则返回该流,否则返回 `undefined`
101
+ */
102
+ mediaStream?: MediaStream;
103
+ }
104
+ export default useUserMedia;
@@ -0,0 +1,553 @@
1
+ var __create = Object.create;
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __getProtoOf = Object.getPrototypeOf;
6
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
7
+ var __export = (target, all) => {
8
+ for (var name in all)
9
+ __defProp(target, name, { get: all[name], enumerable: true });
10
+ };
11
+ var __copyProps = (to, from, except, desc) => {
12
+ if (from && typeof from === "object" || typeof from === "function") {
13
+ for (let key of __getOwnPropNames(from))
14
+ if (!__hasOwnProp.call(to, key) && key !== except)
15
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
16
+ }
17
+ return to;
18
+ };
19
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
20
+ // If the importer is in node compatibility mode or this is not an ESM
21
+ // file that has been converted to a CommonJS file using a Babel-
22
+ // compatible transform (i.e. "__esModule" has not been set), then set
23
+ // "default" to the CommonJS "module.exports" for node compatibility.
24
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
25
+ mod
26
+ ));
27
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
28
+
29
+ // src/hooks/useUserMedia.tsx
30
+ var useUserMedia_exports = {};
31
+ __export(useUserMedia_exports, {
32
+ default: () => useUserMedia_default
33
+ });
34
+ module.exports = __toCommonJS(useUserMedia_exports);
35
+ var import_react = require("react");
36
+ var import_antd = require("antd");
37
+ var import_ConfigProvider = __toESM(require("../components/ConfigProvider"));
38
+ var import_context = __toESM(require("../components/ConfigProvider/context"));
39
+ var import_stream = require("../utils/stream");
40
+ var import_useRefFunction = __toESM(require("./useRefFunction"));
41
+ var import_useRefValue = __toESM(require("./useRefValue"));
42
+ var import_useT = __toESM(require("./useT"));
43
+ var requestMicrophoneEnUrlPromise = import("../assets/request-microphone-en.js");
44
+ var requestMicrophoneZhUrlPromise = import("../assets/request-microphone-zh.js");
45
+ var resetMicrophoneEnUrlPromise = import("../assets/reset-microphone-en.js");
46
+ var resetMicrophoneZhUrlPromise = import("../assets/reset-microphone-zh.js");
47
+ var saveAudioDeviceEnUrlPromise1 = import("../assets/save-default-audio1-en.js");
48
+ var saveAudioDeviceZhUrlPromise1 = import("../assets/save-default-audio1-zh.js");
49
+ var saveAudioDeviceEnUrlPromise2 = import("../assets/save-default-audio2-en.js");
50
+ var saveAudioDeviceZhUrlPromise2 = import("../assets/save-default-audio2-zh.js");
51
+ var requestCameraEnUrlPromise = import("../assets/request-camera-en.js");
52
+ var requestCameraZhUrlPromise = import("../assets/request-camera-zh.js");
53
+ var resetCameraEnUrlPromise = import("../assets/reset-camera-en.js");
54
+ var resetCameraZhUrlPromise = import("../assets/reset-camera-zh.js");
55
+ var useUserMedia = (props) => {
56
+ var _a;
57
+ const {
58
+ media,
59
+ pcmAudioOptions,
60
+ disabled,
61
+ streamSliceMs = 500,
62
+ soundDetectionThreshold = 0,
63
+ soundDetectionTimeout = 3e3,
64
+ onStartRecording,
65
+ onStopRecording,
66
+ onStreamChunk,
67
+ onPcmStreamChunk
68
+ } = props;
69
+ const context = (0, import_react.useContext)(import_context.default);
70
+ const { lang } = context;
71
+ const contextRef = (0, import_useRefValue.default)(context);
72
+ const t = (0, import_useT.default)();
73
+ const app = import_antd.App.useApp();
74
+ const modal = ((_a = app.modal) == null ? void 0 : _a.confirm) ? app.modal : import_antd.Modal;
75
+ const modalRef = (0, import_useRefValue.default)(modal);
76
+ const [requestMicrophoneEnUrl, setRequestMicrophoneEnUrl] = (0, import_react.useState)();
77
+ const [requestMicrophoneZhUrl, setRequestMicrophoneZhUrl] = (0, import_react.useState)();
78
+ const [resetMicrophoneEnUrl, setResetMicrophoneEnUrl] = (0, import_react.useState)();
79
+ const [resetMicrophoneZhUrl, setResetMicrophoneZhUrl] = (0, import_react.useState)();
80
+ const [requestCameraEnUrl, setRequestCameraEnUrl] = (0, import_react.useState)();
81
+ const [requestCameraZhUrl, setRequestCameraZhUrl] = (0, import_react.useState)();
82
+ const [resetCameraEnUrl, setResetCameraEnUrl] = (0, import_react.useState)();
83
+ const [resetCameraZhUrl, setResetCameraZhUrl] = (0, import_react.useState)();
84
+ const [isRecording, setIsRecording] = (0, import_react.useState)(false);
85
+ const [mediaStream, setMediaStream] = (0, import_react.useState)();
86
+ const [mediaRecorder, setMediaRecorder] = (0, import_react.useState)(null);
87
+ const stopSoundListeningRef = (0, import_react.useRef)(void 0);
88
+ const closePcmRef = (0, import_react.useRef)(void 0);
89
+ const includeAudio = !!media.audio;
90
+ const exactAudioDeviceIdRef = (0, import_react.useRef)(void 0);
91
+ const rafRef = (0, import_react.useRef)(0);
92
+ const isSpeakingRef = (0, import_react.useRef)(false);
93
+ const silenceVolumeThresholdRef = (0, import_useRefValue.default)(soundDetectionThreshold);
94
+ const silenceDetectDurationRef = (0, import_useRefValue.default)(soundDetectionTimeout);
95
+ const soundDetectStart = (0, import_react.useRef)(0);
96
+ const pcmSampleRateRef = (0, import_react.useRef)(0);
97
+ const onPcmStreamChunkRef = (0, import_useRefValue.default)(onPcmStreamChunk);
98
+ const pcmStreamSlicerRef = (0, import_react.useRef)(
99
+ new import_stream.StreamTimeSlicerClass({
100
+ timeSlice: streamSliceMs,
101
+ onSlice: (channels) => {
102
+ var _a2;
103
+ (_a2 = onPcmStreamChunkRef.current) == null ? void 0 : _a2.call(onPcmStreamChunkRef, channels, pcmSampleRateRef.current);
104
+ }
105
+ })
106
+ );
107
+ const deviceType = (0, import_react.useMemo)(
108
+ () => media.video ? t("hooks.useUserMedia.camera") : t("hooks.useUserMedia.microphone"),
109
+ [media, t]
110
+ );
111
+ const featureName = (0, import_react.useMemo)(
112
+ () => media.video ? t("hooks.featureName.camera") : t("hooks.featureName.microphone"),
113
+ [media, t]
114
+ );
115
+ const showDeniedPopup = () => {
116
+ const resetMicrophoneUrl = lang === "zh-CN" ? resetMicrophoneZhUrl : resetMicrophoneEnUrl;
117
+ const resetCameraUrl = lang === "zh-CN" ? resetCameraZhUrl : resetCameraEnUrl;
118
+ modal.error({
119
+ title: t("hooks.useUserMedia.devicePermission", { deviceType }),
120
+ width: 500,
121
+ content: /* @__PURE__ */ React.createElement("div", null, /* @__PURE__ */ React.createElement(import_antd.Typography.Paragraph, null), /* @__PURE__ */ React.createElement(import_antd.Typography.Paragraph, null, /* @__PURE__ */ React.createElement(import_antd.Typography.Text, { strong: true, type: "danger" }, t("hooks.useUserMedia.deniedPermission", { deviceType, featureName }))), /* @__PURE__ */ React.createElement(import_antd.Typography.Paragraph, null, t("hooks.useUserMedia.reopenPermissionGuide", { deviceType })), /* @__PURE__ */ React.createElement(
122
+ "img",
123
+ {
124
+ src: media.video ? resetCameraUrl : resetMicrophoneUrl,
125
+ alt: "microphone-permission",
126
+ style: { width: 380 }
127
+ }
128
+ ))
129
+ });
130
+ };
131
+ const recordStream = async () => {
132
+ let stream;
133
+ try {
134
+ const options = media;
135
+ if (media.audio) {
136
+ if (exactAudioDeviceIdRef.current) {
137
+ if (media.audio === true) {
138
+ options.audio = { deviceId: { exact: exactAudioDeviceIdRef.current } };
139
+ } else {
140
+ options.audio = { deviceId: { exact: exactAudioDeviceIdRef.current }, ...media.audio };
141
+ }
142
+ }
143
+ }
144
+ stream = await navigator.mediaDevices.getUserMedia(options);
145
+ setMediaStream(stream);
146
+ onStartRecording == null ? void 0 : onStartRecording(stream);
147
+ const recorder = new MediaRecorder(stream);
148
+ recorder.ondataavailable = (event) => {
149
+ if (event.data.size > 0) {
150
+ onStreamChunk == null ? void 0 : onStreamChunk(event.data);
151
+ }
152
+ };
153
+ if (streamSliceMs) {
154
+ recorder.start(streamSliceMs);
155
+ } else {
156
+ recorder.start();
157
+ }
158
+ setMediaRecorder(recorder);
159
+ setIsRecording(true);
160
+ if (options.audio && onPcmStreamChunkRef.current) {
161
+ try {
162
+ const ctx = new AudioContext(pcmAudioOptions == null ? void 0 : pcmAudioOptions.audioContext);
163
+ pcmSampleRateRef.current = ctx.sampleRate;
164
+ if (ctx.state === "suspended") {
165
+ await ctx.resume();
166
+ }
167
+ const sourceNode = ctx.createMediaStreamSource(stream);
168
+ let node;
169
+ closePcmRef.current = () => {
170
+ node == null ? void 0 : node.port.close();
171
+ node == null ? void 0 : node.disconnect();
172
+ sourceNode.disconnect();
173
+ ctx.close().catch(() => {
174
+ });
175
+ };
176
+ const setupWorklet = async () => {
177
+ try {
178
+ await ctx.audioWorklet.addModule(generatePcmCaptureProcessorModule());
179
+ node = new AudioWorkletNode(ctx, "pcm-capture", pcmAudioOptions == null ? void 0 : pcmAudioOptions.workletOptions);
180
+ node.port.onmessage = (e) => {
181
+ var _a2;
182
+ if (((_a2 = e.data) == null ? void 0 : _a2.type) === "pcm") {
183
+ const channels = e.data.channels;
184
+ pcmStreamSlicerRef.current.push(channels);
185
+ }
186
+ };
187
+ sourceNode.connect(node);
188
+ } catch (err) {
189
+ fallbackScriptProcessor({ ctx, sourceNode, streamSlicer: pcmStreamSlicerRef.current });
190
+ }
191
+ };
192
+ if ("audioWorklet" in ctx) {
193
+ setupWorklet();
194
+ } else {
195
+ fallbackScriptProcessor({ ctx, sourceNode, streamSlicer: pcmStreamSlicerRef.current });
196
+ }
197
+ } catch (e) {
198
+ console.error("setup pcm worklet failed", e);
199
+ }
200
+ }
201
+ return recorder;
202
+ } catch (error) {
203
+ console.error(error);
204
+ if (error instanceof Error && error.name === "NotAllowedError") {
205
+ showDeniedPopup();
206
+ import_antd.notification.error({ message: t("hooks.useUserMedia.deniedPermission", { deviceType, featureName }) });
207
+ throw new Error(t("hooks.useUserMedia.deniedPermission", { deviceType, featureName }));
208
+ }
209
+ import_antd.notification.error({ message: t("hooks.useUserMedia.notSupport") });
210
+ throw new Error(t("hooks.useUserMedia.notSupport"));
211
+ }
212
+ };
213
+ const startRecording = (0, import_useRefFunction.default)(async () => {
214
+ var _a2, _b;
215
+ if (disabled) {
216
+ throw new Error(t("hooks.useUserMedia.disabledWarning"));
217
+ }
218
+ if (isRecording) {
219
+ throw new Error(t("hooks.useUserMedia.isRecordingNow"));
220
+ }
221
+ if (!((_a2 = navigator.mediaDevices) == null ? void 0 : _a2.getUserMedia) || !((_b = navigator.permissions) == null ? void 0 : _b.query)) {
222
+ import_antd.notification.error({
223
+ message: t("hooks.useUserMedia.notSupport")
224
+ });
225
+ throw new Error(t("hooks.useUserMedia.notSupport"));
226
+ }
227
+ try {
228
+ const result = await window.navigator.permissions.query({
229
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
230
+ name: media.video ? "camera" : "microphone"
231
+ });
232
+ if (result.state === "denied") {
233
+ showDeniedPopup();
234
+ import_antd.notification.error({ message: t("hooks.useUserMedia.deniedPermission", { deviceType, featureName }) });
235
+ throw new Error(t("hooks.useUserMedia.deniedPermission", { deviceType, featureName }));
236
+ } else {
237
+ if (result.state === "prompt") {
238
+ const requestMicrophoneUrl = lang === "zh-CN" ? requestMicrophoneZhUrl : requestMicrophoneEnUrl;
239
+ const requestCameraUrl = lang === "zh-CN" ? requestCameraZhUrl : requestCameraEnUrl;
240
+ return new Promise((resolve, reject) => {
241
+ modal.warning({
242
+ title: t("hooks.useUserMedia.devicePermission", { deviceType }),
243
+ content: /* @__PURE__ */ React.createElement("div", null, /* @__PURE__ */ React.createElement(import_antd.Typography.Paragraph, null), /* @__PURE__ */ React.createElement(import_antd.Typography.Paragraph, null, /* @__PURE__ */ React.createElement(import_antd.Typography.Text, { strong: true }, t("hooks.useUserMedia.requestTip1", { deviceType }))), /* @__PURE__ */ React.createElement(import_antd.Typography.Paragraph, null, t("hooks.useUserMedia.requestTip2", { featureName })), /* @__PURE__ */ React.createElement("div", null, /* @__PURE__ */ React.createElement(
244
+ "img",
245
+ {
246
+ src: media.video ? requestCameraUrl : requestMicrophoneUrl,
247
+ alt: "microphone-permission",
248
+ style: { width: 380 }
249
+ }
250
+ ))),
251
+ onOk: () => {
252
+ try {
253
+ recordStream().then((recorder) => {
254
+ resolve(recorder);
255
+ }).catch((error) => {
256
+ reject(error);
257
+ });
258
+ } catch (error) {
259
+ console.error(error);
260
+ reject(error);
261
+ }
262
+ },
263
+ width: 500
264
+ });
265
+ });
266
+ } else {
267
+ return await recordStream();
268
+ }
269
+ }
270
+ } catch (error) {
271
+ console.error(error);
272
+ throw error;
273
+ }
274
+ });
275
+ const stopRecording = (0, import_useRefFunction.default)(() => {
276
+ var _a2, _b;
277
+ mediaRecorder == null ? void 0 : mediaRecorder.stop();
278
+ mediaStream == null ? void 0 : mediaStream.getTracks().forEach((t2) => t2.stop());
279
+ setMediaStream(void 0);
280
+ setIsRecording(false);
281
+ isSpeakingRef.current = false;
282
+ cancelAnimationFrame(rafRef.current);
283
+ (_a2 = stopSoundListeningRef.current) == null ? void 0 : _a2.call(stopSoundListeningRef);
284
+ (_b = closePcmRef.current) == null ? void 0 : _b.call(closePcmRef);
285
+ onStopRecording == null ? void 0 : onStopRecording();
286
+ });
287
+ const waitForSound = (0, import_useRefFunction.default)((mediaStream2) => {
288
+ const ctx = new AudioContext();
289
+ const source = ctx.createMediaStreamSource(mediaStream2);
290
+ const analyser = ctx.createAnalyser();
291
+ analyser.fftSize = 2048;
292
+ source.connect(analyser);
293
+ soundDetectStart.current = Date.now();
294
+ const data = new Uint8Array(analyser.fftSize);
295
+ const cancelDetect = () => {
296
+ var _a2;
297
+ cancelAnimationFrame(rafRef.current);
298
+ rafRef.current = 0;
299
+ (_a2 = stopSoundListeningRef.current) == null ? void 0 : _a2.call(stopSoundListeningRef);
300
+ };
301
+ const loop = () => {
302
+ analyser.getByteTimeDomainData(data);
303
+ let sum = 0;
304
+ for (const value of data) {
305
+ const v = (value - 128) / 128;
306
+ sum += v * v;
307
+ }
308
+ const rms = Math.sqrt(sum / data.length);
309
+ if (rms > silenceVolumeThresholdRef.current) {
310
+ if (!isSpeakingRef.current) {
311
+ isSpeakingRef.current = true;
312
+ cancelDetect();
313
+ return;
314
+ }
315
+ } else {
316
+ if (Date.now() > soundDetectStart.current + silenceDetectDurationRef.current) {
317
+ navigator.mediaDevices.enumerateDevices().then((devices) => {
318
+ const audioInputs = devices.filter((d) => d.kind === "audioinput");
319
+ modalRef.current.confirm({
320
+ title: t("hooks.useUserMedia.soundDetectTitle"),
321
+ content: /* @__PURE__ */ React.createElement(import_ConfigProvider.default, { ...contextRef.current }, /* @__PURE__ */ React.createElement(
322
+ SaveAudioDeviceForm,
323
+ {
324
+ devices: audioInputs,
325
+ mediaStream: mediaStream2,
326
+ onDeviceChange: (deviceId) => exactAudioDeviceIdRef.current = deviceId
327
+ }
328
+ )),
329
+ width: 500,
330
+ onOk: () => {
331
+ if (exactAudioDeviceIdRef.current) {
332
+ stopRecording();
333
+ setTimeout(() => {
334
+ startRecording();
335
+ });
336
+ }
337
+ },
338
+ onCancel: () => {
339
+ cancelDetect();
340
+ }
341
+ });
342
+ });
343
+ cancelDetect();
344
+ return;
345
+ }
346
+ }
347
+ rafRef.current = requestAnimationFrame(loop);
348
+ };
349
+ loop();
350
+ stopSoundListeningRef.current = () => {
351
+ analyser.disconnect();
352
+ source.disconnect();
353
+ ctx.close().catch(() => {
354
+ });
355
+ stopSoundListeningRef.current = void 0;
356
+ };
357
+ });
358
+ (0, import_react.useEffect)(() => {
359
+ return stopRecording;
360
+ }, []);
361
+ (0, import_react.useEffect)(() => {
362
+ if (disabled && isRecording) {
363
+ stopRecording();
364
+ }
365
+ }, [disabled, isRecording]);
366
+ (0, import_react.useEffect)(() => {
367
+ if (streamSliceMs && pcmStreamSlicerRef.current.timeSlice !== streamSliceMs) {
368
+ pcmStreamSlicerRef.current.timeSlice = streamSliceMs;
369
+ }
370
+ }, [streamSliceMs]);
371
+ (0, import_react.useEffect)(() => {
372
+ if (includeAudio && mediaStream && !isSpeakingRef.current) {
373
+ try {
374
+ waitForSound(mediaStream);
375
+ } catch (e) {
376
+ console.warn("Audio volume detecting failed:", e);
377
+ }
378
+ }
379
+ return () => {
380
+ var _a2;
381
+ (_a2 = stopSoundListeningRef.current) == null ? void 0 : _a2.call(stopSoundListeningRef);
382
+ };
383
+ }, [includeAudio, mediaStream, t]);
384
+ (0, import_react.useEffect)(() => {
385
+ requestMicrophoneEnUrlPromise.then((module2) => {
386
+ setRequestMicrophoneEnUrl(module2.default);
387
+ });
388
+ requestMicrophoneZhUrlPromise.then((module2) => {
389
+ setRequestMicrophoneZhUrl(module2.default);
390
+ });
391
+ resetMicrophoneEnUrlPromise.then((module2) => {
392
+ setResetMicrophoneEnUrl(module2.default);
393
+ });
394
+ resetMicrophoneZhUrlPromise.then((module2) => {
395
+ setResetMicrophoneZhUrl(module2.default);
396
+ });
397
+ requestCameraEnUrlPromise.then((module2) => {
398
+ setRequestCameraEnUrl(module2.default);
399
+ });
400
+ requestCameraZhUrlPromise.then((module2) => {
401
+ setRequestCameraZhUrl(module2.default);
402
+ });
403
+ resetCameraEnUrlPromise.then((module2) => {
404
+ setResetCameraEnUrl(module2.default);
405
+ });
406
+ resetCameraZhUrlPromise.then((module2) => {
407
+ setResetCameraZhUrl(module2.default);
408
+ });
409
+ }, []);
410
+ return {
411
+ isRecording,
412
+ startRecording,
413
+ stopRecording
414
+ };
415
+ };
416
/**
 * Form that lets the user choose a default microphone device and, optionally,
 * shows localized screenshots explaining how to remember the choice in the
 * browser permanently.
 *
 * @param {object} props
 * @param {MediaDeviceInfo[]} props.devices - enumerated media devices; only
 *   `audioinput` entries are offered.
 * @param {MediaStream} props.mediaStream - current stream; its first audio
 *   track's deviceId seeds the initial selection.
 * @param {(deviceId: string) => void} props.onDeviceChange - called when the
 *   user picks a different input device.
 */
function SaveAudioDeviceForm(props) {
  const { devices, mediaStream, onDeviceChange } = props;
  const [form] = import_antd.Form.useForm();
  const t = (0, import_useT.default)();
  const { lang } = (0, import_react.useContext)(import_context.default);
  const [saveAudioDeviceEnUrl1, setSaveAudioDeviceEnUrl1] = (0, import_react.useState)();
  const [saveAudioDeviceEnUrl2, setSaveAudioDeviceEnUrl2] = (0, import_react.useState)();
  const [saveAudioDeviceZhUrl1, setSaveAudioDeviceZhUrl1] = (0, import_react.useState)();
  const [saveAudioDeviceZhUrl2, setSaveAudioDeviceZhUrl2] = (0, import_react.useState)();
  const [saveDefaultAudioDevicePermanently, setSaveDefaultAudioDevicePermanently] = (0, import_react.useState)(false);
  const audioInputs = (0, import_react.useMemo)(() => devices.filter((d) => d.kind === "audioinput"), [devices]);
  // Preselect the device backing the stream's first audio track, falling back
  // to the first enumerated audio input.
  const [selectedDeviceId, setSelectedDeviceId] = (0, import_react.useState)(
    () => {
      var _a, _b, _c;
      return ((_b = (_a = mediaStream.getAudioTracks()[0]) == null ? void 0 : _a.getSettings()) == null ? void 0 : _b.deviceId) ?? ((_c = audioInputs[0]) == null ? void 0 : _c.deviceId);
    }
  );
  // Open a base64 data-URL image in a new tab. Modern browsers refuse to
  // window.open a data: URL directly, so decode it into a Blob first.
  const openDataImageInNewTab = (0, import_useRefFunction.default)((dataUrl) => {
    var _a;
    if (!dataUrl)
      return;
    const [meta, b64] = dataUrl.split(",");
    // FIX: guard against a malformed/non-base64 data URL — atob(undefined)
    // would throw an uncaught exception here.
    if (!b64)
      return;
    const mime = ((_a = meta.match(/data:(.*);base64/)) == null ? void 0 : _a[1]) || "image/png";
    const binary = atob(b64);
    const len = binary.length;
    const bytes = new Uint8Array(len);
    for (let i = 0; i < len; i++)
      bytes[i] = binary.charCodeAt(i);
    const blob = new Blob([bytes], { type: mime });
    const url = URL.createObjectURL(blob);
    window.open(url);
    // FIX: the object URL was previously never revoked, leaking the Blob for
    // the lifetime of the document on every click. Revoke after the new tab
    // has had ample time to fetch the resource.
    setTimeout(() => URL.revokeObjectURL(url), 6e4);
  });
  // Resolve the lazily-imported "save default device" tutorial screenshots
  // (two steps, EN/ZH variants) into state once on mount.
  (0, import_react.useEffect)(() => {
    saveAudioDeviceEnUrlPromise1.then((module2) => {
      setSaveAudioDeviceEnUrl1(module2.default);
    });
    saveAudioDeviceEnUrlPromise2.then((module2) => {
      setSaveAudioDeviceEnUrl2(module2.default);
    });
    saveAudioDeviceZhUrlPromise1.then((module2) => {
      setSaveAudioDeviceZhUrl1(module2.default);
    });
    saveAudioDeviceZhUrlPromise2.then((module2) => {
      setSaveAudioDeviceZhUrl2(module2.default);
    });
  }, []);
  // Layout: description text, device <Select>, a "remember permanently"
  // checkbox, and — when checked — the two clickable tutorial screenshots.
  return /* @__PURE__ */ React.createElement(import_antd.Form, { layout: "vertical", form }, /* @__PURE__ */ React.createElement(import_antd.Typography.Paragraph, null), /* @__PURE__ */ React.createElement(import_antd.Typography.Paragraph, null, /* @__PURE__ */ React.createElement(import_antd.Typography.Text, null, t("hooks.useUserMedia.soundDetectDescription"))), /* @__PURE__ */ React.createElement(import_antd.Form.Item, { label: t("hooks.useUserMedia.chooseMicrophoneDevice") }, /* @__PURE__ */ React.createElement(
    import_antd.Select,
    {
      options: audioInputs.map((input) => ({
        label: input.label,
        value: input.deviceId
      })),
      defaultValue: selectedDeviceId,
      onChange: (id) => {
        setSelectedDeviceId(id);
        onDeviceChange(id);
      }
    }
  )), /* @__PURE__ */ React.createElement(import_antd.Form.Item, { style: { marginBottom: 0 } }, /* @__PURE__ */ React.createElement(import_antd.Checkbox, { onChange: (e) => setSaveDefaultAudioDevicePermanently(e.target.checked) }, t("hooks.useUserMedia.rememberDefaultAudioDevice"))), saveDefaultAudioDevicePermanently && /* @__PURE__ */ React.createElement("div", null, /* @__PURE__ */ React.createElement(import_antd.Typography.Paragraph, null, /* @__PURE__ */ React.createElement(import_antd.Typography.Text, null, t("hooks.useUserMedia.rememberDefaultAudioDeviceTip"))), /* @__PURE__ */ React.createElement(import_antd.Flex, { gap: 8, align: "flex-start" }, /* @__PURE__ */ React.createElement("div", { style: { flex: 1, minWidth: 0 } }, /* @__PURE__ */ React.createElement(
    "a",
    {
      href: "#",
      onClick: (e) => {
        e.preventDefault();
        openDataImageInNewTab(lang === "zh-CN" ? saveAudioDeviceZhUrl1 : saveAudioDeviceEnUrl1);
      }
    },
    /* @__PURE__ */ React.createElement(
      "img",
      {
        src: lang === "zh-CN" ? saveAudioDeviceZhUrl1 : saveAudioDeviceEnUrl1,
        alt: "the first step to save default audio device",
        style: { width: "100%", height: "auto" }
      }
    )
  )), /* @__PURE__ */ React.createElement("div", { style: { flex: 1, minWidth: 0 } }, /* @__PURE__ */ React.createElement(
    "a",
    {
      href: "#",
      onClick: (e) => {
        e.preventDefault();
        openDataImageInNewTab(lang === "zh-CN" ? saveAudioDeviceZhUrl2 : saveAudioDeviceEnUrl2);
      }
    },
    /* @__PURE__ */ React.createElement(
      "img",
      {
        src: lang === "zh-CN" ? saveAudioDeviceZhUrl2 : saveAudioDeviceEnUrl2,
        alt: "the second step to save default audio device",
        style: { width: "100%", height: "auto" }
      }
    )
  )))));
}
511
/**
 * Build an AudioWorklet module on the fly and expose it as a blob: URL.
 *
 * The worklet copies each 128-frame input quantum per channel and posts it to
 * the main thread as `{ type: 'pcm', channels }`, transferring the underlying
 * ArrayBuffers to avoid copies across the thread boundary.
 *
 * @returns {string} a blob: URL suitable for `audioWorklet.addModule()`.
 *   The caller owns the URL and is responsible for revoking it.
 */
function generatePcmCaptureProcessorModule() {
  const source = `
class PcmCaptureProcessor extends AudioWorkletProcessor {
  process(inputs, outputs, parameters) {
    const channelsIn = inputs[0];
    if (channelsIn && channelsIn[0]) {
      const channels = channelsIn.map((ch) => {
        const copy = new Float32Array(ch.length);
        copy.set(ch);
        return copy;
      });
      this.port.postMessage({ type: 'pcm', channels }, channels.map(ch => ch.buffer));
    }
    return true;
  }
}
registerProcessor('pcm-capture', PcmCaptureProcessor);
`;
  const moduleBlob = new Blob([source], { type: "application/javascript" });
  return URL.createObjectURL(moduleBlob);
}
533
/**
 * Fallback PCM capture path for browsers without AudioWorklet support, using
 * the deprecated ScriptProcessorNode.
 *
 * @param {object} options
 * @param {AudioContext} options.ctx - audio context owning the graph.
 * @param {AudioNode} options.sourceNode - node producing the audio to capture.
 * @param {{ push(channels: Float32Array[]): void }} options.streamSlicer -
 *   sink receiving one Float32Array per channel for each processed buffer.
 */
function fallbackScriptProcessor(options) {
  const { ctx, sourceNode, streamSlicer } = options;
  // FIX: ScriptProcessorNode only accepts a bufferSize of 0 (implementation
  // chosen) or a power of two between 256 and 16384; the previous value of
  // 128 throws an IndexSizeError in spec-compliant browsers. 256 is the
  // smallest legal size, closest to the AudioWorklet's 128-frame quantum.
  const bufferSize = 256;
  const processor = ctx.createScriptProcessor(bufferSize, 1, 1);
  processor.onaudioprocess = (ev) => {
    // Copy every channel out of the input buffer before forwarding — the
    // engine may recycle the underlying buffer after the callback returns.
    const channels = [];
    for (let i = 0; i < ev.inputBuffer.numberOfChannels; i++) {
      const input = ev.inputBuffer.getChannelData(i);
      const copy = new Float32Array(input.length);
      copy.set(input);
      channels.push(copy);
    }
    streamSlicer.push(channels);
  };
  sourceNode.connect(processor);
  // A ScriptProcessorNode only fires onaudioprocess while connected to the
  // destination; route it through a zero-gain node so nothing is audible.
  const gain = ctx.createGain();
  gain.gain.value = 0;
  processor.connect(gain).connect(ctx.destination);
}
552
// Bundler-generated default-export alias for the useUserMedia hook.
var useUserMedia_default = useUserMedia;
//# sourceMappingURL=useUserMedia.js.map