@lobehub/ui 1.111.2 → 1.112.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/es/useTTS/hooks/useAzureSpeech.d.ts +4 -5
- package/es/useTTS/hooks/useAzureSpeech.js +39 -14
- package/es/useTTS/hooks/useMicrosoftSpeech.d.ts +4 -3
- package/es/useTTS/hooks/useMicrosoftSpeech.js +37 -12
- package/es/useTTS/hooks/usePressSpeechRecognition.d.ts +6 -0
- package/es/useTTS/hooks/usePressSpeechRecognition.js +39 -0
- package/es/useTTS/hooks/useSpeechSynthes.js +2 -2
- package/es/useTTS/index.d.ts +1 -0
- package/es/useTTS/index.js +1 -0
- package/es/useTTS/services/postAzureSpeech.d.ts +6 -5
- package/es/useTTS/services/postAzureSpeech.js +48 -20
- package/es/useTTS/services/postMicrosoftSpeech.d.ts +4 -1
- package/es/useTTS/services/postMicrosoftSpeech.js +30 -48
- package/package.json +1 -1
package/es/useTTS/hooks/useAzureSpeech.d.ts
CHANGED
@@ -1,10 +1,9 @@
-/// <reference types="node" />
 /// <reference types="react" />
-import {
-
-
-    data: Buffer | undefined;
+import { AzureSpeechOptions } from '../services/postAzureSpeech';
+export declare const useAzureSpeech: (defaultText: string, options: AzureSpeechOptions) => {
+    data: AudioBufferSourceNode | undefined;
     isLoading: boolean;
+    isPlaying: boolean;
     setText: import("react").Dispatch<import("react").SetStateAction<string>>;
     start: () => void;
     stop: () => void;
package/es/useTTS/hooks/useAzureSpeech.js
CHANGED
@@ -2,37 +2,62 @@ import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
 import { useState } from 'react';
 import useSWR from 'swr';
 import { postAzureSpeech } from "../services/postAzureSpeech";
-export var useAzureSpeech = function useAzureSpeech(defaultText, options
-  var _useState = useState(
+export var useAzureSpeech = function useAzureSpeech(defaultText, options) {
+  var _useState = useState(),
     _useState2 = _slicedToArray(_useState, 2),
-
-
-  var _useState3 = useState(
+    data = _useState2[0],
+    setDate = _useState2[1];
+  var _useState3 = useState(defaultText),
     _useState4 = _slicedToArray(_useState3, 2),
-
-
+    text = _useState4[0],
+    setText = _useState4[1];
+  var _useState5 = useState(false),
+    _useState6 = _slicedToArray(_useState5, 2),
+    shouldFetch = _useState6[0],
+    setShouldFetch = _useState6[1];
+  var _useState7 = useState(false),
+    _useState8 = _slicedToArray(_useState7, 2),
+    isPlaying = _useState8[0],
+    setIsPlaying = _useState8[1];
   var _useSWR = useSWR(shouldFetch ? [options.name, text].join('-') : null, function () {
-      return postAzureSpeech(text, options
+      return postAzureSpeech(text, options);
     }, {
-      onError: function onError(
-        setShouldFetch(false);
-        console.error(error);
+      onError: function onError() {
+        return setShouldFetch(false);
       },
-      onSuccess: function onSuccess() {
+      onSuccess: function onSuccess(audioBufferSource) {
         setShouldFetch(false);
+        setIsPlaying(true);
+        setDate(audioBufferSource);
+        audioBufferSource.start();
+        audioBufferSource.addEventListener('ended', function () {
+          setShouldFetch(false);
+          setIsPlaying(false);
+        });
       }
     }),
-    isLoading = _useSWR.isLoading
-    data = _useSWR.data;
+    isLoading = _useSWR.isLoading;
   return {
     data: data,
     isLoading: isLoading,
+    isPlaying: isPlaying,
     setText: setText,
     start: function start() {
+      if (isPlaying || shouldFetch) return;
       setShouldFetch(true);
+      if (!data) return;
+      try {
+        setIsPlaying(true);
+        data === null || data === void 0 || data.start();
+      } catch (_unused) {}
     },
     stop: function stop() {
+      if (!isPlaying) return;
       setShouldFetch(false);
+      setIsPlaying(false);
+      try {
+        data === null || data === void 0 || data.stop();
+      } catch (_unused2) {}
     }
   };
 };
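Both the Azure and Microsoft speech hooks gain the same surface in this release: data changes from Buffer to AudioBufferSourceNode, a new isPlaying flag tracks playback, and start/stop now guard against duplicate fetches and can replay or halt the decoded buffer. A minimal usage sketch against the Azure variant (the voice name and credential values are placeholders, not values from the package):

    import { useAzureSpeech } from '@lobehub/ui';

    const TTSButton = () => {
      const { isLoading, isPlaying, start, stop } = useAzureSpeech('Hello world', {
        // `api` is required by the new AzureSpeechOptions; values are placeholders
        api: { key: 'YOUR_AZURE_SPEECH_KEY', region: 'eastus' },
        // SSML option: `name` selects the voice and also feeds the SWR cache key
        name: 'en-US-JennyNeural',
      });

      // start() is a no-op while fetching or playing; stop() halts the source node
      return (
        <button disabled={isLoading} onClick={isPlaying ? stop : start}>
          {isPlaying ? 'Stop' : 'Speak'}
        </button>
      );
    };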
package/es/useTTS/hooks/useMicrosoftSpeech.d.ts
CHANGED
@@ -1,8 +1,9 @@
 /// <reference types="react" />
-import {
-export declare const useMicrosoftSpeech: (defaultText: string, options:
-    data:
+import { type MicrosoftSpeechOptions } from '../services/postMicrosoftSpeech';
+export declare const useMicrosoftSpeech: (defaultText: string, options: MicrosoftSpeechOptions) => {
+    data: AudioBufferSourceNode | undefined;
     isLoading: boolean;
+    isPlaying: boolean;
     setText: import("react").Dispatch<import("react").SetStateAction<string>>;
     start: () => void;
     stop: () => void;
package/es/useTTS/hooks/useMicrosoftSpeech.js
CHANGED
@@ -3,36 +3,61 @@ import { useState } from 'react';
 import useSWR from 'swr';
 import { postMicrosoftSpeech } from "../services/postMicrosoftSpeech";
 export var useMicrosoftSpeech = function useMicrosoftSpeech(defaultText, options) {
-  var _useState = useState(
+  var _useState = useState(),
     _useState2 = _slicedToArray(_useState, 2),
-
-
-  var _useState3 = useState(
+    data = _useState2[0],
+    setDate = _useState2[1];
+  var _useState3 = useState(defaultText),
     _useState4 = _slicedToArray(_useState3, 2),
-
-
+    text = _useState4[0],
+    setText = _useState4[1];
+  var _useState5 = useState(false),
+    _useState6 = _slicedToArray(_useState5, 2),
+    shouldFetch = _useState6[0],
+    setShouldFetch = _useState6[1];
+  var _useState7 = useState(false),
+    _useState8 = _slicedToArray(_useState7, 2),
+    isPlaying = _useState8[0],
+    setIsPlaying = _useState8[1];
   var _useSWR = useSWR(shouldFetch ? [options.name, text].join('-') : null, function () {
       return postMicrosoftSpeech(text, options);
     }, {
-      onError: function onError(
-        setShouldFetch(false);
-        console.error(error);
+      onError: function onError() {
+        return setShouldFetch(false);
       },
-      onSuccess: function onSuccess() {
+      onSuccess: function onSuccess(audioBufferSource) {
        setShouldFetch(false);
+        setIsPlaying(true);
+        setDate(audioBufferSource);
+        audioBufferSource.start();
+        audioBufferSource.addEventListener('ended', function () {
+          setShouldFetch(false);
+          setIsPlaying(false);
+        });
       }
     }),
-    isLoading = _useSWR.isLoading
-    data = _useSWR.data;
+    isLoading = _useSWR.isLoading;
   return {
     data: data,
     isLoading: isLoading,
+    isPlaying: isPlaying,
     setText: setText,
     start: function start() {
+      if (isPlaying || shouldFetch) return;
       setShouldFetch(true);
+      if (!data) return;
+      try {
+        setIsPlaying(true);
+        data === null || data === void 0 || data.start();
+      } catch (_unused) {}
     },
     stop: function stop() {
+      if (!isPlaying) return;
       setShouldFetch(false);
+      setIsPlaying(false);
+      try {
+        data === null || data === void 0 || data.stop();
+      } catch (_unused2) {}
     }
   };
 };
package/es/useTTS/hooks/usePressSpeechRecognition.js
ADDED
@@ -0,0 +1,39 @@
+import _toConsumableArray from "@babel/runtime/helpers/esm/toConsumableArray";
+import _slicedToArray from "@babel/runtime/helpers/esm/slicedToArray";
+import { useEffect, useState } from 'react';
+import { useSpeechRecognition } from "./useSpeechRecognition";
+export var usePressSpeechRecognition = function usePressSpeechRecognition(locale) {
+  var _useState = useState([]),
+    _useState2 = _slicedToArray(_useState, 2),
+    texts = _useState2[0],
+    setTexts = _useState2[1];
+  var _useState3 = useState(false),
+    _useState4 = _slicedToArray(_useState3, 2),
+    isGLobalLoading = _useState4[0],
+    setIsGlobalLoading = _useState4[1];
+  var _useSpeechRecognition = useSpeechRecognition(locale),
+    text = _useSpeechRecognition.text,
+    _stop = _useSpeechRecognition.stop,
+    _start = _useSpeechRecognition.start,
+    isLoading = _useSpeechRecognition.isLoading;
+  useEffect(function () {
+    if (!isLoading && text && texts.at(-1) !== text) {
+      setTexts([].concat(_toConsumableArray(texts), [text]));
+      _stop();
+      _start();
+    }
+  }, [isLoading, texts, text]);
+  return {
+    isLoading: isGLobalLoading,
+    start: function start() {
+      setTexts([]);
+      setIsGlobalLoading(true);
+      _start();
+    },
+    stop: function stop() {
+      _stop();
+      setIsGlobalLoading(false);
+    },
+    text: [].concat(_toConsumableArray(texts), [text]).filter(Boolean).join(',')
+  };
+};
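The new usePressSpeechRecognition hook wraps useSpeechRecognition for press-to-talk input: each time a final segment arrives it stores the text and restarts recognition so long dictation is not cut off, exposing the accumulated segments joined with commas. A sketch (the locale string is an assumption based on the wrapped hook's signature):

    import { usePressSpeechRecognition } from '@lobehub/ui';

    const DictateButton = () => {
      const { text, isLoading, start, stop } = usePressSpeechRecognition('en-US');

      // Hold to record, release to stop; `text` accumulates across restarts
      return (
        <div>
          <button onMouseDown={start} onMouseUp={stop}>
            {isLoading ? 'Listening...' : 'Hold to talk'}
          </button>
          <p>{text}</p>
        </div>
      );
    };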
package/es/useTTS/hooks/useSpeechSynthes.js
CHANGED
@@ -18,8 +18,8 @@ export var useSpeechSynthes = function useSpeechSynthes(defaultText, options) {
     utterance.voice = voiceList.find(function (item) {
       return item.name === options.name;
     });
-    if (options.pitch) utterance.pitch = options.pitch;
-    if (options.rate) utterance.rate = options.rate;
+    if (options.pitch) utterance.pitch = options.pitch * 10;
+    if (options.rate) utterance.rate = options.rate * 10;
     return utterance;
   }, [text, voiceList, options]);
   speechSynthesis.onvoiceschanged = function () {
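The useSpeechSynthes change rescales pitch and rate by 10 before assigning them to the SpeechSynthesisUtterance, which suggests callers now pass small normalized values rather than raw Web Speech values (utterance.rate accepts 0.1-10 and utterance.pitch 0-2; browsers clamp out-of-range values). For example, assuming a normalized option value of 0.1:

    // 0.1 * 10 = 1, i.e. normal rate and pitch under the new scaling
    const utterance = new SpeechSynthesisUtterance('Hello');
    utterance.rate = 0.1 * 10;  // Web Speech rate range: 0.1-10
    utterance.pitch = 0.1 * 10; // Web Speech pitch range: 0-2
    speechSynthesis.speak(utterance);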
package/es/useTTS/index.d.ts
CHANGED
@@ -1,6 +1,7 @@
 export { useAzureSpeech } from './hooks/useAzureSpeech';
 export { useEdgeSpeech } from './hooks/useEdgeSpeech';
 export { useMicrosoftSpeech } from './hooks/useMicrosoftSpeech';
+export { usePressSpeechRecognition } from './hooks/usePressSpeechRecognition';
 export { useSpeechRecognition } from './hooks/useSpeechRecognition';
 export { useSpeechSynthes } from './hooks/useSpeechSynthes';
 export { getAzureVoiceList, getEdgeVoiceList, getSpeechSynthesVoiceList, } from './utils/getVoiceList';
package/es/useTTS/index.js
CHANGED
@@ -1,6 +1,7 @@
 export { useAzureSpeech } from "./hooks/useAzureSpeech";
 export { useEdgeSpeech } from "./hooks/useEdgeSpeech";
 export { useMicrosoftSpeech } from "./hooks/useMicrosoftSpeech";
+export { usePressSpeechRecognition } from "./hooks/usePressSpeechRecognition";
 export { useSpeechRecognition } from "./hooks/useSpeechRecognition";
 export { useSpeechSynthes } from "./hooks/useSpeechSynthes";
 export { getAzureVoiceList, getEdgeVoiceList, getSpeechSynthesVoiceList } from "./utils/getVoiceList";
package/es/useTTS/services/postAzureSpeech.d.ts
CHANGED
@@ -1,7 +1,8 @@
-/// <reference types="node" />
 import { type SsmlOptions } from '../utils/genSSML';
-export interface
-
-
+export interface AzureSpeechOptions extends SsmlOptions {
+    api: {
+        key: string;
+        region: string;
+    };
 }
-export declare const postAzureSpeech: (text: string,
+export declare const postAzureSpeech: (text: string, { api, ...options }: AzureSpeechOptions) => Promise<AudioBufferSourceNode>;
package/es/useTTS/services/postAzureSpeech.js
CHANGED
@@ -1,45 +1,73 @@
+import _objectWithoutProperties from "@babel/runtime/helpers/esm/objectWithoutProperties";
 import _asyncToGenerator from "@babel/runtime/helpers/esm/asyncToGenerator";
+var _excluded = ["api"];
 import _regeneratorRuntime from "@babel/runtime/regenerator";
 import { AudioConfig, PropertyId, ResultReason, SpeechConfig, SpeechSynthesisOutputFormat, SpeechSynthesizer } from 'microsoft-cognitiveservices-speech-sdk';
 import { genSSML } from "../utils/genSSML";
 // Generate speech from plain text
 export var postAzureSpeech = /*#__PURE__*/function () {
-  var
-  var speechConfig, audioConfig, synthesizer, completeCb, errCb;
-  return _regeneratorRuntime.wrap(function
-    while (1) switch (
+  var _ref2 = _asyncToGenerator( /*#__PURE__*/_regeneratorRuntime.mark(function _callee2(text, _ref) {
+    var api, options, key, region, speechConfig, audioConfig, synthesizer, completeCb, errCb;
+    return _regeneratorRuntime.wrap(function _callee2$(_context2) {
+      while (1) switch (_context2.prev = _context2.next) {
        case 0:
-
+          api = _ref.api, options = _objectWithoutProperties(_ref, _excluded);
+          key = api.key || process.env.AZURE_SPEECH_KEY || '';
+          region = api.key || process.env.AZURE_SPEECH_REGION || '';
+          speechConfig = SpeechConfig.fromSubscription(key, region);
          speechConfig.setProperty(PropertyId.SpeechServiceResponse_RequestSentenceBoundary, 'true');
          speechConfig.speechSynthesisOutputFormat = SpeechSynthesisOutputFormat.Webm24Khz16BitMonoOpus;
          audioConfig = AudioConfig.fromDefaultSpeakerOutput();
          synthesizer = new SpeechSynthesizer(speechConfig, audioConfig);
-          completeCb = function
-
-
-
-
-
-
-
+          completeCb = /*#__PURE__*/function () {
+            var _ref3 = _asyncToGenerator( /*#__PURE__*/_regeneratorRuntime.mark(function _callee(result, resolve) {
+              var audioData, audioContext, audioBufferSource;
+              return _regeneratorRuntime.wrap(function _callee$(_context) {
+                while (1) switch (_context.prev = _context.next) {
+                  case 0:
+                    if (!(result.reason === ResultReason.SynthesizingAudioCompleted)) {
+                      _context.next = 9;
+                      break;
+                    }
+                    audioData = result.audioData;
+                    audioContext = new AudioContext();
+                    audioBufferSource = audioContext.createBufferSource();
+                    _context.next = 6;
+                    return audioContext.decodeAudioData(audioData);
+                  case 6:
+                    audioBufferSource.buffer = _context.sent;
+                    audioBufferSource.connect(audioContext.destination);
+                    resolve(audioBufferSource);
+                  case 9:
+                    synthesizer.close();
+                  case 10:
+                  case "end":
+                    return _context.stop();
+                }
+              }, _callee);
+            }));
+            return function completeCb(_x3, _x4) {
+              return _ref3.apply(this, arguments);
+            };
+          }();
          errCb = function errCb(err, reject) {
            reject(err);
            synthesizer.close();
          };
-          return
+          return _context2.abrupt("return", new Promise(function (resolve, reject) {
            synthesizer.speakSsmlAsync(genSSML(text, options), function (result) {
-              return completeCb(result, resolve
+              return completeCb(result, resolve);
            }, function (err) {
              return errCb(err, reject);
            });
          }));
-        case
+        case 11:
        case "end":
-          return
+          return _context2.stop();
      }
-    },
+    }, _callee2);
  }));
-  return function postAzureSpeech(_x, _x2
-    return
+  return function postAzureSpeech(_x, _x2) {
+    return _ref2.apply(this, arguments);
  };
 }();
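postAzureSpeech now takes its credentials from an api object (falling back to environment variables) and resolves to a decoded, connected AudioBufferSourceNode instead of a raw Buffer. A de-Babel'd reading of the new option handling (a sketch for readability, not the package source):

    import { SpeechConfig } from 'microsoft-cognitiveservices-speech-sdk';

    const resolveAzureConfig = (api: { key: string; region: string }) => {
      const key = api.key || process.env.AZURE_SPEECH_KEY || '';
      // Note: the compiled output also reads api.key on the next line, which
      // looks like a typo for api.region; the region env fallback still applies.
      const region = api.key || process.env.AZURE_SPEECH_REGION || '';
      return SpeechConfig.fromSubscription(key, region);
    };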
package/es/useTTS/services/postMicrosoftSpeech.d.ts
CHANGED
@@ -1,2 +1,5 @@
 import { type SsmlOptions } from '../utils/genSSML';
-export
+export interface MicrosoftSpeechOptions extends SsmlOptions {
+    api?: string;
+}
+export declare const postMicrosoftSpeech: (text: string, { api, ...options }: MicrosoftSpeechOptions) => Promise<AudioBufferSourceNode>;
package/es/useTTS/services/postMicrosoftSpeech.js
CHANGED
@@ -1,64 +1,46 @@
+import _defineProperty from "@babel/runtime/helpers/esm/defineProperty";
+import _objectWithoutProperties from "@babel/runtime/helpers/esm/objectWithoutProperties";
 import _asyncToGenerator from "@babel/runtime/helpers/esm/asyncToGenerator";
+var _excluded = ["api"];
 import _regeneratorRuntime from "@babel/runtime/regenerator";
-
-
-
+function ownKeys(e, r) { var t = Object.keys(e); if (Object.getOwnPropertySymbols) { var o = Object.getOwnPropertySymbols(e); r && (o = o.filter(function (r) { return Object.getOwnPropertyDescriptor(e, r).enumerable; })), t.push.apply(t, o); } return t; }
+function _objectSpread(e) { for (var r = 1; r < arguments.length; r++) { var t = null != arguments[r] ? arguments[r] : {}; r % 2 ? ownKeys(Object(t), !0).forEach(function (r) { _defineProperty(e, r, t[r]); }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(e, Object.getOwnPropertyDescriptors(t)) : ownKeys(Object(t)).forEach(function (r) { Object.defineProperty(e, r, Object.getOwnPropertyDescriptor(t, r)); }); } return e; }
+import qs from 'query-string';
 export var postMicrosoftSpeech = /*#__PURE__*/function () {
-  var
-  var
+  var _ref2 = _asyncToGenerator( /*#__PURE__*/_regeneratorRuntime.mark(function _callee(text, _ref) {
+    var api, options, response, audioData, audioContext, audioBufferSource;
    return _regeneratorRuntime.wrap(function _callee$(_context) {
      while (1) switch (_context.prev = _context.next) {
        case 0:
-
-
-
-
-
-
-
-          });
-
-            'accept': '*/*',
-            'accept-language': 'zh-CN,zh;q=0.9',
-            'authority': 'southeastasia.api.speech.microsoft.com',
-            'content-type': 'application/json',
-            'customvoiceconnectionid': uuidv4(),
-            'origin': 'https://speech.microsoft.com',
-            'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-site',
-            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
-          };
-          _context.prev = 2;
-          _context.next = 5;
-          return fetch(API, {
-            body: data,
-            headers: DEFAULT_HEADERS,
-            method: 'POST',
-            // @ts-ignore
-            responseType: 'arraybuffer'
-          });
-        case 5:
+          api = _ref.api, options = _objectWithoutProperties(_ref, _excluded);
+          _context.next = 3;
+          return fetch(qs.stringifyUrl({
+            query: _objectSpread({
+              text: text
+            }, options),
+            url: api || process.env.MICROSOFT_SPEECH_PROXY_URL || ''
+          }));
+        case 3:
          response = _context.sent;
-          _context.next =
+          _context.next = 6;
          return response.arrayBuffer();
-        case
-
+        case 6:
+          audioData = _context.sent;
+          audioContext = new AudioContext();
+          audioBufferSource = audioContext.createBufferSource();
+          _context.next = 11;
+          return audioContext.decodeAudioData(audioData);
        case 11:
-
-
-
-
-        case 15:
+          audioBufferSource.buffer = _context.sent;
+          audioBufferSource.connect(audioContext.destination);
+          return _context.abrupt("return", audioBufferSource);
+        case 14:
        case "end":
          return _context.stop();
      }
-    }, _callee
+    }, _callee);
  }));
  return function postMicrosoftSpeech(_x, _x2) {
-    return
+    return _ref2.apply(this, arguments);
  };
 }();
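postMicrosoftSpeech drops the direct POST to southeastasia.api.speech.microsoft.com with hard-coded browser headers; it now issues a GET built with qs.stringifyUrl against a configurable proxy (the api option, falling back to MICROSOFT_SPEECH_PROXY_URL) and decodes the response into an AudioBufferSourceNode. The resulting call shape (proxy URL and voice name are placeholders; name is assumed to come from SsmlOptions):

    // Assumes a proxy endpoint that accepts text and SSML options as query params
    const source = await postMicrosoftSpeech('Hello world', {
      api: 'https://example.com/api/microsoft-speech',
      name: 'en-US-JennyNeural',
    });
    // The returned node is already connected to the AudioContext destination
    source.start();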