@94ai/softphone 5.0.10 → 5.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,320 @@
1
// Map from wire-format encoding name to the TypedArray constructor used to
// reinterpret raw PCM bytes before normalization (consumed by getFormatedValue).
const TYPED_ARRAYS = {
  '8bitInt': Int8Array,
  '16bitInt': Int16Array,
  '32bitInt': Int32Array,
  '32bitFloat': Float32Array
}

// Per-encoding divisor that maps a sample to the [-1, 1) float range:
// 2^(bits-1) for signed integer formats, 1 for data that is already float.
const ENCODINGS = {
  '8bitInt': 128,
  '16bitInt': 32768,
  '32bitInt': 2147483648,
  '32bitFloat': 1
}
14
+
15
/**
 * Loose check that `data` is a non-empty TypedArray view backed by a plain
 * ArrayBuffer. Note: like the original, a zero-length view is rejected
 * (`byteLength` of 0 short-circuits), which lets feed() skip empty payloads.
 *
 * @param {*} data - candidate value; may be anything, including null/undefined.
 * @returns {boolean} true only for a non-empty ArrayBuffer-backed view.
 */
const isTypedArray = (data) => {
  // Guard against null/undefined (the original threw a TypeError here) and
  // coerce the short-circuiting && chain to an actual boolean.
  return Boolean(data && data.byteLength && data.buffer && data.buffer.constructor === ArrayBuffer)
}
18
+
19
/**
 * Convert raw PCM samples to normalized Float32 values in [-1, 1).
 * (Name kept as-is — "Formated" — because callers reference it.)
 *
 * @param {TypedArray} value - raw sample view over an ArrayBuffer.
 * @param {string} encoding - one of the ENCODINGS keys; unknown values fall
 *   back to '16bitInt'.
 * @returns {Float32Array} one normalized sample per decoded element.
 */
const getFormatedValue = (value, encoding) => {
  const TypedArray = TYPED_ARRAYS[encoding] || TYPED_ARRAYS['16bitInt']
  const divisor = ENCODINGS[encoding] || ENCODINGS['16bitInt']
  // Honor the view's byteOffset/byteLength: the original re-read the WHOLE
  // underlying buffer, which is wrong for views aliasing part of a larger
  // buffer (e.g. a subarray or a slice of a pooled buffer).
  const data = new TypedArray(value.buffer, value.byteOffset, value.byteLength / TypedArray.BYTES_PER_ELEMENT)
  // Size the output from the decoded view, not the input view: the two counts
  // differ whenever the encoding's element width differs from the input's.
  const float32 = new Float32Array(data.length)
  for (let i = 0; i < data.length; i++) {
    float32[i] = data[i] / divisor
  }
  return float32
}
30
+
31
/**
 * WebrtcDiver intercepts WebRTC outbound audio so that arbitrary PCM data
 * (fed over a local WebSocket) can be injected in place of the microphone.
 * It does this by monkey-patching window.RTCPeerConnection (and its
 * addTrack/addStream methods) and by re-patching whenever third-party code
 * tries to restore the originals. Browser-only: relies on AudioContext,
 * requestAnimationFrame, WebCodecs AudioData and MediaStreamTrackGenerator.
 */
class WebrtcDiver {
  // Saved originals so intercepted calls can delegate to the real WebRTC API.
  static normalRtc = window.RTCPeerConnection
  static normalAddTrack = window.RTCPeerConnection.prototype.addTrack
  static normalAddStream = window.RTCPeerConnection.prototype.addStream
  // Writers of MediaStreamTrackGenerator.writable streams; flush() pushes
  // AudioData frames into every writer registered here.
  static writers = []
  static stream
  static track
  // Every RTCPeerConnection constructed after monitorResetRtcPrototype() ran.
  static rtcList = []
  // WebSocket carrying raw PCM from ws://127.0.0.1:8899 (see openPcmData).
  static ws
  static audioTrackGenerator

  // Defaults merged with the constructor's `option` argument.
  options = {
    encoding: '16bitInt',
    channels: 2,
    sampleRate: 44100,
  }

  audioCtx    // AudioContext used for timing and buffer creation
  gainNode    // master gain, connected to audioCtx.destination
  samples     // pending Float32 samples accumulated by feed(), drained by flush()
  requestId   // requestAnimationFrame handle driving the flush loop
  startTime   // next scheduled playback time on the AudioContext clock


  /**
   * @param {Object} [option] - overrides for `options` (encoding, channels,
   *   sampleRate). Starts the rAF flush loop immediately.
   */
  constructor(option = {}) {
    this.options = {
      ...this.options,
      ...option
    }
    this.samples = new Float32Array()
    // Bind once so the same reference can be passed to requestAnimationFrame
    // on every tick (and cancelled in destroy()).
    this.flush = this.flush.bind(this)
    this.requestId = requestAnimationFrame(this.flush)
    this.audioCtx = new (window.AudioContext || window.webkitAudioContext)()
    // context needs to be resumed on iOS and Safari (or it will stay in "suspended" state)
    this.audioCtx.resume()
    this.audioCtx.onstatechange = () => console.log(this.audioCtx.state) // if you want to see "Running" state in console and be happy about it
    this.gainNode = this.audioCtx.createGain()
    this.gainNode.gain.value = 1
    this.gainNode.connect(this.audioCtx.destination)
    this.startTime = this.audioCtx.currentTime
  }

  /**
   * Append a chunk of raw PCM to the pending sample queue.
   * Non-TypedArray input is ignored. NOTE(review): reallocating and copying
   * the whole queue per chunk is O(n^2) over a session — acceptable only
   * because flush() empties the queue every animation frame.
   * @param {TypedArray} data - raw samples in `options.encoding` format.
   */
  feed(data) {
    if (!isTypedArray(data)) return
    data = getFormatedValue(data, this.options.encoding)
    const tmp = new Float32Array(this.samples.length + data.length)
    tmp.set(this.samples, 0)
    tmp.set(data, this.samples.length)
    this.samples = tmp
  }

  /**
   * Set the master output gain (1 = unity).
   * @param {number} volume
   */
  volume(volume) {
    this.gainNode.gain.value = volume
  }

  /**
   * Tear down the flush loop, the AudioContext, shared static state and the
   * demo-page button listeners, then switch back to the real microphone.
   * NOTE(review): all errors are swallowed silently, so a failure mid-teardown
   * leaves partially-cleared state with no diagnostic.
   */
  destroy() {
    try {
      cancelAnimationFrame(this.requestId)
      this.requestId = null
      this.samples = null
      this.audioCtx.close()
      this.audioCtx = null
      this.gainNode = null
      this.startTime = null
      WebrtcDiver.writers = []
      WebrtcDiver.rtcList = []
      WebrtcDiver.stream = null
      WebrtcDiver.track = null
      this.microphoneTransfer()
      document.getElementById('pcm-button-player').removeEventListener('click', this.arraybufferTransfer)
      document.getElementById('ordinary-microphone').removeEventListener('click', this.microphoneTransfer)
    } catch (e) {
      // console.log(e)
    }
  }

  /**
   * Per-animation-frame drain: de-interleave the queued samples into an
   * AudioBuffer, apply a 50-sample fade-in/fade-out to avoid clicks at chunk
   * boundaries, wrap the result in a WebCodecs AudioData and push it to every
   * registered track-generator writer.
   */
  flush() {
    if (this.samples.length) {
      // const bufferSource = this.audioCtx.createBufferSource()
      const length = this.samples.length / this.options.channels
      const audioBuffer = this.audioCtx.createBuffer(this.options.channels, length, this.options.sampleRate)
      let audioData
      let channel
      let offset
      let i
      let decrement

      // De-interleave: queued samples are channel-interleaved, AudioBuffer
      // channel data is planar.
      for (channel = 0; channel < this.options.channels; channel++) {
        audioData = audioBuffer.getChannelData(channel)
        offset = channel
        decrement = 50
        for (i = 0; i < length; i++) {
          audioData[i] = this.samples[offset]
          /* fadein */
          if (i < 50) {
            audioData[i] = (audioData[i] * i) / 50
          }
          /* fadeout*/
          if (i >= (length - 51)) {
            audioData[i] = (audioData[i] * decrement--) / 50
          }
          offset += this.options.channels
        }
      }

      // Never schedule in the past; snap forward to the context clock.
      if (this.startTime < this.audioCtx.currentTime) {
        this.startTime = this.audioCtx.currentTime
      }
      // bufferSource.buffer = audioBuffer
      // bufferSource.connect(this.gainNode)
      // bufferSource.start(this.startTime)
      this.startTime += audioBuffer.duration
      this.samples = new Float32Array()

      // Build the AudioData object.
      // FIXME(review): `audioData` still points at the LAST channel's plane
      // from the loop above, yet 'f32-planar' with numberOfChannels > 1
      // expects all channels' planes concatenated — for stereo this drops one
      // channel's data entirely. Presumably it should copy every channel.
      // NOTE(review): audioBuffer.length is already the per-channel frame
      // count, so dividing by numberOfChannels again looks like it halves
      // numberOfFrames for stereo — confirm against the WebCodecs spec.
      const voiceData = new window.AudioData({
        format: 'f32-planar',
        sampleRate: audioBuffer.sampleRate, // sample rate
        numberOfFrames: audioBuffer.length / audioBuffer.numberOfChannels, // samples per frame
        numberOfChannels: audioBuffer.numberOfChannels, // channel count
        timestamp: this.audioCtx.currentTime * 1e6,
        data: audioData
      })
      WebrtcDiver.writers.forEach(item => {
        item.write(voiceData)
      })
    }
    this.requestId = requestAnimationFrame(this.flush)
  }

  /**
   * Replace the writer list with a single writer on a fresh audio
   * MediaStreamTrackGenerator (the track that gets injected into WebRTC).
   */
  static refreshWriters() {
    WebrtcDiver.writers = []
    WebrtcDiver.audioTrackGenerator = new MediaStreamTrackGenerator({ kind: 'audio' })
    const writableStream = WebrtcDiver.audioTrackGenerator.writable
    const writer = writableStream.getWriter()
    WebrtcDiver.writers.push(writer)
  }

  /**
   * Interceptor installed over RTCPeerConnection.prototype.addTrack: ignores
   * the caller's track and adds the generator track instead.
   * `this` is the RTCPeerConnection instance at call time.
   */
  addTrack() {
    console.log('调用addTrack拦截: ', arguments)
    WebrtcDiver.refreshWriters()
    WebrtcDiver.normalAddTrack.apply(this, [WebrtcDiver.audioTrackGenerator])
  }

  /**
   * Interceptor installed over RTCPeerConnection.prototype.addStream:
   * if a replacement stream was configured, substitutes WebrtcDiver.track as
   * the first argument before delegating to the saved original.
   */
  addStream() {
    console.log('调用addStream拦截: ', arguments)
    if (WebrtcDiver.stream) {
      console.log('设置指定麦克风: ', WebrtcDiver.stream)
      arguments[0] = WebrtcDiver.track
    }
    WebrtcDiver.normalAddStream.apply(this, arguments)
  }

  /**
   * Install the addTrack/addStream interceptors on RTCPeerConnection (and the
   * webkit-prefixed variant when present).
   * FIXME(review): the second webkit assignment writes `addTrack` again —
   * `window.webkitRTCPeerConnection.prototype.addTrack = this.addStream`
   * clobbers the addTrack interceptor with the addStream one and never
   * patches webkit addStream; presumably it was meant to assign `.addStream`.
   */
  resetWriteAdd() {
    window.RTCPeerConnection.prototype.addTrack = this.addTrack
    window.RTCPeerConnection.prototype.addStream = this.addStream
    if (window.webkitRTCPeerConnection) {
      window.webkitRTCPeerConnection.prototype.addTrack = this.addTrack
      window.webkitRTCPeerConnection.prototype.addTrack = this.addStream
    }
    console.log('重写 -> addTrack & addStream -> 完成')
  }

  /**
   * Defend the interceptors with accessor properties: any third-party attempt
   * to read addStream/addTrack gets our interceptor; any attempt to overwrite
   * them is captured as the new "original" and the interceptor is re-applied.
   */
  monitorResetRtcAdd() {
    Object.defineProperty(window.RTCPeerConnection.prototype, 'addStream', {
      get: () => {
        console.log('监听到 addStream 被获取')
        return this.addStream
      },
      set: (f) => {
        // toString comparison distinguishes our interceptor from foreign code.
        if (this.addStream.toString() !== f.toString()) {
          console.log('监听到第三方重写addStream, 继续重写')
          WebrtcDiver.normalAddStream = f
          window.RTCPeerConnection.prototype.addStream = this.addStream
        }
      }
    })
    Object.defineProperty(window.RTCPeerConnection.prototype, 'addTrack', {
      get: () => {
        console.log('监听到 addTrack 被获取')
        return this.addTrack
      },
      set: (f) => {
        if (this.addTrack.toString() !== f.toString()) {
          console.log('监听到第三方重写addTrack, 继续重写')
          WebrtcDiver.normalAddTrack = f
          window.RTCPeerConnection.prototype.addTrack = this.addTrack
        }
      }
    })
    console.log('开启重写监听 -> addStream & addTrack')
  }

  /**
   * Wrap the saved RTCPeerConnection in a Proxy that (a) re-installs the
   * method interceptors if anyone swaps the prototype, and (b) records every
   * constructed peer connection into WebrtcDiver.rtcList so its senders can
   * be retargeted later.
   */
  monitorResetRtcPrototype() {
    WebrtcDiver.normalRtc = new Proxy(WebrtcDiver.normalRtc, {
      get: (target, key) => {
        return Reflect.get(target, key)
      },
      set: (target, key, value) => {
        if (key === 'prototype') {
          console.log('监听到 RTCPeerConnection.prototype 被重写, 继续重写')
          const result = Reflect.set(target, key, value)
          this.monitorResetRtcAdd()
          return result
        } else {
          return Reflect.set(target, key, value)
        }
      },
      construct: function(target, otherArray) {
        const that = new target(...otherArray)
        WebrtcDiver.rtcList.push(that)
        console.log('监听到RTC创建: ', that)
        return that
      }
    })
    console.log('开启重写监听 -> RTCPeerConnection.prototype')
  }

  /**
   * Defend window.RTCPeerConnection itself: reads always return our proxied
   * constructor; a foreign overwrite is adopted as the new original and the
   * whole interception stack is re-applied on top of it.
   */
  monitorResetRtc() {
    Object.defineProperty(window, 'RTCPeerConnection', {
      get: () => {
        return WebrtcDiver.normalRtc
      },
      set: (f) => {
        if (WebrtcDiver.normalRtc.toString() !== f.toString()) {
          console.log('监听到第三方重写RTCPeerConnection, 继续重写')
          WebrtcDiver.normalRtc = f
          this.monitorResetRtcPrototype()
          this.monitorResetRtcAdd()
        }
      }
    })
    console.log('开启重写监听 -> RTCPeerConnection')
  }

  /**
   * Open the local PCM WebSocket and feed every binary message into the
   * sample queue.
   * NOTE(review): viewing the bytes through Uint16Array treats 16-bit PCM as
   * unsigned, while the '16bitInt' decoder expects signed samples — likely
   * should be Int16Array; confirm against the PCM producer.
   */
  openPcmData() {
    WebrtcDiver.ws = new WebSocket('ws://127.0.0.1:8899');
    WebrtcDiver.ws.binaryType = 'arraybuffer';
    WebrtcDiver.ws.addEventListener('message', (event) => {
      // Either an ArrayBuffer or any TypedArray can be passed.
      this.feed(new Uint16Array(event.data));
    });
  }

  /**
   * Button handler: switch the most recent peer connection's audio sender
   * from the live microphone track to the PCM-fed generator track.
   * Arrow class field so `this` stays bound when used as a DOM listener.
   */
  arraybufferTransfer = async () => {
    this.openPcmData()
    const sender = WebrtcDiver.rtcList[WebrtcDiver.rtcList.length - 1].getSenders().find(sender => sender.track.kind === 'audio');
    if (sender) {
      sender.track.stop()
      WebrtcDiver.refreshWriters()
      await sender.replaceTrack(WebrtcDiver.audioTrackGenerator);
    }
    document.getElementById('pcm-button-player').disabled = true
    document.getElementById('ordinary-microphone').disabled = false
  }

  /**
   * Button handler: close the PCM WebSocket (if any), clear the queue, and
   * switch the most recent peer connection's audio sender back to a fresh
   * getUserMedia microphone track. No-op when no WebSocket is open; errors
   * are swallowed silently.
   */
  microphoneTransfer = async () => {
    try {
      if (WebrtcDiver.ws) {
        WebrtcDiver.ws.close(1000, 'Normal closure');
        WebrtcDiver.ws.onopen = null;
        WebrtcDiver.ws.onmessage = null;
        WebrtcDiver.ws.onerror = null;
        WebrtcDiver.ws.onclose = null;
        WebrtcDiver.ws = null
        this.samples = new Float32Array()
        const localStream = await navigator.mediaDevices.getUserMedia({ audio: true });
        const audioTrack = localStream.getAudioTracks()[0];
        const sender = WebrtcDiver.rtcList[WebrtcDiver.rtcList.length - 1].getSenders().find(sender => sender.track.kind === 'audio');
        if (sender) {
          sender.track.stop()
          await sender.replaceTrack(audioTrack);
        }
        document.getElementById('ordinary-microphone').disabled = true
        document.getElementById('pcm-button-player').disabled = false
      }
    } catch (e) {
      // console.log(e)
    }
  }

  /**
   * Install the full interception stack (method overrides + accessor guards +
   * constructor proxy) and wire the two demo-page buttons.
   */
  async init() {
    this.resetWriteAdd()
    this.monitorResetRtcAdd()
    this.monitorResetRtcPrototype()
    this.monitorResetRtc()
    document.getElementById('pcm-button-player').addEventListener('click', this.arraybufferTransfer)
    document.getElementById('ordinary-microphone').addEventListener('click', this.microphoneTransfer)
  }
}
@@ -0,0 +1,77 @@
1
<!-- Demo page: embeds one softphone instance and wires up its lifecycle
     callbacks. Depends on ./util.js and ./softphone.js for SoftphoneManager. -->
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport"
  content="width=device-width, initial-scale=1, maximum-scale=1, minimum-scale=1, user-scalable=no" />
  <title>softphone</title>
  <style>
  .nf-transparent {
  visibility: hidden;
  }
  .nf-softphone-text {
  text-align: center;
  line-height: 32px;
  margin-top: -32px;
  }
  .nf-softphone-container {
  margin-left: 10px;
  height: 32px;
  min-width: 600px;
  }
  .nf-softphone-iframe-container {
  height: 32px;
  width: 100%;
  overflow: visible;
  }
  .nf-softphone-iframe {
  width: 100vw;
  height: 100vh;
  top: 0;
  left: 0;
  }
  </style>
</head>
<body>
  <!-- Container/query-context for softphone instance #1. -->
  <div id='softpone-context1' style='overflow: hidden'>
  <div style="display: flex;height: 32px;">
  <div style="min-width: 600px;width: 600px;" id='softphone1'></div>
  </div>
  </div>
  <script src="./util.js"></script>
  <script src="./softphone.js"></script>
  <script>
  // Demo credentials for the hosted softphone service.
  const agentTag1 = 'zoujh-test'
  const appKey1 = '032d44009bff1752'
  const appSecret1 = '4c7304c94c5e8725613516d4d6db679b'

  /**
  👈 With this instance you can manually trigger softphone actions such as sign-in, sign-out, answer, ignore, hang-up, etc. Note: this is not the softphone instance itself, but the instance that bridges communication with the softphone.
  */
  const nfSoftPhone1 = (new SoftphoneManager()).initSoftphone({ // 👈 use the class to create a fresh instance when running multiple instances
  el: '#softphone1', // 👈 softphone container element
  selector: '#softpone-context1', // 👈 query context for the softphone container, used to isolate DOM lookups between instances
  ancestorOrigin: 'nf-softphone1', // see the docs below for origin/ancestorOrigin/destinationOrigin; for a single instance you can hardcode origin=ai-softphone&destinationOrigin=nf-softphone&ancestorOrigin=nf-softphone
  destinationOrigin: 'nf-softphone1',

  agentTag: agentTag1, // 👈 unique agent identifier
  appKey: appKey1, // 👈 enterprise appKey
  appSecret: appSecret1,
  extStatus: '1', // 👈 initial break state: '1' = online, anything else = on break

  softphoneConnectCallBack: (data) => {
  console.log('softphone-connect')
  }, // 👈 sign-in/sign-out callback
  softphoneCallRefreshCallBack: (data) => {console.log('softphone-call-refresh')}, // 👈 callback to refresh the call-record list on an incoming call
  softphoneSeatStatusChangeCallBack: (data) => {console.log('softphone-seats-status-change')},// 👈 break/online toggle callback
  softphoneAcceptCallBack: (data) => {console.log('softphone-accept')},// 👈 answer callback
  softphoneIgnoreCallBack: (data) => {console.log('softphone-ignore')},// 👈 ignore callback
  softphoneHangupCallBack: (data) => {console.log('softphone-hangup')},// 👈 hang-up callback
  softphoneSessionStateChangeCallBack: (data) => {console.log('softphone-session-state-change')},// 👈 session state change callback
  softphoneIncomingCallBack: (data) => {console.log('softphone-incoming')},// 👈 incoming-call callback
  softphoneSendDtmfCallBack: (data) => {console.log('softphone-send-dtmf')},// 👈 transfer-to-agent (DTMF) callback
  softphoneConnectRegisteredCallBack: (data) => {console.log('softphone-connect-registered')},// 👈 fired once dynamic action binding completes (action-bus calls only work after this)
  });
  </script>
</body>
</html>