com.adrenak.univoice 3.0.0 → 4.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +32 -54
- package/Runtime/Adrenak.UniVoice.Runtime.asmdef +19 -3
- package/Runtime/ClientSession.cs +137 -0
- package/Runtime/{Interfaces/IChatroomNetwork.cs.meta → ClientSession.cs.meta} +1 -1
- package/Runtime/Common/Utils.cs +55 -0
- package/Runtime/{Types/ChatroomAgentMode.cs.meta → Common/Utils.cs.meta} +1 -1
- package/Runtime/Common.meta +8 -0
- package/Runtime/Impl/Filters/GaussianAudioBlur.cs +90 -0
- package/Runtime/{ChatroomAgent.cs.meta → Impl/Filters/GaussianAudioBlur.cs.meta} +1 -1
- package/Runtime/Impl/Filters/OpusFilter.cs +85 -0
- package/Runtime/Impl/Filters/OpusFilter.cs.meta +11 -0
- package/Runtime/Impl/Filters.meta +8 -0
- package/Runtime/Impl/Inputs/UniMicInput.cs +39 -0
- package/Runtime/Impl/Inputs/UniMicInput.cs.meta +11 -0
- package/Runtime/Impl/Inputs.meta +8 -0
- package/Runtime/Impl/Networks/Mirror/MirrorClient.cs +161 -0
- package/Runtime/Impl/Networks/Mirror/MirrorClient.cs.meta +11 -0
- package/Runtime/Impl/Networks/Mirror/MirrorMessage.cs +21 -0
- package/Runtime/Impl/Networks/Mirror/MirrorMessage.cs.meta +11 -0
- package/Runtime/Impl/Networks/Mirror/MirrorMessageTags.cs +16 -0
- package/Runtime/Impl/Networks/Mirror/MirrorMessageTags.cs.meta +11 -0
- package/Runtime/Impl/Networks/Mirror/MirrorModeObserver.cs +43 -0
- package/Runtime/Impl/Networks/Mirror/MirrorModeObserver.cs.meta +11 -0
- package/Runtime/Impl/Networks/Mirror/MirrorServer.cs +234 -0
- package/Runtime/Impl/Networks/Mirror/MirrorServer.cs.meta +11 -0
- package/Runtime/Impl/Networks/Mirror.meta +8 -0
- package/Runtime/Impl/Networks.meta +8 -0
- package/Runtime/Impl/Outputs/StreamedAudioSourceOutput.cs +56 -0
- package/Runtime/Impl/Outputs/StreamedAudioSourceOutput.cs.meta +11 -0
- package/Runtime/Impl/Outputs.meta +8 -0
- package/Runtime/Impl.meta +8 -0
- package/Runtime/Interfaces/IAudioClient.cs +77 -0
- package/Runtime/Interfaces/IAudioClient.cs.meta +11 -0
- package/Runtime/Interfaces/IAudioFilter.cs +10 -0
- package/Runtime/Interfaces/IAudioFilter.cs.meta +11 -0
- package/Runtime/Interfaces/IAudioInput.cs +1 -24
- package/Runtime/Interfaces/IAudioOutput.cs +4 -30
- package/Runtime/Interfaces/IAudioOutputFactory.cs +2 -7
- package/Runtime/Interfaces/IAudioServer.cs +41 -0
- package/Runtime/Interfaces/IAudioServer.cs.meta +11 -0
- package/Runtime/Types/{ChatroomAudioSegment.cs → AudioFrame.cs} +7 -6
- package/Runtime/Types/VoiceSettings.cs +53 -0
- package/Runtime/Types/VoiceSettings.cs.meta +11 -0
- package/Samples~/Group Chat Sample/Prefabs/Mic Toggle.prefab +235 -0
- package/{CHANGELOG.md.meta → Samples~/Group Chat Sample/Prefabs/Mic Toggle.prefab.meta } +2 -2
- package/Samples~/Group Chat Sample/Prefabs/Peer View.prefab +851 -0
- package/Samples~/Group Chat Sample/Prefabs/Peer View.prefab.meta +7 -0
- package/Samples~/Group Chat Sample/Prefabs.meta +8 -0
- package/Samples~/Group Chat Sample/Scenes/GroupVoiceCallSample-Mirror.unity +2160 -0
- package/Samples~/Group Chat Sample/Scenes/GroupVoiceCallSample-Mirror.unity.meta +7 -0
- package/Samples~/Group Chat Sample/Scenes.meta +8 -0
- package/Samples~/Group Chat Sample/Scripts/GroupVoiceCallMirrorSample.cs +217 -0
- package/Samples~/Group Chat Sample/Scripts/GroupVoiceCallMirrorSample.cs.meta +11 -0
- package/Samples~/Group Chat Sample/Scripts/PeerView.cs +74 -0
- package/Samples~/Group Chat Sample/Scripts/PeerView.cs.meta +11 -0
- package/Samples~/Group Chat Sample/Scripts.meta +8 -0
- package/Samples~/Group Chat Sample/Sprites/mic.png +0 -0
- package/Samples~/Group Chat Sample/Sprites/mic.png.meta +88 -0
- package/Samples~/Group Chat Sample/Sprites/off.png +0 -0
- package/Samples~/Group Chat Sample/Sprites/off.png.meta +88 -0
- package/Samples~/Group Chat Sample/Sprites/on.png +0 -0
- package/Samples~/Group Chat Sample/Sprites/on.png.meta +88 -0
- package/Samples~/Group Chat Sample/Sprites/speaker.png +0 -0
- package/Samples~/Group Chat Sample/Sprites/speaker.png.meta +88 -0
- package/Samples~/Group Chat Sample/Sprites.meta +8 -0
- package/Samples~/Group Chat Sample.meta +8 -0
- package/package.json +15 -5
- package/CHANGELOG.md +0 -67
- package/Runtime/ChatroomAgent.cs +0 -260
- package/Runtime/Interfaces/IChatroomNetwork.cs +0 -125
- package/Runtime/Types/ChatroomAgentMode.cs +0 -22
- package/Runtime/Types/ChatroomPeerSettings.cs +0 -18
- package/Runtime/Types/ChatroomPeerSettings.cs.meta +0 -11
- /package/Runtime/Types/{ChatroomAudioSegment.cs.meta → AudioFrame.cs.meta} +0 -0
package/README.md
CHANGED
|
@@ -1,5 +1,3 @@
|
|
|
1
|
-
Note: Inbuilt implementations and samples have been removed from this repository. They'll be added to separate repositories soon.
|
|
2
|
-
|
|
3
1
|
# UniVoice
|
|
4
2
|
UniVoice is a voice chat/VoIP solution for Unity.
|
|
5
3
|
|
|
@@ -9,73 +7,53 @@ Some features of UniVoice:
|
|
|
9
7
|
- ⚙ Peer specific settings. Don't want to listen to a peer? Mute them. Don't want someone listening to you? Mute yourself against them.
|
|
10
8
|
|
|
11
9
|
- 🎨 Customize your audio input, output and networking layer.
|
|
12
|
-
* 🎤 __Configurable Audio Input__:
|
|
10
|
+
* 🎤 __Configurable Audio Input__: UniVoice is audio input agnostic. It supports mic audio input out of the box and you can change the source of outgoing audio by implementing the `IAudioInput` interface.
|
|
13
11
|
|
|
14
|
-
* 🔊 __Configurable Audio Output__:
|
|
12
|
+
* 🔊 __Configurable Audio Output__: UniVoice is audio output agnostic. Out of the box it supports playing peer audio using a Unity AudioSource. You can divert incoming audio to anywhere you want by implementing the `IAudioOutput` interface.
|
|
15
13
|
|
|
16
|
-
* 🌐 __Configurable Network__:
|
|
17
|
-
|
|
18
|
-
# Docs
|
|
19
|
-
Manuals and sample projects are not available yet. For the API reference, please visit http://www.vatsalambastha.com/univoice
|
|
20
|
-
|
|
21
|
-
# Usage
|
|
22
|
-
## Creating a chatroom agent
|
|
23
|
-
- To be able to host and join voice chatrooms, you need a `ChatroomAgent` instance.
|
|
14
|
+
* 🌐 __Configurable Network__: UniVoice is network agnostic and supports Mirror out of the box. You can implement the `IAudioClient` and `IAudioServer` interfaces using the networking plugin of your choice to make it compatible with it.
|
|
24
15
|
|
|
16
|
+
## Installation
|
|
17
|
+
⚠️ [OpenUPM](https://openupm.com/packages/com.adrenak.univoice/?subPage=versions) may not have up to date releases. Install using NPM registry instead 👇
|
|
18
|
+
|
|
19
|
+
Ensure you have the NPM registry in the `Packages/manifest.json` file of your Unity project with the following scopes:
|
|
25
20
|
```
|
|
26
|
-
|
|
21
|
+
"scopedRegistries": [
|
|
22
|
+
{
|
|
23
|
+
"name": "npmjs",
|
|
24
|
+
"url": "https://registry.npmjs.org",
|
|
25
|
+
"scopes": [
|
|
26
|
+
"com.npmjs",
|
|
27
|
+
"com.adrenak.univoice",
|
|
28
|
+
"com.adrenak.brw",
|
|
29
|
+
"com.adrenak.unimic",
|
|
30
|
+
"com.adrenak.unityopus"
|
|
31
|
+
]
|
|
32
|
+
}
|
|
33
|
+
}
|
|
27
34
|
```
|
|
28
35
|
|
|
29
|
-
##
|
|
30
|
-
|
|
31
|
-
Every peer in the chatroom is assigned an ID by the host. And every peer has a peer list, representing the other peers in the chatroom.
|
|
36
|
+
## Docs
|
|
37
|
+
An API reference is available at http://www.vatsalambastha.com/univoice
|
|
32
38
|
|
|
33
|
-
|
|
34
|
-
|
|
39
|
+
## Samples
|
|
40
|
+
This repository contains a sample scene for the Mirror network, which is the best place to see how UniVoice can be integrated into your project.
|
|
35
41
|
|
|
36
|
-
|
|
37
|
-
`agent.Network.PeersIDs`
|
|
38
|
-
|
|
39
|
-
`agent.Network` also provides methods to host or join a chatroom. Here is how you use them:
|
|
42
|
+
To try the sample, import Mirror and add the `UNIVOICE_MIRROR_NETWORK` compilation symbol to your project.
|
|
40
43
|
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
agent.Network.HostChatroom(optional_data);
|
|
44
|
+
## Dependencies
|
|
45
|
+
[com.adrenak.brw](https://www.github.com/adrenak/brw)`@1.0.1` for reading and writing messages for communication. See `MirrorServer.cs` and `MirrorClient.cs` where they're used.
|
|
44
46
|
|
|
45
|
-
|
|
46
|
-
agent.Network.JoinChatroom(optional_data);
|
|
47
|
+
[com.adrenak.unimic](https://www.github.com/adrenak/unimic)`@3.2.1` for easily capturing audio from any connected mic devices. See `UniMicInput.cs` for usage.
|
|
47
48
|
|
|
48
|
-
|
|
49
|
-
agent.Network.LeaveChatroom(optional_data);
|
|
49
|
+
[com.adrenak.unityopus](https://www.github.com/adrenak/unityopus)`@1.0.0` for Opus encoding and decoding. See `OpusFilter.cs` for usage.
|
|
50
50
|
|
|
51
|
-
|
|
52
|
-
agent.Network.CloseChatroom(optional_data);
|
|
53
|
-
|
|
54
|
-
```
|
|
55
|
-
## Muting Audio
|
|
56
|
-
To mute everyone in the chatroom, use `agent.MuteOthers = true;` or set it to `false` to unmute them all.
|
|
57
|
-
|
|
58
|
-
To mute yourself use `agent.MuteSelf = true;` or set it to `false` to unmute yourself. This will stop sending your audio to all the peers in the chatroom.
|
|
59
|
-
|
|
60
|
-
For muting a specific peer, first get the peers settings object using this:
|
|
61
|
-
```
|
|
62
|
-
agent.PeerSettings[id].muteThem = true; // where id belongs to the peer in question
|
|
63
|
-
```
|
|
64
|
-
|
|
65
|
-
If you want to mute yourself towards a specific peer, use this:
|
|
66
|
-
`agent.PeerSettings[id].muteSelf = true; // where id belongs to the peer in question`
|
|
67
|
-
|
|
68
|
-
## Events
|
|
69
|
-
`agent.Network` provides several network related events. Refer to the [API reference](http://www.vatsalambastha.com/univoice/api/Adrenak.UniVoice.ChatroomAgent.html) for them.
|
|
70
|
-
|
|
71
|
-
# License and Support
|
|
51
|
+
## License and Support
|
|
72
52
|
This project is under the [MIT license](https://github.com/adrenak/univoice/blob/master/LICENSE).
|
|
73
53
|
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
__Commercial consultation and development can be arranged__ but is subject to schedule and availability.
|
|
54
|
+
Community contributions are welcome.
|
|
77
55
|
|
|
78
|
-
|
|
56
|
+
## Contact
|
|
79
57
|
The developer can be reached at the following links:
|
|
80
58
|
|
|
81
59
|
[Website](http://www.vatsalambastha.com)
|
|
@@ -1,3 +1,19 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
1
|
+
{
|
|
2
|
+
"name": "Adrenak.UniVoice.Runtime",
|
|
3
|
+
"references": [
|
|
4
|
+
"GUID:f87ecb857e752164ab814a3de8eb0262",
|
|
5
|
+
"GUID:1f776cd02c03a7b4280b6b649d7758e2",
|
|
6
|
+
"GUID:30817c1a0e6d646d99c048fc403f5979",
|
|
7
|
+
"GUID:725ee7191c021de4dbf9269590ded755",
|
|
8
|
+
"GUID:34d15e3fb5fa4b541b5a93a6dc182cc5"
|
|
9
|
+
],
|
|
10
|
+
"includePlatforms": [],
|
|
11
|
+
"excludePlatforms": [],
|
|
12
|
+
"allowUnsafeCode": false,
|
|
13
|
+
"overrideReferences": false,
|
|
14
|
+
"precompiledReferences": [],
|
|
15
|
+
"autoReferenced": true,
|
|
16
|
+
"defineConstraints": [],
|
|
17
|
+
"versionDefines": [],
|
|
18
|
+
"noEngineReferences": false
|
|
19
|
+
}
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
using System;
|
|
2
|
+
using System.Collections.Generic;
|
|
3
|
+
|
|
4
|
+
using UnityEngine;
|
|
5
|
+
|
|
6
|
+
namespace Adrenak.UniVoice {
    /// <summary>
    /// Handles a client session.
    /// Requires an implementation of <see cref="IAudioClient{T}"/>, <see cref="IAudioInput"/> and <see cref="IAudioOutputFactory"/> each.
    /// Allows adding input and output filters and handles their execution.
    /// </summary>
    /// <typeparam name="T">The type used by the network implementation to identify peers</typeparam>
    public class ClientSession<T> : IDisposable {
        /// <summary>
        /// The <see cref="IAudioOutput"/> instances of each peer in the session
        /// </summary>
        public Dictionary<T, IAudioOutput> PeerOutputs { get; private set; } = new Dictionary<T, IAudioOutput>();

        /// <summary>
        /// The input <see cref="IAudioFilter"/> that will be applied to the outgoing audio for all the peers.
        /// Note that filters are executed in the order they are present in this list
        /// </summary>
        public List<IAudioFilter> InputFilters { get; set; } = new List<IAudioFilter>();

        /// <summary>
        /// The output <see cref="IAudioFilter"/> that will be applied to the incoming audio for all the peers.
        /// Note that filters are executed in the order they are present in this list.
        /// </summary>
        public List<IAudioFilter> OutputFilters { get; set; } = new List<IAudioFilter>();

        /// <summary>
        /// Creates a session. All three dependencies are required; setting any of the
        /// corresponding properties later disposes the previous instance first.
        /// </summary>
        public ClientSession(IAudioClient<T> client, IAudioInput input, IAudioOutputFactory outputFactory) {
            Client = client;
            Input = input;
            OutputFactory = outputFactory;
        }

        /// <summary>
        /// The <see cref="IAudioClient{T}"/> that's used for networking.
        /// Assigning a new value disposes the previous client and wires the session
        /// to the new client's peer and audio events.
        /// </summary>
        IAudioClient<T> client;
        public IAudioClient<T> Client {
            get => client;
            set {
                if (client != null)
                    client.Dispose();
                client = value;

                // When we leave the session no peer audio can arrive anymore,
                // so all peer outputs are released.
                client.OnLeft += DisposeAllPeerOutputs;

                client.OnPeerJoined += id => TryCreatePeerOutput(id);

                client.OnPeerLeft += id => {
                    if (!PeerOutputs.ContainsKey(id))
                        return;

                    PeerOutputs[id].Dispose();
                    PeerOutputs.Remove(id);
                };

                // Incoming audio is passed through the output filters (in list order)
                // before being fed to the peer's output.
                client.OnReceivedPeerAudioFrame += (id, audioFrame) => {
                    if (!PeerOutputs.ContainsKey(id))
                        return;

                    if (OutputFilters != null) {
                        foreach (var filter in OutputFilters)
                            audioFrame = filter.Run(audioFrame);
                    }

                    PeerOutputs[id].Feed(audioFrame);
                };
            }
        }

        IAudioInput input;
        /// <summary>
        /// The <see cref="IAudioInput"/> that's used for sourcing outgoing audio.
        /// Assigning a new value disposes the previous input.
        /// </summary>
        public IAudioInput Input {
            get => input;
            set {
                if (input != null)
                    input.Dispose();
                input = value;

                // Outgoing audio is passed through the input filters (in list order)
                // before being sent over the network.
                input.OnFrameReady += frame => {
                    if (InputFilters != null) {
                        foreach (var filter in InputFilters)
                            frame = filter.Run(frame);
                    }

                    Client.SendAudioFrame(frame);
                };
            }
        }

        IAudioOutputFactory outputFactory;
        /// <summary>
        /// The <see cref="IAudioOutputFactory"/> that creates the <see cref="IAudioOutput"/> of peers.
        /// Assigning a new value recreates the outputs of all currently known peers.
        /// </summary>
        public IAudioOutputFactory OutputFactory {
            get => outputFactory;
            set {
                outputFactory = value;

                DisposeAllPeerOutputs();

                foreach (var id in Client.PeerIDs)
                    TryCreatePeerOutput(id);
            }
        }

        // Creates an output for the given peer via the current factory.
        // Failures are logged rather than thrown so one bad peer doesn't break the session.
        void TryCreatePeerOutput(T id) {
            try {
                var output = outputFactory.Create();
                PeerOutputs.Add(id, output);
            }
            catch (Exception e) {
                Debug.LogException(e);
            }
        }

        // Disposes every peer output and empties the map.
        void DisposeAllPeerOutputs() {
            foreach (var output in PeerOutputs)
                output.Value.Dispose();
            PeerOutputs.Clear();
        }

        /// <summary>
        /// Disposes the client, the input and all peer outputs.
        /// </summary>
        public void Dispose() {
            Client.Dispose();
            Input.Dispose();
            // Fix: peer outputs were previously leaked when the session was disposed.
            DisposeAllPeerOutputs();
        }
    }
}
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
using System.IO;
|
|
2
|
+
using System.IO.Compression;
|
|
3
|
+
using System.Net.Sockets;
|
|
4
|
+
using System.Net;
|
|
5
|
+
using System.Runtime.Serialization.Formatters.Binary;
|
|
6
|
+
using System;
|
|
7
|
+
using UnityEngine;
|
|
8
|
+
|
|
9
|
+
namespace Adrenak.UniVoice {
    /// <summary>
    /// Assorted helpers used across UniVoice: raw byte/float conversion,
    /// audio level calculation and local network address lookup.
    /// </summary>
    public class Utils {
        /// <summary>
        /// Converts between float sample arrays and their raw little-endian byte representation.
        /// </summary>
        public class Bytes {
            /// <summary>
            /// Reinterprets a float array as bytes (4 bytes per float, platform endianness).
            /// </summary>
            public static byte[] FloatsToBytes(float[] floats) {
                int byteCount = sizeof(float) * floats.Length;
                byte[] byteArray = new byte[byteCount];

                Buffer.BlockCopy(floats, 0, byteArray, 0, byteCount);

                return byteArray;
            }

            /// <summary>
            /// Reinterprets a byte array as floats. Assumes the byte length is a
            /// multiple of 4; any trailing remainder bytes are ignored.
            /// </summary>
            public static float[] BytesToFloats(byte[] bytes) {
                int floatCount = bytes.Length / sizeof(float);
                float[] floatArray = new float[floatCount];

                Buffer.BlockCopy(bytes, 0, floatArray, 0, bytes.Length);

                return floatArray;
            }
        }

        public static class Audio {
            /// <summary>
            /// Calculates the RMS (root mean square) level of audio given as raw bytes
            /// (4 bytes per float sample). Returns 0 for empty input.
            /// </summary>
            public static float CalculateRMS(byte[] audio) {
                float[] samples = Bytes.BytesToFloats(audio);

                // Guard: avoids a 0/0 division (NaN) on empty audio.
                if (samples.Length == 0)
                    return 0f;

                // Fix: the accumulator used to be a static field that was never reset,
                // so every call after the first returned an inflated value. Locals make
                // the method correct and thread-safe.
                float sumOfSquares = 0f;
                foreach (var sample in samples)
                    sumOfSquares += sample * sample;

                // Math.Sqrt is numerically equivalent to Mathf.Sqrt (which wraps it)
                // and keeps this utility free of the UnityEngine dependency.
                return (float)Math.Sqrt(sumOfSquares / samples.Length);
            }
        }

        public static class Network {
            /// <summary>
            /// The local IPv4 address, discovered by opening a UDP socket towards a
            /// public address (no traffic is actually sent for UDP connect) and reading
            /// the local endpoint the OS picked.
            /// </summary>
            public static string LocalIPv4Address {
                get {
                    using (Socket socket = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, 0)) {
                        socket.Connect("8.8.8.8", 65530);
                        IPEndPoint endPoint = socket.LocalEndPoint as IPEndPoint;
                        return endPoint.Address.ToString();
                    }
                }
            }
        }
    }
}
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
using System;
|
|
2
|
+
|
|
3
|
+
using UnityEngine;
|
|
4
|
+
|
|
5
|
+
namespace Adrenak.UniVoice.Filters {
    /// <summary>
    /// A filter that smoothens audio data by applying a Gaussian blur over it.
    /// This is somewhat effective at suppressing noise in the audio.
    /// </summary>
    public class GaussianAudioBlur : IAudioFilter {
        readonly float sigma;   // standard deviation of the Gaussian kernel
        readonly int range;     // neighbours considered on each side of a sample
        byte[] lastInput;       // raw samples from the previous Run call, used for stitching

        /// <summary>
        /// Creates the blur filter.
        /// </summary>
        /// <param name="sigma">Gaussian standard deviation; larger values smoothen more</param>
        /// <param name="range">How many samples on each side contribute to every output sample</param>
        public GaussianAudioBlur(float sigma = 2, int range = 2) {
            this.sigma = sigma;
            this.range = range;
        }

        /// <summary>
        /// Smoothens the samples of <paramref name="frame"/> in place and returns the frame.
        /// Null or empty samples are normalized to null.
        /// </summary>
        public AudioFrame Run(AudioFrame frame) {
            var current = frame.samples;
            if (current == null || current.Length == 0) {
                frame.samples = null;
                return frame;
            }

            // Very first frame: nothing to stitch against, smoothen it on its own.
            if (lastInput == null) {
                lastInput = current;
                frame.samples = Utils.Bytes.FloatsToBytes(
                    ApplyGaussianFilter(Utils.Bytes.BytesToFloats(current))
                );
                return frame;
            }

            // Otherwise prepend the previous frame's samples, smoothen the combined
            // buffer, and keep only the second half. This reduces jitter by making
            // the smoothing seamless across consecutive frames.
            byte[] combined = new byte[lastInput.Length + current.Length];
            Buffer.BlockCopy(lastInput, 0, combined, 0, lastInput.Length);
            Buffer.BlockCopy(current, 0, combined, lastInput.Length, current.Length);

            byte[] combinedSmooth = Utils.Bytes.FloatsToBytes(
                ApplyGaussianFilter(Utils.Bytes.BytesToFloats(combined))
            );

            byte[] output = new byte[current.Length];
            Buffer.BlockCopy(combinedSmooth, lastInput.Length, output, 0, current.Length);

            lastInput = current;
            frame.samples = output;
            return frame;
        }

        // Convolves the samples with a locally-normalized Gaussian kernel of half-width `range`.
        float[] ApplyGaussianFilter(float[] source) {
            int length = source.Length;
            float[] smoothed = new float[length];

            for (int i = 0; i < length; i++) {
                float sum = 0.0f;
                float weightSum = 0.0f;

                for (int offset = -range; offset <= range; offset++) {
                    int index = i + offset;
                    if (index < 0 || index >= length)
                        continue;

                    float weight = Gaussian(offset, sigma);
                    sum += source[index] * weight;
                    weightSum += weight;
                }

                // Dividing by the summed weights keeps edges (where the kernel is
                // clipped) at the correct amplitude.
                smoothed[i] = sum / weightSum;
            }

            return smoothed;
        }

        // Value of the Gaussian PDF with deviation `sigma` at integer offset `x`.
        float Gaussian(int x, float sigma) {
            return (float)Mathf.Exp(-(x * x) / (2 * sigma * sigma))
                / ((float)Mathf.Sqrt(2 * Mathf.PI) * sigma);
        }
    }
}
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
using Adrenak.UnityOpus;
|
|
2
|
+
|
|
3
|
+
using System;
|
|
4
|
+
|
|
5
|
+
/*
|
|
6
|
+
* Opus encoding and decoding are VERY important for any real world use of UniVoice as without
|
|
7
|
+
* encoding the size of audio data is much (over 10x) larger.
|
|
8
|
+
* For more info see https://www.github.com/adrenak/UnityOpus
|
|
9
|
+
*/
|
|
10
|
+
namespace Adrenak.UniVoice.Filters {
    /// <summary>
    /// A filter that encodes audio using Opus. Use this as an output filter
    /// to reduce the size of outgoing client audio
    /// </summary>
    public class OpusEncodeFilter : IAudioFilter {
        Encoder encoder;
        byte[] outputBuffer;   // scratch buffer the encoder writes into, reused across calls

        public OpusEncodeFilter(Encoder encoder) {
            this.encoder = encoder;
        }

        /// <summary>
        /// Encodes the samples of <paramref name="input"/> with Opus. Returns a frame
        /// carrying the encoded bytes, or null samples if encoding failed.
        /// </summary>
        public AudioFrame Run(AudioFrame input) {
            // (Re)allocate the scratch buffer only when the required capacity changes.
            // Fix: the old check compared input.samples.Length against
            // outputBuffer.Length * 4 (multiplication on the wrong side), which was
            // always true and caused a fresh allocation on every single call.
            int requiredCapacity = input.samples.Length * 4;
            if (outputBuffer == null || outputBuffer.Length != requiredCapacity)
                outputBuffer = new byte[requiredCapacity];

            int encodedLength = encoder.Encode(Utils.Bytes.BytesToFloats(input.samples), outputBuffer);

            // A non-positive result means encoding failed; samples stay null in that case.
            byte[] encodedBytes = null;
            if (encodedLength > 0) {
                encodedBytes = new byte[encodedLength];
                Array.Copy(outputBuffer, encodedBytes, encodedBytes.Length);
            }

            // NOTE(review): timestamp is reset to 0 here, matching the rest of the
            // pipeline — confirm whether input.timestamp should be propagated instead.
            return new AudioFrame {
                timestamp = 0,
                frequency = input.frequency,
                channelCount = input.channelCount,
                samples = encodedBytes
            };
        }
    }

    /// <summary>
    /// Decodes Opus encoded audio. Use this as a filter for incoming client audio.
    /// </summary>
    public class OpusDecodeFilter : IAudioFilter {
        Decoder decoder;
        float[] outputBuffer;   // scratch buffer the decoder writes into, reused across calls

        /// <param name="outputBufferLength">
        /// Capacity (in samples) of the decode buffer; must be large enough for one decoded frame.
        /// </param>
        public OpusDecodeFilter(Decoder decoder, int outputBufferLength = 48000) {
            this.decoder = decoder;
            outputBuffer = new float[outputBufferLength];
        }

        /// <summary>
        /// Decodes the Opus samples of <paramref name="input"/>. Returns a frame
        /// carrying the decoded bytes, or null samples if decoding failed.
        /// </summary>
        public AudioFrame Run(AudioFrame input) {
            int decodedLength = decoder.Decode(input.samples, input.samples.Length, outputBuffer);

            // A non-positive result means decoding failed; samples stay null in that case.
            byte[] decodedBytes = null;
            if (decodedLength > 0) {
                float[] decoded = new float[decodedLength];
                Array.Copy(outputBuffer, decoded, decoded.Length);
                decodedBytes = Utils.Bytes.FloatsToBytes(decoded);
            }

            return new AudioFrame {
                timestamp = 0,
                frequency = input.frequency,
                channelCount = input.channelCount,
                samples = decodedBytes
            };
        }
    }
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
using System;
|
|
2
|
+
|
|
3
|
+
using Adrenak.UniMic;
|
|
4
|
+
|
|
5
|
+
using UnityEngine;
|
|
6
|
+
|
|
7
|
+
namespace Adrenak.UniVoice.Inputs {
    /// <summary>
    /// An <see cref="IAudioInput"/> implementation based on UniMic.
    /// For more on UniMic, visit https://www.github.com/adrenak/unimic
    /// </summary>
    public class UniMicInput : IAudioInput {
        const string TAG = "UniMicInput";

        /// <summary>Raised whenever the mic device delivers a new audio frame.</summary>
        public event Action<AudioFrame> OnFrameReady;

        /// <summary>The UniMic device this input captures audio from.</summary>
        public Mic.Device Device { get; private set; }

        /// <summary>
        /// Starts listening to <paramref name="device"/> for captured audio.
        /// </summary>
        public UniMicInput(Mic.Device device) {
            Device = device;
            device.OnFrameCollected += HandleFrameCollected;
        }

        // Wraps a raw UniMic frame into an AudioFrame and notifies subscribers.
        void HandleFrameCollected(int frequency, int channels, float[] samples) {
            OnFrameReady?.Invoke(new AudioFrame {
                timestamp = 0,
                frequency = frequency,
                channelCount = channels,
                samples = Utils.Bytes.FloatsToBytes(samples)
            });
        }

        /// <summary>Stops listening to the mic device.</summary>
        public void Dispose() {
            Device.OnFrameCollected -= HandleFrameCollected;
            Debug.unityLogger.Log(TAG, "Disposed");
        }
    }
}
|