hypnosound 1.5.7 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.prettierrc +5 -5
- package/README.md +21 -17
- package/index.html +51 -48
- package/index.js +28 -1
- package/package.json +30 -30
- package/src/audio/bass.js +32 -0
- package/src/audio/energy.js +0 -1
- package/src/audio/index.js +3 -0
- package/src/audio/mids.js +33 -0
- package/src/audio/treble.js +33 -0
- package/src/utils/calculateStats.js +24 -18
package/.prettierrc
CHANGED
package/README.md
CHANGED
|
@@ -1,7 +1,9 @@
|
|
|
1
1
|
# hypnosound
|
|
2
|
+
|
|
2
3
|
A little library for extracting audio features, and optionally applying statistics to them.
|
|
3
4
|
|
|
4
5
|
## Usage
|
|
6
|
+
|
|
5
7
|
Check out [index.html](./index.html) for a simple example. You can run it via `npm run start`.
|
|
6
8
|
|
|
7
9
|
You can either use the AudioProcessor, which maintains state and calculates the statistics for you, or use the functions directly in a functional way. Everything can be used functionally except for spectralFlux, which requires state.
|
|
@@ -9,9 +11,9 @@ You can either use the AudioProcessor, which maintains state and calculates the
|
|
|
9
11
|
### AudioProcessor
|
|
10
12
|
|
|
11
13
|
```javascript
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
14
|
+
import AudioProcessor from 'hypnosound'
|
|
15
|
+
const a = new AudioProcessor()
|
|
16
|
+
console.log({
|
|
15
17
|
energy: a.energy(fft),
|
|
16
18
|
spectralCentroid: a.spectralCentroid(fft),
|
|
17
19
|
spectralCrest: a.spectralCrest(fft),
|
|
@@ -22,22 +24,26 @@ You can either use the AudioProcessor, which maintains state and calculates the
|
|
|
22
24
|
spectralRoughness: a.spectralRoughness(fft),
|
|
23
25
|
spectralSkew: a.spectralSkew(fft),
|
|
24
26
|
spectralSpread: a.spectralSpread(fft),
|
|
25
|
-
|
|
27
|
+
})
|
|
28
|
+
```
|
|
26
29
|
|
|
27
|
-
|
|
30
|
+
Each audio feature comes with statistics, which are calculated automatically. You can access them like so:
|
|
28
31
|
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
32
|
+
```javascript
|
|
33
|
+
const { value, stats } = a.energy(fft)
|
|
34
|
+
console.log(`the current value for energy is ${value}`)
|
|
35
|
+
console.log(
|
|
36
|
+
`here are some stats: zScore: ${stats.zScore}, normalized: ${stats.normalized}, standardDeviation: ${stats.standardDeviation}, median: ${stats.median}, mean: ${stats.mean}, min: ${stats.min}, max: ${stats.max}`,
|
|
37
|
+
)
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
⚠️ **Warning: Each call to a function will update the statistics for that feature, so I'd recommend saving the result of the function call to a variable and reusing that.**
|
|
36
41
|
|
|
37
42
|
### Functional
|
|
43
|
+
|
|
38
44
|
```javascript
|
|
39
|
-
import {energy} from 'hypnosound'
|
|
40
|
-
console.log(energy(fft))
|
|
45
|
+
import { energy } from 'hypnosound' // or any other audio feature EXCEPT spectralFlux
|
|
46
|
+
console.log(energy(fft)) // returns the instantaneous energy value.
|
|
41
47
|
```
|
|
42
48
|
|
|
43
49
|
You may want to calculate statistics for the audio features on your own, but still use the functional style.
|
|
@@ -51,7 +57,5 @@ const calculateStats = makeCalculateStats()
|
|
|
51
57
|
const value = spectralCentroid(fft)
|
|
52
58
|
const stats = calculateStats(value) // WARNING: each call to calculateStats will update the state.
|
|
53
59
|
|
|
54
|
-
console.log({value, stats})
|
|
60
|
+
console.log({ value, stats })
|
|
55
61
|
```
|
|
56
|
-
|
|
57
|
-
|
package/index.html
CHANGED
|
@@ -1,55 +1,58 @@
|
|
|
1
|
-
<!
|
|
1
|
+
<!doctype html>
|
|
2
2
|
<html lang="en">
|
|
3
|
-
<head>
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
</head>
|
|
8
|
-
<body>
|
|
9
|
-
<button id="start">Start Listening</button>
|
|
10
|
-
<script type="module">
|
|
11
|
-
import AudioProcessor from './index.js'
|
|
12
|
-
const button = document.getElementById('start')
|
|
13
|
-
button.addEventListener('click', async () => {
|
|
14
|
-
const a = new AudioProcessor()
|
|
15
|
-
try {
|
|
16
|
-
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
|
17
|
-
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
|
|
18
|
-
const source = audioContext.createMediaStreamSource(stream);
|
|
19
|
-
const analyser = audioContext.createAnalyser();
|
|
3
|
+
<head>
|
|
4
|
+
<meta charset="UTF-8" />
|
|
5
|
+
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
|
6
|
+
<title>Audio Capture and Analysis</title>
|
|
7
|
+
</head>
|
|
20
8
|
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
9
|
+
<body>
|
|
10
|
+
<button id="start">Start Listening</button>
|
|
11
|
+
<script type="module">
|
|
12
|
+
import AudioProcessor from './index.js'
|
|
13
|
+
const button = document.getElementById('start')
|
|
14
|
+
button.addEventListener('click', async () => {
|
|
15
|
+
const a = new AudioProcessor()
|
|
16
|
+
try {
|
|
17
|
+
const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
|
|
18
|
+
const audioContext = new (window.AudioContext || window.webkitAudioContext)()
|
|
19
|
+
const source = audioContext.createMediaStreamSource(stream)
|
|
20
|
+
const analyser = audioContext.createAnalyser()
|
|
26
21
|
|
|
27
|
-
|
|
28
|
-
|
|
22
|
+
source.connect(analyser)
|
|
23
|
+
analyser.fftSize = 32768 / 2 // Or whatever size you need
|
|
24
|
+
analyser.smoothingTimeConstant = 0
|
|
25
|
+
const bufferLength = analyser.frequencyBinCount
|
|
26
|
+
const dataArray = new Uint8Array(bufferLength)
|
|
29
27
|
|
|
30
|
-
|
|
28
|
+
const draw = () => {
|
|
29
|
+
requestAnimationFrame(draw)
|
|
31
30
|
|
|
32
|
-
|
|
33
|
-
console.log(
|
|
34
|
-
// energy: a.energy(dataArray),
|
|
35
|
-
// spectralCentroid: a.spectralCentroid(dataArray),
|
|
36
|
-
// spectralCrest: a.spectralCrest(dataArray),
|
|
37
|
-
// spectralEntropy: a.spectralEntropy(dataArray),
|
|
38
|
-
// spectralFlux: a.spectralFlux(dataArray),
|
|
39
|
-
// spectralKurtosis: a.spectralKurtosis(dataArray),
|
|
40
|
-
// spectralRolloff: a.spectralRolloff(dataArray),
|
|
41
|
-
// spectralRoughness: a.spectralRoughness(dataArray),
|
|
42
|
-
// spectralSkew: a.spectralSkew(dataArray),
|
|
43
|
-
// spectralSpread: a.spectralSpread(dataArray),
|
|
44
|
-
a.pitchClass(dataArray),
|
|
45
|
-
);
|
|
46
|
-
};
|
|
31
|
+
analyser.getByteFrequencyData(dataArray)
|
|
47
32
|
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
33
|
+
// This is where the magic happens, but be careful what you log...
|
|
34
|
+
console.log({
|
|
35
|
+
// energy: a.energy(dataArray),
|
|
36
|
+
// spectralCentroid: a.spectralCentroid(dataArray),
|
|
37
|
+
// spectralCrest: a.spectralCrest(dataArray),
|
|
38
|
+
// spectralEntropy: a.spectralEntropy(dataArray),
|
|
39
|
+
// spectralFlux: a.spectralFlux(dataArray),
|
|
40
|
+
// spectralKurtosis: a.spectralKurtosis(dataArray),
|
|
41
|
+
// spectralRolloff: a.spectralRolloff(dataArray),
|
|
42
|
+
// spectralRoughness: a.spectralRoughness(dataArray),
|
|
43
|
+
// spectralSkew: a.spectralSkew(dataArray),
|
|
44
|
+
// spectralSpread: a.spectralSpread(dataArray),
|
|
45
|
+
bass: a.bass(dataArray),
|
|
46
|
+
mids: a.mids(dataArray),
|
|
47
|
+
treble: a.treble(dataArray),
|
|
48
|
+
})
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
draw()
|
|
52
|
+
} catch (error) {
|
|
53
|
+
console.error('Something went wrong:', error)
|
|
54
|
+
}
|
|
55
|
+
})
|
|
56
|
+
</script>
|
|
57
|
+
</body>
|
|
55
58
|
</html>
|
package/index.js
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { StatTypes, makeCalculateStats } from './src/utils/calculateStats.js'
|
|
2
|
-
import {applyKaiserWindow} from './src/utils/applyKaiserWindow.js'
|
|
2
|
+
import { applyKaiserWindow } from './src/utils/applyKaiserWindow.js'
|
|
3
3
|
import energy from './src/audio/energy.js'
|
|
4
4
|
import spectralCentroid from './src/audio/spectralCentroid.js'
|
|
5
5
|
import spectralCrest from './src/audio/spectralCrest.js'
|
|
@@ -11,6 +11,9 @@ import spectralRoughness from './src/audio/spectralRoughness.js'
|
|
|
11
11
|
import spectralSkew from './src/audio/spectralSkew.js'
|
|
12
12
|
import spectralSpread from './src/audio/spectralSpread.js'
|
|
13
13
|
import pitchClass from './src/audio/pitchClass.js'
|
|
14
|
+
import bass from './src/audio/bass.js'
|
|
15
|
+
import treble from './src/audio/treble.js'
|
|
16
|
+
import mids from './src/audio/mids.js'
|
|
14
17
|
class AudioProcessor {
|
|
15
18
|
constructor() {
|
|
16
19
|
// aah, state management
|
|
@@ -39,6 +42,12 @@ class AudioProcessor {
|
|
|
39
42
|
this.statCalculators.spectralSpread = makeCalculateStats()
|
|
40
43
|
|
|
41
44
|
this.statCalculators.pitchClass = makeCalculateStats()
|
|
45
|
+
|
|
46
|
+
this.statCalculators.bass = makeCalculateStats()
|
|
47
|
+
|
|
48
|
+
this.statCalculators.treble = makeCalculateStats()
|
|
49
|
+
|
|
50
|
+
this.statCalculators.mids = makeCalculateStats()
|
|
42
51
|
}
|
|
43
52
|
|
|
44
53
|
energy = (fft) => {
|
|
@@ -117,6 +126,24 @@ class AudioProcessor {
|
|
|
117
126
|
const stats = this.statCalculators.pitchClass(value)
|
|
118
127
|
return { value, stats }
|
|
119
128
|
}
|
|
129
|
+
bass = (fft) => {
|
|
130
|
+
const windowedFft = applyKaiserWindow(fft)
|
|
131
|
+
const value = bass(windowedFft)
|
|
132
|
+
const stats = this.statCalculators.bass(value)
|
|
133
|
+
return { value, stats }
|
|
134
|
+
}
|
|
135
|
+
treble = (fft) => {
|
|
136
|
+
const windowedFft = applyKaiserWindow(fft)
|
|
137
|
+
const value = treble(windowedFft)
|
|
138
|
+
const stats = this.statCalculators.treble(value)
|
|
139
|
+
return { value, stats }
|
|
140
|
+
}
|
|
141
|
+
mids = (fft) => {
|
|
142
|
+
const windowedFft = applyKaiserWindow(fft)
|
|
143
|
+
const value = mids(windowedFft)
|
|
144
|
+
const stats = this.statCalculators.mids(value)
|
|
145
|
+
return { value, stats }
|
|
146
|
+
}
|
|
120
147
|
}
|
|
121
148
|
export default AudioProcessor
|
|
122
149
|
export {
|
package/package.json
CHANGED
|
@@ -1,32 +1,32 @@
|
|
|
1
1
|
{
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
2
|
+
"name": "hypnosound",
|
|
3
|
+
"type": "module",
|
|
4
|
+
"version": "1.6.0",
|
|
5
|
+
"description": "A small library for analyzing audio",
|
|
6
|
+
"main": "index.js",
|
|
7
|
+
"scripts": {
|
|
8
|
+
"test": "echo \"Error: no test specified\" && exit 1",
|
|
9
|
+
"start": "live-server .",
|
|
10
|
+
"format": "eslint --fix ."
|
|
11
|
+
},
|
|
12
|
+
"repository": {
|
|
13
|
+
"type": "git",
|
|
14
|
+
"url": "git+https://github.com/hypnodroid/hypnosound.git"
|
|
15
|
+
},
|
|
16
|
+
"keywords": [
|
|
17
|
+
"Audio",
|
|
18
|
+
"sound",
|
|
19
|
+
"music"
|
|
20
|
+
],
|
|
21
|
+
"author": "redaphid <iam@aaronherres.com>",
|
|
22
|
+
"license": "MIT",
|
|
23
|
+
"bugs": {
|
|
24
|
+
"url": "https://github.com/hypnodroid/hypnosound/issues"
|
|
25
|
+
},
|
|
26
|
+
"homepage": "https://github.com/hypnodroid/hypnosound#readme",
|
|
27
|
+
"devDependencies": {
|
|
28
|
+
"eslint": "^8.57.0",
|
|
29
|
+
"eslint-plugin-prettier": "^5.1.3",
|
|
30
|
+
"live-server": "^1.2.2"
|
|
31
|
+
}
|
|
32
32
|
}
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
export default function bass(fft) {
|
|
2
|
+
const sampleRate = 44100
|
|
3
|
+
const totalSamples = fft.length
|
|
4
|
+
return calculateBassPower(fft, sampleRate, totalSamples)
|
|
5
|
+
}
|
|
6
|
+
|
|
7
|
+
function calculateBassPower(fft, sampleRate, totalSamples) {
|
|
8
|
+
const lowerBound = 20 // 20 Hz
|
|
9
|
+
const upperBound = 160 // 160 Hz
|
|
10
|
+
let bassEnergy = 0
|
|
11
|
+
let maxEnergy = 0
|
|
12
|
+
|
|
13
|
+
// Calculate frequency resolution
|
|
14
|
+
const frequencyResolution = sampleRate / totalSamples
|
|
15
|
+
|
|
16
|
+
for (let i = 0; i < fft.length; i++) {
|
|
17
|
+
let frequency = i * frequencyResolution
|
|
18
|
+
let magnitude = Math.abs(fft[i]) / totalSamples
|
|
19
|
+
let power = magnitude * magnitude
|
|
20
|
+
|
|
21
|
+
// Accumulate max energy for normalization
|
|
22
|
+
maxEnergy += power
|
|
23
|
+
|
|
24
|
+
// Isolate and accumulate bass frequencies
|
|
25
|
+
if (frequency >= lowerBound && frequency <= upperBound) {
|
|
26
|
+
bassEnergy += power
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
// Normalize bass energy from 0 to 1
|
|
30
|
+
let normalizedBassPower = bassEnergy / maxEnergy
|
|
31
|
+
return isNaN(normalizedBassPower) ? 0 : normalizedBassPower // Scale by 10 if needed, similar to your original function
|
|
32
|
+
}
|
package/src/audio/energy.js
CHANGED
package/src/audio/index.js
CHANGED
|
@@ -9,3 +9,6 @@ export * as spectralRoughness from './spectralRoughness'
|
|
|
9
9
|
export * as spectralSkew from './spectralSkew'
|
|
10
10
|
export * as spectralSpread from './spectralSpread'
|
|
11
11
|
export * as pitchClass from './pitchClass'
|
|
12
|
+
export * as bass from './bass'
|
|
13
|
+
export * as treble from './treble'
|
|
14
|
+
export * as mids from './mids'
|
|
@@ -0,0 +1,33 @@
|
|
|
// Extracts the normalized mid-range power (400–4000 Hz) from an FFT
// magnitude array.
//
// @param {Uint8Array|number[]} fft - frequency-domain magnitudes (e.g. from
//   AnalyserNode.getByteFrequencyData). Assumes a 44.1 kHz sample rate —
//   TODO confirm against the AudioContext actually used by callers.
// @returns {number} mid-band energy as a fraction of total spectral energy,
//   in [0, 1]; 0 for an empty or silent spectrum.
export default function mids(fft) {
    const sampleRate = 44100
    const totalSamples = fft.length
    return calculateMidPower(fft, sampleRate, totalSamples)
}

// Sums per-bin power inside the mid band and normalizes by the power of the
// whole spectrum, so the result is comparable across frames of any loudness.
function calculateMidPower(fft, sampleRate, totalSamples) {
    const lowerBound = 400 // Hz
    const upperBound = 4000 // Hz
    // Width of one FFT bin in Hz.
    const frequencyResolution = sampleRate / totalSamples

    let midEnergy = 0
    let totalEnergy = 0 // accumulated power of every bin, for normalization

    for (let i = 0; i < fft.length; i++) {
        const frequency = i * frequencyResolution
        const magnitude = Math.abs(fft[i]) / totalSamples
        const power = magnitude * magnitude

        totalEnergy += power

        // Isolate and accumulate mid frequencies.
        if (frequency >= lowerBound && frequency <= upperBound) {
            midEnergy += power
        }
    }

    // Guard against a silent or empty spectrum (0 / 0 would yield NaN).
    return totalEnergy === 0 ? 0 : midEnergy / totalEnergy
}
|
|
@@ -0,0 +1,33 @@
|
|
|
// Extracts the normalized treble power (4–20 kHz) from an FFT magnitude array.
//
// @param {Uint8Array|number[]} fft - frequency-domain magnitudes (e.g. from
//   AnalyserNode.getByteFrequencyData). Assumes a 44.1 kHz sample rate —
//   TODO confirm against the AudioContext actually used by callers.
// @returns {number} treble energy as a fraction of total spectral energy,
//   in [0, 1]; 0 for an empty or silent spectrum.
export default function treble(fft) {
    const sampleRate = 44100
    const totalSamples = fft.length
    return calculateTreblePower(fft, sampleRate, totalSamples)
}

// Sums per-bin power inside the treble band and normalizes by the power of
// the whole spectrum, so the result is comparable across frames of any
// loudness.
function calculateTreblePower(fft, sampleRate, totalSamples) {
    const lowerBound = 4000 // Hz
    const upperBound = 20000 // Hz — upper edge of audible range
    // Width of one FFT bin in Hz.
    const frequencyResolution = sampleRate / totalSamples

    let trebleEnergy = 0
    let totalEnergy = 0 // accumulated power of every bin, for normalization

    for (let i = 0; i < fft.length; i++) {
        const frequency = i * frequencyResolution
        const magnitude = Math.abs(fft[i]) / totalSamples
        const power = magnitude * magnitude

        totalEnergy += power

        // Isolate and accumulate treble frequencies.
        if (frequency >= lowerBound && frequency <= upperBound) {
            trebleEnergy += power
        }
    }

    // Guard against a silent or empty spectrum (0 / 0 would yield NaN).
    return totalEnergy === 0 ? 0 : trebleEnergy / totalEnergy
}
|
|
@@ -138,23 +138,29 @@ export function makeCalculateStats(historySize = 500) {
|
|
|
138
138
|
return lowerHalf[0]
|
|
139
139
|
}
|
|
140
140
|
}
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
141
|
+
function erf(x) {
|
|
142
|
+
// Constants
|
|
143
|
+
const a1 = 0.254829592
|
|
144
|
+
const a2 = -0.284496736
|
|
145
|
+
const a3 = 1.421413741
|
|
146
|
+
const a4 = -1.453152027
|
|
147
|
+
const a5 = 1.061405429
|
|
148
|
+
const p = 0.3275911
|
|
149
|
+
|
|
150
|
+
// Save the sign of x
|
|
151
|
+
const sign = x < 0 ? -1 : 1
|
|
152
|
+
x = Math.abs(x)
|
|
153
|
+
|
|
154
|
+
// A&S formula 7.1.26
|
|
155
|
+
const t = 1.0 / (1.0 + p * x)
|
|
156
|
+
const y = 1.0 - ((((a5 * t + a4) * t + a3) * t + a2) * t + a1) * t * Math.exp(-x * x)
|
|
157
|
+
|
|
158
|
+
return sign * y
|
|
146
159
|
}
|
|
147
160
|
|
|
148
|
-
function
|
|
149
|
-
|
|
150
|
-
return 0
|
|
151
|
-
}
|
|
152
|
-
let median = calculateMedian(values)
|
|
153
|
-
let absoluteDeviations = values.map((value) => Math.abs(value - median))
|
|
154
|
-
let medianAbsoluteDeviation = calculateMedian(absoluteDeviations)
|
|
155
|
-
return medianAbsoluteDeviation
|
|
161
|
+
function normalizeZScore(zScore) {
|
|
162
|
+
return 0.5 * (1 + erf(zScore / Math.sqrt(2)))
|
|
156
163
|
}
|
|
157
|
-
|
|
158
164
|
return function calculateStats(value) {
|
|
159
165
|
if (typeof value !== 'number') throw new Error('Input must be a number')
|
|
160
166
|
|
|
@@ -178,12 +184,12 @@ export function makeCalculateStats(historySize = 500) {
|
|
|
178
184
|
let min = minQueue.length ? minQueue[0] : 0
|
|
179
185
|
let max = maxQueue.length ? maxQueue[0] : 0
|
|
180
186
|
let median = calculateMedian()
|
|
181
|
-
let
|
|
182
|
-
|
|
187
|
+
let normalized = queue.length ? (value - min) / (max - min) : 0
|
|
188
|
+
let zScore = variance ? (value - mean) / Math.sqrt(variance) : 0
|
|
183
189
|
return {
|
|
184
190
|
current: value,
|
|
185
|
-
zScore: (
|
|
186
|
-
normalized
|
|
191
|
+
zScore: normalizeZScore(zScore),
|
|
192
|
+
normalized,
|
|
187
193
|
standardDeviation: Math.sqrt(variance),
|
|
188
194
|
median,
|
|
189
195
|
mean,
|