biomQuant 0.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Mjolnir2307
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,108 @@
1
+ Metadata-Version: 2.4
2
+ Name: biomQuant
3
+ Version: 0.0.1
4
+ Summary: A package consisting of evaluation measures for gesture biometric quantification.
5
+ Home-page: https://github.com/Mjolnir2307/measurementSuite
6
+ Author: Mjolnir2307
7
+ Description-Content-Type: text/markdown
8
+ License-File: LICENSE.txt
9
+ Dynamic: author
10
+ Dynamic: description
11
+ Dynamic: description-content-type
12
+ Dynamic: home-page
13
+ Dynamic: license-file
14
+ Dynamic: summary
15
+
16
+ # biomQuant
17
+
18
+ A package consisting of evaluation measures for gesture biometric quantification.
19
+
20
+ This package provides four measures:
21
+
22
+ 1. Rank deviation ($\hat{r}$)
23
+ 2. Relevance ($\mathcal{R}$)
24
+ 3. Trend match distance ($\Psi$)
25
+ 4. ICGD Score ($C_d$)
26
+
27
+ We combine these in advanced acceptance score
28
+
29
+ ${A_r}^*~=~\frac{\sum_{j=1}^{G}\Bigl( \frac{2^{\lambda\mathcal{R}_j}}{\exp(\kappa*(r_{j}^{\Delta} - r_{j}^{\hat{e}} ))}\Bigr)}{\sqrt{\log_2(2+\nu\Psi)}}*{\exp(-\beta C_d)}$
30
+
31
+ Here, $G$ is the number of gestures. While $r_{j}^{\Delta}$ and $r_{j}^{\hat{e}}$ denote ranks of the $j^{th}$ gesture wrt the estimated biometric estimates and the ground truth respectively. $\lambda,~\kappa,~\nu,~\text{and}~\beta$ are the scaling factors.
32
+
33
+ We further normalize this into $nA_r^*(\Delta)$. Mathematically,
34
+
35
+ $nA_r^*(\Delta)=\frac{A_r^{*}(\Delta)}{A_r^{*}(\hat{e})}$
36
+
37
+ Where, $A_r^*(\Delta)$ and $A_r^*(\hat{e})$ represents $A_r^*$ values for the output DGBQA scores and ground truth, i.e.,
38
+
39
+ $A_r^*(\hat{e})=\sum_{j=1}^{G}2^{\lambda \Bigl[ \gamma\Bigl({\frac{G-{r_{j}^{\hat{e}}}+1}{G}\Bigr){\hat{e}\left[r_{j}^{\hat{e}}\right]}}+\Bigl(\frac{r_{j}^{\hat{e}}}{G}\Bigr)\Bigl(1-\hat{e}\left[r_{j}^{\hat{e}}\right]\Bigr) \Bigr]}$
40
+
41
+ ## Requirements
42
+
43
+ 1. numpy
44
+ 2. scikit-learn
45
+ 3. scipy
46
+ 4. tensorflow $\geq$ 2.8.0
47
+
48
+ ## How to use
49
+
50
+ 1. Advanced Acceptance Score
51
+
52
+ ```python
53
+ from biomQaunt.advancedAcceptance import comp_advancedAcceptance
54
+ nAr_star = comp_advancedAcceptance(biometricParams,
55
+ groundTruth,
56
+ embeddings,
57
+ labels,
58
+ G=numGestures)
59
+ ```
60
+
61
+ 2. Rank deviation
62
+
63
+ ```python
64
+ from biomQaunt.rankDev import rankDev
65
+ r_prime = rankDev(1-groundTruth,
66
+ biometricParams,
67
+ G=numGestures)
68
+ ```
69
+
70
+ 3. Relevance
71
+
72
+ ```python
73
+ import numpy as np
74
+ from biomQaunt.acceptanceScore import compAr
75
+
76
+ def preProcess(inputVec):
77
+ inputVec = (inputVec - np.mean(inputVec))/np.std(inputVec)
78
+ return inputVec/np.linalg.norm(inputVec)
79
+
80
+ relevance = compAr(preProcess(biometricParams),
81
+ preProcess(groundTruth),
82
+ normalizer=False,
83
+ relevance=True)
84
+ ```
85
+
86
+ 4. ICGD score
87
+
88
+ ```python
89
+ import tensorflow as tf
90
+ from biomQaunt.icgd import compICGD
91
+
92
+ def normalisation_layer(x):
93
+ return(tf.math.l2_normalize(x, axis=1, epsilon=1e-12))
94
+
95
+ embeddings = tf.keras.layers.Lambda(normalisation_layer)(embeddings)
96
+
97
+ icgdScore = compICGD(embeddings.numpy(),
98
+ labels)
99
+ ```
100
+
101
+ 5. Trend match distance
102
+
103
+ ```python
104
+ from biomQaunt.trendMatch import compTrendMatchDist
105
+ psi = compTrendMatchDist(biometricParams,
106
+ groundTruth,
107
+ G=numGestures)
108
+ ```
@@ -0,0 +1,93 @@
1
+ # biomQuant
2
+
3
+ A package consisting of evaluation measures for gesture biometric quantification.
4
+
5
+ This package provides four measures:
6
+
7
+ 1. Rank deviation ($\hat{r}$)
8
+ 2. Relevance ($\mathcal{R}$)
9
+ 3. Trend match distance ($\Psi$)
10
+ 4. ICGD Score ($C_d$)
11
+
12
+ We combine these in advanced acceptance score
13
+
14
+ ${A_r}^*~=~\frac{\sum_{j=1}^{G}\Bigl( \frac{2^{\lambda\mathcal{R}_j}}{\exp(\kappa*(r_{j}^{\Delta} - r_{j}^{\hat{e}} ))}\Bigr)}{\sqrt{\log_2(2+\nu\Psi)}}*{\exp(-\beta C_d)}$
15
+
16
+ Here, $G$ is the number of gestures. While $r_{j}^{\Delta}$ and $r_{j}^{\hat{e}}$ denote ranks of the $j^{th}$ gesture wrt the estimated biometric estimates and the ground truth respectively. $\lambda,~\kappa,~\nu,~\text{and}~\beta$ are the scaling factors.
17
+
18
+ We further normalize this into $nA_r^*(\Delta)$. Mathematically,
19
+
20
+ $nA_r^*(\Delta)=\frac{A_r^{*}(\Delta)}{A_r^{*}(\hat{e})}$
21
+
22
+ Where, $A_r^*(\Delta)$ and $A_r^*(\hat{e})$ represents $A_r^*$ values for the output DGBQA scores and ground truth, i.e.,
23
+
24
+ $A_r^*(\hat{e})=\sum_{j=1}^{G}2^{\lambda \Bigl[ \gamma\Bigl({\frac{G-{r_{j}^{\hat{e}}}+1}{G}\Bigr){\hat{e}\left[r_{j}^{\hat{e}}\right]}}+\Bigl(\frac{r_{j}^{\hat{e}}}{G}\Bigr)\Bigl(1-\hat{e}\left[r_{j}^{\hat{e}}\right]\Bigr) \Bigr]}$
25
+
26
+ ## Requirements
27
+
28
+ 1. numpy
29
+ 2. scikit-learn
30
+ 3. scipy
31
+ 4. tensorflow $\geq$ 2.8.0
32
+
33
+ ## How to use
34
+
35
+ 1. Advanced Acceptance Score
36
+
37
+ ```python
38
+ from biomQuant.advancedAcceptance import comp_advancedAcceptance
39
+ nAr_star = comp_advancedAcceptance(biometricParams,
40
+ groundTruth,
41
+ embeddings,
42
+ labels,
43
+ G=numGestures)
44
+ ```
45
+
46
+ 2. Rank deviation
47
+
48
+ ```python
49
+ from biomQuant.rankDev import rankDev
50
+ r_prime = rankDev(1-groundTruth,
51
+ biometricParams,
52
+ G_Total=numGestures)
53
+ ```
54
+
55
+ 3. Relevance
56
+
57
+ ```python
58
+ import numpy as np
59
+ from biomQuant.acceptanceScore import compAr
60
+
61
+ def preProcess(inputVec):
62
+ inputVec = (inputVec - np.mean(inputVec))/np.std(inputVec)
63
+ return inputVec/np.linalg.norm(inputVec)
64
+
65
+ relevance = compAr(preProcess(biometricParams),
66
+ preProcess(groundTruth), G=numGestures,
67
+ normalizer=False,
68
+ relevance=True)
69
+ ```
70
+
71
+ 4. ICGD score
72
+
73
+ ```python
74
+ import tensorflow as tf
75
+ from biomQuant.icgd import compICGD
76
+
77
+ def normalisation_layer(x):
78
+ return(tf.math.l2_normalize(x, axis=1, epsilon=1e-12))
79
+
80
+ embeddings = tf.keras.layers.Lambda(normalisation_layer)(embeddings)
81
+
82
+ icgdScore = compICGD(embeddings.numpy(),
83
+ labels)
84
+ ```
85
+
86
+ 5. Trend match distance
87
+
88
+ ```python
89
+ from biomQuant.trendMatch import compTrendMatchDist
90
+ psi = compTrendMatchDist(biometricParams,
91
+ groundTruth,
92
+ G=numGestures)
93
+ ```
File without changes
@@ -0,0 +1,118 @@
1
+ ######## Importing libraries
2
+ import numpy as np
3
+
4
+ ###### Rank Computation
5
def rank_compute_acc(rank_vector, val_to_rank):

    """
    Function to derive the rank of a particular value.

    INPUTS:-
        1) rank_vector: Sorted vector in which ranking is to be performed
        2) val_to_rank: Value whose rank is to be derived

    OUTPUTS:-
        1) rank_val: Ranked value - the best is rank '1', but index '0' is used
           all along except in the Relevance formulation

    RAISES:-
        ValueError: if val_to_rank does not occur in rank_vector
        (previously this fell through and crashed with an UnboundLocalError)
    """
    # Vectorized first-match lookup instead of a Python-level scan.
    # NOTE(review): with duplicate values the FIRST occurrence wins, exactly
    # as the original linear scan behaved.
    matches = np.flatnonzero(np.asarray(rank_vector) == val_to_rank)
    if matches.size == 0:
        raise ValueError("val_to_rank not present in rank_vector")
    return int(matches[0])
25
+
26
+ ####### Acceptance Score
27
def compAr(dgbqa,
           e_prime,
           G,
           normalizer=False,
           relevance=False,
           lambda_scale=2,
           kappa=1,
           ):

    """
    Function to compute Acceptance Score: Sum over all Gestures(Relevance/Rank Deviation)

    INPUTS:-
        1) dgbqa: Array of unranked but normalized DGBQA score values
        2) e_prime: Array of unranked but normalized (1 - EER) values
        3) G: Total number of gestures considered for analysis
        4) normalizer: If True, Ar for e_prime will be computed. Default value = False
           (the docstring always promised this default; the signature now provides it)
        5) relevance: If True, only the relevance sum is returned. Default value = False
        6) lambda_scale: Scaling factor for relevance. Default value = 2
        7) kappa: Scaling factor for the rank-deviation penalty. Default value = 1

    OUTPUTS:-
        1) Ar: Acceptance Score: Sum over all Gestures(Relevance/Rank Deviation)
    """

    ##### Defining Essentials
    e_prime_sort = -(np.sort(-e_prime))   # e_prime sorted best-first (descending)
    dgbqa_sort = -(np.sort(-dgbqa))       # DGBQA scores sorted best-first (descending)
    dgbqa_re = []                         # DGBQA scores reordered as per e_prime_sort
    arrangement_idx = []                  # Arrangement order of e_prime (0 .. G-1)
    Ar = 0                                # Acceptance value accumulator
    gamma = 2                             # Scaling factor for the first relevance term

    if not normalizer:

        ##### Arranging DGBQA scores as per e_prime's order.
        # NOTE(review): ties in e_prime always resolve to the first matching
        # index, mirroring the original nested-scan behaviour.
        for val in e_prime_sort:
            for idx_search, val_search in enumerate(e_prime):
                if val == val_search:
                    arrangement_idx.append(idx_search)
                    dgbqa_re.append(dgbqa[idx_search])
                    break

        ##### Ar Estimation: iterate over all gestures (best e_prime rank first)
        for r_e_j, dgbqa_r_e_j in enumerate(dgbqa_re):

            if not relevance:
                #### Relevance gain: reward high scores at good ranks and
                #### low scores at poor ranks.
                R_j = gamma * ((G - (r_e_j + 1) + 1) / G) * dgbqa_r_e_j \
                    + ((r_e_j + 1) / G) * (1 - dgbqa_r_e_j)
                R_j = 2 ** (lambda_scale * R_j)

                #### Rank deviation penalty: exp(kappa * |rank gap|)
                rank_dgbqa = rank_compute_acc(np.array(dgbqa_sort), dgbqa_r_e_j)
                rank_e_prime = rank_compute_acc(np.array(dgbqa_re), dgbqa_r_e_j)
                rank_dev_j = np.exp(kappa * np.abs(rank_dgbqa - rank_e_prime))

                #### Ar contribution of the current gesture
                Ar = Ar + R_j / rank_dev_j
            else:
                # Relevance-only mode: accumulate the raw (un-exponentiated) gain.
                R_j = gamma * ((G - (r_e_j + 1) + 1) / G) * dgbqa_r_e_j \
                    + ((r_e_j + 1) / G) * (1 - dgbqa_r_e_j)
                Ar = Ar + R_j

        return Ar

    else:
        ##### Normalizer mode: Ar of the ground truth itself (no rank penalty,
        ##### since e_prime is trivially in its own order).
        for r_e_j, eprime_r_e_j in enumerate(e_prime_sort):
            R_j = gamma * ((G - (r_e_j + 1) + 1) / G) * eprime_r_e_j \
                + ((r_e_j + 1) / G) * (1 - eprime_r_e_j)
            Ar = Ar + 2 ** (lambda_scale * R_j)

        return Ar
@@ -0,0 +1,88 @@
1
+ from rankDev import *
2
+ from icgd import *
3
+ from trendMatch import *
4
+ from acceptanceScore import *
5
+
6
def comp_advancedAcceptance(biomQuant,
                            e_prime,
                            embeddings,
                            labels,
                            beta=0.75,
                            nu=1,
                            kappa=1,
                            lambdaVal=2,
                            G=10,
                            normalize=True
                            ):

    """
    Function to return normalized or vanilla advanced acceptance score.

    INPUTS:-
        1) biomQuant: Biometric characteristics corresponding to G gestures
        2) e_prime: Ground truth scores for the G gestures
        3) embeddings: Embeddings for ICGD score computation
        4) labels: Gesture labels
        5) beta: Scaling parameter for ICGD score
        6) nu: Scaling parameter for trend match distance
        7) kappa: Scaling parameter for rank deviation
        8) lambdaVal: Scaling parameter for Relevance
        9) G: Total number of gestures (must match len(biomQuant)/len(e_prime))
        10) normalize: If True, nAr* (normalized by Ar*(e_prime)) is returned. Default: True

    OUTPUTS:-
        1) Ar*: Advanced acceptance score (normalized version when normalize=True)

    NOTE(review): this module relies on `np` being pulled in via the
    star-imports at the top of the file — confirm numpy stays imported there.
    """

    def preProcess(inputVec):
        # Z-score then L2-normalize so both vectors share a common scale.
        inputVec = (inputVec - np.mean(inputVec)) / np.std(inputVec)
        return inputVec / np.linalg.norm(inputVec)

    biomQuant = preProcess(biomQuant)
    e_prime = preProcess(e_prime)

    # Acceptance score (relevance gain + rank-deviation penalty)
    Ar = compAr(biomQuant,
                e_prime,
                G,
                normalizer=False,
                relevance=False,
                lambda_scale=lambdaVal,
                kappa=kappa)

    # ICGD score from the embedding space
    icgdScore = compICGD(embeddings, labels)

    # Trend match distance between estimate and ground truth
    psi = compTrendMatchDist(biomQuant, e_prime, G)

    # Ar* = Ar / sqrt(log2(2 + nu*psi)) * exp(-beta * C_d)
    # (computed once instead of duplicating the expression per branch)
    ar_star = Ar * ((np.log2(2 + nu * psi)) ** (-1 / 2)) * np.exp(-beta * icgdScore)

    if not normalize:
        return ar_star

    # Normalize by Ar*(e_prime), i.e. the score of the ground truth itself.
    Ar_max = compAr(biomQuant,
                    e_prime,
                    G,
                    normalizer=True,
                    relevance=False,
                    lambda_scale=lambdaVal,
                    kappa=kappa)

    return ar_star / Ar_max
72
+
73
+
74
+
75
if __name__ == "__main__":

    # Smoke test on synthetic data.
    biomQuant = np.random.normal(size=(11,))
    e_prime = np.random.normal(size=(11,))

    embeddings = np.random.normal(size=(100, 32))
    labels = np.random.randint(low=0, high=10, size=(100,))

    # G must match the length of the gesture vectors (11 here) — the previous
    # call silently used the default G=10 against 11-element vectors.
    print(comp_advancedAcceptance(biomQuant,
                                  e_prime,
                                  embeddings,
                                  labels,
                                  G=11))
87
+
88
+
@@ -0,0 +1,75 @@
1
+ ######## Importing libraries
2
+ import numpy as np
3
+ import tensorflow as tf
4
+
5
+ ###### CGID and Decorrelated-CGID Score
6
+
7
+ ##### Mask Generation
8
+ #### Positive Mask
9
def get_positive_mask(labels):
    """
    Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.
    Args:
        labels: tf.int32 `Tensor` with shape [batch_size]
    Returns:
        mask: tf.bool `Tensor` with shape [batch_size, batch_size]
    """
    # Use tf.shape instead of labels.shape[0] so the mask also works when the
    # batch dimension is dynamic (labels.shape[0] is None inside tf.function /
    # Keras graphs and would crash tf.eye).
    batch_size = tf.shape(labels)[0]

    # Check that i and j are distinct
    indices_equal = tf.cast(tf.eye(batch_size), tf.bool)
    indices_not_equal = tf.logical_not(indices_equal)

    # Check if labels[i] == labels[j]
    # Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
    labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))

    # Combine the two masks: distinct indices AND identical labels
    mask = tf.logical_and(indices_not_equal, labels_equal)

    return mask
29
+
30
+ ###### Negative Mask - Different Mask
31
def get_negative_mask(labels):
    """Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.
    Args:
        labels: tf.int32 `Tensor` with shape [batch_size]
    Returns:
        mask: tf.bool `Tensor` with shape [batch_size, batch_size]
    """
    # Pairwise label-equality matrix via broadcasting of a (1, B) row
    # against a (B, 1) column, then invert it: different label <=> not equal.
    same_label = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
    return tf.logical_not(same_label)
45
+
46
+ ####### ICGD
47
def compICGD(f_theta, y_hgr):

    """
    Function to compute ICGD score

    INPUTS:-
        1) f_theta: Embeddings of the shape (N,d); N-Total Examples, d: Embedding Dimensions
        2) y_hgr: HGR labels of the shape (N,)

    OUTPUTS:-
        1) icgdScore: Average of masked gram matrix
    """

    ##### Unit-normalize every embedding row (L2)
    f_theta = tf.math.l2_normalize(f_theta, axis=1)

    ##### Boolean mask selecting pairs drawn from DIFFERENT gestures
    cross_gesture = get_negative_mask(y_hgr).numpy()

    ##### Cosine-similarity Gramian of the normalized embeddings
    gramian = tf.cast(
        tf.linalg.matmul(f_theta, f_theta, transpose_b=True),
        dtype=tf.float32,
    ).numpy()

    ##### Keep cross-gesture similarities, then select the non-negative entries.
    # NOTE(review): masked-out (same-gesture) entries are exactly 0 and therefore
    # also satisfy ">= 0", so they contribute to the denominator — this mirrors
    # the original computation byte-for-byte.
    scores = np.multiply(cross_gesture, gramian)
    keep = scores >= 0

    ##### ICGD score: mean of the retained similarities
    return np.sum(scores * keep) / np.sum(keep)
@@ -0,0 +1,62 @@
1
+ ######## Importing libraries
2
+ import numpy as np
3
+
4
+ ####### Rank-Deviation
5
+ ###### Rank Derivation
6
def rank_compute(rank_vector, val_to_rank, reverse_flag):

    """
    Function to derive the rank of a particular value.

    INPUTS:-
        1) rank_vector: Vector in which ranking is to be performed
        2) val_to_rank: Value whose rank is to be derived
        3) reverse_flag: If truthy, rank against a descending sort
           (largest value gets rank 0); otherwise rank against an ascending sort

    OUTPUTS:-
        1) rank_val: Ranked value - the best is rank '0'

    RAISES:-
        ValueError: if val_to_rank does not occur in rank_vector
        (previously this fell through and crashed with an UnboundLocalError)
    """

    rank_vector = np.asarray(rank_vector)
    if reverse_flag:
        rank_vector_sort = -(np.sort(-rank_vector))   # Descending sort
    else:
        rank_vector_sort = np.sort(rank_vector)       # Ascending sort

    # Vectorized first-match lookup; ties resolve to the first occurrence,
    # exactly as the original linear scan behaved.
    matches = np.flatnonzero(rank_vector_sort == val_to_rank)
    if matches.size == 0:
        raise ValueError("val_to_rank not present in rank_vector")
    return int(matches[0])
30
+
31
+ ###### Avg. Rank Deviation
32
def rankDev(eer_vec, dgbqa_vec, G_Total):

    """
    Computation of average rank deviation

    INPUTS:-
        1) eer_vec: Vector of EER values
        2) dgbqa_vec: Vector of DGBQA values
        3) G_Total: Total gestures in the vector

    OUTPUTS:-
        1) avg_rank_deviation: Total Deviation in Rank/Total Number of Gestures
    """

    total_deviation = 0  # Accumulated absolute rank difference

    for gesture in range(G_Total):
        # Rank of this gesture's EER value (descending order) and of its
        # DGBQA value (ascending order), matching the original flags.
        # NOTE(review): the opposite sort directions appear intentional
        # (low EER is good, high DGBQA is good) — confirm with the authors.
        eer_rank = rank_compute(eer_vec, eer_vec[gesture], True)
        dgbqa_rank = rank_compute(dgbqa_vec, dgbqa_vec[gesture], False)

        total_deviation += np.abs(eer_rank - dgbqa_rank)

    # Average over all gestures
    return total_deviation / G_Total
@@ -0,0 +1,77 @@
1
+ ######## Importing libraries
2
+ import numpy as np
3
+
4
+ ####### Pattern Match Distance
5
def compTrendMatchDist(dgbqa, e_prime, G):

    """
    Function to compute Pattern-Match Distance

    INPUTS:-
        1) dgbqa: Array of unranked but normalized DGBQA score values
        2) e_prime: Array of unranked but normalized (1 - EER) values
        3) G: Total number of gestures considered for analysis

    OUTPUTS:-
        1) pm_dist: Pattern-Match Distance
    """
    dgbqa = np.asarray(dgbqa)
    e_prime = np.asarray(e_prime)

    ##### Ground truth sorted best-first, then flipped to low-to-high order
    e_sorted_desc = -np.sort(-e_prime)
    e_low_to_high = e_sorted_desc[::-1]

    ##### DGBQA scores rearranged to follow the ground truth's descending order
    # (ties in e_prime resolve to the first matching index, as before)
    reordered = []
    for target in e_sorted_desc:
        for j, candidate in enumerate(e_prime):
            if candidate == target:
                reordered.append(dgbqa[j])
                break

    ##### Flip the rearranged scores to low-to-high order as well
    d_low_to_high = np.array(reordered)[::-1]

    ##### Slopes between consecutive DGBQA values (forward == backward slope,
    ##### only the direction of application differs)
    slopes = d_low_to_high[1:] - d_low_to_high[:-1]

    ##### Forward pass: predict each next ground-truth value from the slope
    fwd = e_low_to_high[:-1] + slopes
    # Middle G-2 predictions weighted once, the last prediction weighted twice
    error_f = np.sum(np.abs(fwd[:-1] - e_low_to_high[1:-1])) \
        + 2 * np.abs(fwd[-1] - e_low_to_high[-1])

    ##### Backward pass: predict each previous ground-truth value
    bwd = e_low_to_high[1:] - slopes
    # Middle G-2 predictions weighted once, the first prediction weighted twice
    error_b = np.sum(np.abs(bwd[1:] - e_low_to_high[1:-1])) \
        + 2 * np.abs(bwd[0] - e_low_to_high[0])

    ##### Pattern-matching distance: mean of forward and backward errors
    return 0.5 * (error_f + error_b)
@@ -0,0 +1,108 @@
1
+ Metadata-Version: 2.4
2
+ Name: biomQuant
3
+ Version: 0.0.1
4
+ Summary: A package consisting of evaluation measures for gesture biometric quantification.
5
+ Home-page: https://github.com/Mjolnir2307/measurementSuite
6
+ Author: Mjolnir2307
7
+ Description-Content-Type: text/markdown
8
+ License-File: LICENSE.txt
9
+ Dynamic: author
10
+ Dynamic: description
11
+ Dynamic: description-content-type
12
+ Dynamic: home-page
13
+ Dynamic: license-file
14
+ Dynamic: summary
15
+
16
+ # biomQuant
17
+
18
+ A package consisting of evaluation measures for gesture biometric quantification.
19
+
20
+ This package provides four measures:
21
+
22
+ 1. Rank deviation ($\hat{r}$)
23
+ 2. Relevance ($\mathcal{R}$)
24
+ 3. Trend match distance ($\Psi$)
25
+ 4. ICGD Score ($C_d$)
26
+
27
+ We combine these in advanced acceptance score
28
+
29
+ ${A_r}^*~=~\frac{\sum_{j=1}^{G}\Bigl( \frac{2^{\lambda\mathcal{R}_j}}{\exp(\kappa*(r_{j}^{\Delta} - r_{j}^{\hat{e}} ))}\Bigr)}{\sqrt{\log_2(2+\nu\Psi)}}*{\exp(-\beta C_d)}$
30
+
31
+ Here, $G$ is the number of gestures. While $r_{j}^{\Delta}$ and $r_{j}^{\hat{e}}$ denote ranks of the $j^{th}$ gesture wrt the estimated biometric estimates and the ground truth respectively. $\lambda,~\kappa,~\nu,~\text{and}~\beta$ are the scaling factors.
32
+
33
+ We further normalize this into $nA_r^*(\Delta)$. Mathematically,
34
+
35
+ $nA_r^*(\Delta)=\frac{A_r^{*}(\Delta)}{A_r^{*}(\hat{e})}$
36
+
37
+ Where, $A_r^*(\Delta)$ and $A_r^*(\hat{e})$ represents $A_r^*$ values for the output DGBQA scores and ground truth, i.e.,
38
+
39
+ $A_r^*(\hat{e})=\sum_{j=1}^{G}2^{\lambda \Bigl[ \gamma\Bigl({\frac{G-{r_{j}^{\hat{e}}}+1}{G}\Bigr){\hat{e}\left[r_{j}^{\hat{e}}\right]}}+\Bigl(\frac{r_{j}^{\hat{e}}}{G}\Bigr)\Bigl(1-\hat{e}\left[r_{j}^{\hat{e}}\right]\Bigr) \Bigr]}$
40
+
41
+ ## Requirements
42
+
43
+ 1. numpy
44
+ 2. scikit-learn
45
+ 3. scipy
46
+ 4. tensorflow $\geq$ 2.8.0
47
+
48
+ ## How to use
49
+
50
+ 1. Advanced Acceptance Score
51
+
52
+ ```python
53
+ from biomQaunt.advancedAcceptance import comp_advancedAcceptance
54
+ nAr_star = comp_advancedAcceptance(biometricParams,
55
+ groundTruth,
56
+ embeddings,
57
+ labels,
58
+ G=numGestures)
59
+ ```
60
+
61
+ 2. Rank deviation
62
+
63
+ ```python
64
+ from biomQaunt.rankDev import rankDev
65
+ r_prime = rankDev(1-groundTruth,
66
+ biometricParams,
67
+ G=numGestures)
68
+ ```
69
+
70
+ 3. Relevance
71
+
72
+ ```python
73
+ import numpy as np
74
+ from biomQaunt.acceptanceScore import compAr
75
+
76
+ def preProcess(inputVec):
77
+ inputVec = (inputVec - np.mean(inputVec))/np.std(inputVec)
78
+ return inputVec/np.linalg.norm(inputVec)
79
+
80
+ relevance = compAr(preProcess(biometricParams),
81
+ preProcess(groundTruth),
82
+ normalizer=False,
83
+ relevance=True)
84
+ ```
85
+
86
+ 4. ICGD score
87
+
88
+ ```python
89
+ import tensorflow as tf
90
+ from biomQaunt.icgd import compICGD
91
+
92
+ def normalisation_layer(x):
93
+ return(tf.math.l2_normalize(x, axis=1, epsilon=1e-12))
94
+
95
+ embeddings = tf.keras.layers.Lambda(normalisation_layer)(embeddings)
96
+
97
+ icgdScore = compICGD(embeddings.numpy(),
98
+ labels)
99
+ ```
100
+
101
+ 5. Trend match distance
102
+
103
+ ```python
104
+ from biomQaunt.trendMatch import compTrendMatchDist
105
+ psi = compTrendMatchDist(biometricParams,
106
+ groundTruth,
107
+ G=numGestures)
108
+ ```
@@ -0,0 +1,13 @@
1
+ LICENSE.txt
2
+ README.md
3
+ setup.py
4
+ biomQuant/__init__.py
5
+ biomQuant/acceptanceScore.py
6
+ biomQuant/advancedAcceptance.py
7
+ biomQuant/icgd.py
8
+ biomQuant/rankDev.py
9
+ biomQuant/trendMatch.py
10
+ biomQuant.egg-info/PKG-INFO
11
+ biomQuant.egg-info/SOURCES.txt
12
+ biomQuant.egg-info/dependency_links.txt
13
+ biomQuant.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ biomQuant
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,19 @@
1
from setuptools import setup, find_packages
from os import path

working_directory = path.abspath(path.dirname(__file__))

# Use the README as the long description rendered on PyPI.
with open(path.join(working_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='biomQuant',  # name of package, which will be the package dir below the project
    version='0.0.1',
    url='https://github.com/Mjolnir2307/measurementSuite',
    author='Mjolnir2307',
    description='A package consisting of evaluation measures for gesture biometric quantification.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=find_packages(),
    # Runtime dependencies per the README's Requirements section; this was
    # previously an empty list, so installs succeeded but imports failed.
    install_requires=[
        'numpy',
        'scikit-learn',
        'scipy',
        'tensorflow>=2.8.0',
    ],
)