irt_ruby 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/irt_ruby/rasch_model.rb +123 -33
- data/lib/irt_ruby/three_parameter_model.rb +154 -41
- data/lib/irt_ruby/two_parameter_model.rb +131 -40
- data/lib/irt_ruby/version.rb +1 -1
- data/lib/irt_ruby.rb +1 -0
- metadata +26 -10
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: eb346c93f7beebb572f5c80663276c1f0285f55fe3382b0c6dc43ee17e3c2d04
+  data.tar.gz: 9fcc3c15cd54e969ffc531fd004d5911eed71234c1218a643ac137aca3ae85ea
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1c78192318e3fb78c9ee9c33919fb5dced7d09e4c38af7f56b9f508c1d5e26fc4aab9665e4acdd12ce711dedae12299ef054182ac9d2a25458a5561c9d31a501
+  data.tar.gz: 1577007bae07c567fc05b6d2ad6037b3264d0ec2a23086eabe317ec332d896416d94dd257eb18a4e63d9b7ba7a21356cf3a8579bd1a419d849946280934a8f58
data/lib/irt_ruby/rasch_model.rb
CHANGED
@@ -1,58 +1,148 @@
 # frozen_string_literal: true

-require "matrix"
-
 module IrtRuby
-  # A class representing the Rasch model for Item Response Theory.
+  # A class representing the Rasch model for Item Response Theory (ability - difficulty).
+  # Incorporates:
+  #   - Adaptive learning rate
+  #   - Missing data handling (skip nil)
+  #   - Multiple convergence checks (log-likelihood + parameter updates)
   class RaschModel
+    MISSING_STRATEGIES = %i[ignore treat_as_incorrect treat_as_correct].freeze
+
+    def initialize(data,
+                   max_iter: 1000,
+                   tolerance: 1e-6,
+                   param_tolerance: 1e-6,
+                   learning_rate: 0.01,
+                   decay_factor: 0.5,
+                   missing_strategy: :ignore)
+      # data: A Matrix or array-of-arrays of responses (0/1 or nil for missing).
+      # missing_strategy: :ignore (skip), :treat_as_incorrect, :treat_as_correct
+
       @data = data
+      @data_array = data.to_a
+      num_rows = @data_array.size
+      num_cols = @data_array.first.size
+
+      raise ArgumentError, "missing_strategy must be one of #{MISSING_STRATEGIES}" unless MISSING_STRATEGIES.include?(missing_strategy)
+
+      @missing_strategy = missing_strategy
+
+      # Initialize parameters near zero
+      @abilities = Array.new(num_rows) { rand(-0.25..0.25) }
+      @difficulties = Array.new(num_cols) { rand(-0.25..0.25) }
+
+      @max_iter = max_iter
+      @tolerance = tolerance
+      @param_tolerance = param_tolerance
+      @learning_rate = learning_rate
+      @decay_factor = decay_factor
     end

-    # Sigmoid function to calculate probability
     def sigmoid(x)
       1.0 / (1.0 + Math.exp(-x))
     end

+    def resolve_missing(resp)
+      return [resp, false] unless resp.nil?
+
+      case @missing_strategy
+      when :ignore
+        [nil, true]
+      when :treat_as_incorrect
+        [0, false]
+      when :treat_as_correct
+        [1, false]
+      end
+    end
+
+    def log_likelihood
+      total_ll = 0.0
+      @data_array.each_with_index do |row, i|
+        row.each_with_index do |resp, j|
+          value, skip = resolve_missing(resp)
+          next if skip
+
           prob = sigmoid(@abilities[i] - @difficulties[j])
+          total_ll += if value == 1
+                        Math.log(prob + 1e-15)
+                      else
+                        Math.log((1 - prob) + 1e-15)
+                      end
         end
       end
+      total_ll
     end

+    def compute_gradient
+      grad_abilities = Array.new(@abilities.size, 0.0)
+      grad_difficulties = Array.new(@difficulties.size, 0.0)
+
+      @data_array.each_with_index do |row, i|
+        row.each_with_index do |resp, j|
+          value, skip = resolve_missing(resp)
+          next if skip
+
+          prob = sigmoid(@abilities[i] - @difficulties[j])
+          error = value - prob
+
+          grad_abilities[i] += error
+          grad_difficulties[j] -= error
         end
-      break if (last_likelihood - current_likelihood).abs < @tolerance
+      end

+      [grad_abilities, grad_difficulties]
+    end
+
+    def apply_gradient_update(grad_abilities, grad_difficulties)
+      old_abilities = @abilities.dup
+      old_difficulties = @difficulties.dup
+
+      @abilities.each_index do |i|
+        @abilities[i] += @learning_rate * grad_abilities[i]
+      end
+
+      @difficulties.each_index do |j|
+        @difficulties[j] += @learning_rate * grad_difficulties[j]
+      end
+
+      [old_abilities, old_difficulties]
+    end
+
+    def average_param_update(old_abilities, old_difficulties)
+      deltas = []
+      @abilities.each_with_index do |a, i|
+        deltas << (a - old_abilities[i]).abs
+      end
+      @difficulties.each_with_index do |d, j|
+        deltas << (d - old_difficulties[j]).abs
       end
+      deltas.sum / deltas.size
     end

-    # Fit the model to the data
     def fit
+      prev_ll = log_likelihood
+
+      @max_iter.times do
+        grad_abilities, grad_difficulties = compute_gradient
+
+        old_a, old_d = apply_gradient_update(grad_abilities, grad_difficulties)
+
+        current_ll = log_likelihood
+        param_delta = average_param_update(old_a, old_d)
+
+        if current_ll < prev_ll
+          @abilities = old_a
+          @difficulties = old_d
+          @learning_rate *= @decay_factor
+        else
+          ll_diff = (current_ll - prev_ll).abs
+          break if ll_diff < @tolerance && param_delta < @param_tolerance
+
+          prev_ll = current_ll
+        end
+      end
+
       { abilities: @abilities, difficulties: @difficulties }
     end
   end
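For reference, a minimal usage sketch of the updated RaschModel API shown in the diff above. The response matrix and option values below are illustrative only, not taken from the gem's documentation; rows are respondents, columns are items, and nil marks a missing response handled according to missing_strategy.

    require "irt_ruby"

    # Hypothetical 3x4 response matrix (1 = correct, 0 = incorrect, nil = missing).
    data = [
      [1, 0, 1, nil],
      [0, 0, 1, 1],
      [1, 1, nil, 0]
    ]

    model = IrtRuby::RaschModel.new(data, missing_strategy: :ignore, learning_rate: 0.05)
    result = model.fit

    result[:abilities]    # => one ability estimate per row (respondent)
    result[:difficulties] # => one difficulty estimate per column (item)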
data/lib/irt_ruby/three_parameter_model.rb
CHANGED
@@ -1,68 +1,181 @@
 # frozen_string_literal: true

-require "matrix"
-
 module IrtRuby
-  # A class representing the Three-Parameter model for Item Response Theory.
+  # A class representing the Three-Parameter model (3PL) for Item Response Theory.
+  # Incorporates:
+  #   - Adaptive learning rate
+  #   - Missing data handling
+  #   - Parameter clamping for discrimination, guessing
+  #   - Multiple convergence checks
+  #   - Separate gradient calculation & updates
   class ThreeParameterModel
+    MISSING_STRATEGIES = %i[ignore treat_as_incorrect treat_as_correct].freeze
+
+    def initialize(data,
+                   max_iter: 1000,
+                   tolerance: 1e-6,
+                   param_tolerance: 1e-6,
+                   learning_rate: 0.01,
+                   decay_factor: 0.5,
+                   missing_strategy: :ignore)
       @data = data
+      @data_array = data.to_a
+      num_rows = @data_array.size
+      num_cols = @data_array.first.size
+
+      raise ArgumentError, "missing_strategy must be one of #{MISSING_STRATEGIES}" unless MISSING_STRATEGIES.include?(missing_strategy)
+
+      @missing_strategy = missing_strategy
+
+      # Initialize parameters
+      @abilities = Array.new(num_rows) { rand(-0.25..0.25) }
+      @difficulties = Array.new(num_cols) { rand(-0.25..0.25) }
+      @discriminations = Array.new(num_cols) { rand(0.5..1.5) }
+      @guessings = Array.new(num_cols) { rand(0.0..0.3) }
+
+      @max_iter = max_iter
+      @tolerance = tolerance
+      @param_tolerance = param_tolerance
+      @learning_rate = learning_rate
+      @decay_factor = decay_factor
     end

-    # Sigmoid function to calculate probability
     def sigmoid(x)
       1.0 / (1.0 + Math.exp(-x))
     end

-    # Probability
+    # Probability for the 3PL model: c + (1-c)*sigmoid(a*(θ - b))
     def probability(theta, a, b, c)
-      c + (1 - c) * sigmoid(a * (theta - b))
+      c + (1.0 - c) * sigmoid(a * (theta - b))
+    end
+
+    def resolve_missing(resp)
+      return [resp, false] unless resp.nil?
+
+      case @missing_strategy
+      when :ignore
+        [nil, true]
+      when :treat_as_incorrect
+        [0, false]
+      when :treat_as_correct
+        [1, false]
+      end
     end

+    def log_likelihood
+      ll = 0.0
+      @data_array.each_with_index do |row, i|
+        row.each_with_index do |resp, j|
+          value, skip = resolve_missing(resp)
+          next if skip
+
+          prob = probability(@abilities[i],
+                             @discriminations[j],
+                             @difficulties[j],
+                             @guessings[j])
+
+          ll += if value == 1
+                  Math.log(prob + 1e-15)
+                else
+                  Math.log((1 - prob) + 1e-15)
+                end
         end
       end
+      ll
     end

+    def compute_gradient
+      grad_abilities = Array.new(@abilities.size, 0.0)
+      grad_difficulties = Array.new(@difficulties.size, 0.0)
+      grad_discriminations = Array.new(@discriminations.size, 0.0)
+      grad_guessings = Array.new(@guessings.size, 0.0)
+
+      @data_array.each_with_index do |row, i|
+        row.each_with_index do |resp, j|
+          value, skip = resolve_missing(resp)
+          next if skip
+
+          theta = @abilities[i]
+          a = @discriminations[j]
+          b = @difficulties[j]
+          c = @guessings[j]
+
+          prob = probability(theta, a, b, c)
+          error = value - prob
+
+          grad_abilities[i] += error * a * (1 - c)
+          grad_difficulties[j] -= error * a * (1 - c)
+          grad_discriminations[j] += error * (theta - b) * (1 - c)
+
+          grad_guessings[j] += error * 1.0
         end
-      break if (last_likelihood - current_likelihood).abs < @tolerance
+      end

+      [grad_abilities, grad_difficulties, grad_discriminations, grad_guessings]
+    end
+
+    def apply_gradient_update(ga, gd, gdisc, gc)
+      old_a = @abilities.dup
+      old_d = @difficulties.dup
+      old_disc = @discriminations.dup
+      old_c = @guessings.dup
+
+      @abilities.each_index do |i|
+        @abilities[i] += @learning_rate * ga[i]
       end
+
+      @difficulties.each_index do |j|
+        @difficulties[j] += @learning_rate * gd[j]
+      end
+
+      @discriminations.each_index do |j|
+        @discriminations[j] += @learning_rate * gdisc[j]
+        @discriminations[j] = 0.01 if @discriminations[j] < 0.01
+        @discriminations[j] = 5.0 if @discriminations[j] > 5.0
+      end
+
+      @guessings.each_index do |j|
+        @guessings[j] += @learning_rate * gc[j]
+        @guessings[j] = 0.0 if @guessings[j] < 0.0
+        @guessings[j] = 0.35 if @guessings[j] > 0.35
+      end
+
+      [old_a, old_d, old_disc, old_c]
+    end
+
+    def average_param_update(old_a, old_d, old_disc, old_c)
+      deltas = []
+      @abilities.each_with_index { |x, i| deltas << (x - old_a[i]).abs }
+      @difficulties.each_with_index { |x, j| deltas << (x - old_d[j]).abs }
+      @discriminations.each_with_index { |x, j| deltas << (x - old_disc[j]).abs }
+      @guessings.each_with_index { |x, j| deltas << (x - old_c[j]).abs }
+      deltas.sum / deltas.size
     end

-    # Fit the model to the data
     def fit
+      prev_ll = log_likelihood
+
+      @max_iter.times do
+        ga, gd, gdisc, gc = compute_gradient
+        old_a, old_d, old_disc, old_c = apply_gradient_update(ga, gd, gdisc, gc)
+
+        curr_ll = log_likelihood
+        param_delta = average_param_update(old_a, old_d, old_disc, old_c)
+
+        if curr_ll < prev_ll
+          @abilities = old_a
+          @difficulties = old_d
+          @discriminations = old_disc
+          @guessings = old_c
+          @learning_rate *= @decay_factor
+        else
+          ll_diff = (curr_ll - prev_ll).abs
+          break if ll_diff < @tolerance && param_delta < @param_tolerance
+
+          prev_ll = curr_ll
+        end
+      end
+
       {
         abilities: @abilities,
         difficulties: @difficulties,
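For reference, a similar usage sketch for the updated ThreeParameterModel; the data and keyword values are invented for illustration. Per the diff above, item probability follows the 3PL form c + (1 - c) * sigmoid(a * (theta - b)), with discrimination clamped to 0.01..5.0 and guessing to 0.0..0.35 during updates.

    require "irt_ruby"

    # Hypothetical 3x3 response matrix with one missing response.
    data = [
      [1, 1, 0],
      [0, nil, 1],
      [1, 0, 0]
    ]

    model = IrtRuby::ThreeParameterModel.new(data, max_iter: 500, missing_strategy: :treat_as_incorrect)
    result = model.fit

    result[:abilities]    # ability (theta) estimates
    result[:difficulties] # item difficulty (b) estimates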
data/lib/irt_ruby/two_parameter_model.rb
CHANGED
@@ -1,65 +1,156 @@
 # frozen_string_literal: true

-require "matrix"
-
 module IrtRuby
-  # A class representing the Two-Parameter model for Item Response Theory.
+  # A class representing the Two-Parameter model (2PL) for IRT.
+  # Incorporates:
+  #   - Adaptive learning rate
+  #   - Missing data handling
+  #   - Parameter clamping for discrimination
+  #   - Multiple convergence checks
+  #   - Separate gradient calculation & parameter update
   class TwoParameterModel
+    MISSING_STRATEGIES = %i[ignore treat_as_incorrect treat_as_correct].freeze
+
+    def initialize(data, max_iter: 1000, tolerance: 1e-6, param_tolerance: 1e-6,
+                   learning_rate: 0.01, decay_factor: 0.5,
+                   missing_strategy: :ignore)
       @data = data
+      @data_array = data.to_a
+      num_rows = @data_array.size
+      num_cols = @data_array.first.size
+
+      raise ArgumentError, "missing_strategy must be one of #{MISSING_STRATEGIES}" unless MISSING_STRATEGIES.include?(missing_strategy)
+
+      @missing_strategy = missing_strategy
+
+      # Initialize parameters
+      # Typically: ability ~ 0, difficulty ~ 0, discrimination ~ 1
+      @abilities = Array.new(num_rows) { rand(-0.25..0.25) }
+      @difficulties = Array.new(num_cols) { rand(-0.25..0.25) }
+      @discriminations = Array.new(num_cols) { rand(0.5..1.5) }
+
+      @max_iter = max_iter
+      @tolerance = tolerance
+      @param_tolerance = param_tolerance
+      @learning_rate = learning_rate
+      @decay_factor = decay_factor
     end

-    # Sigmoid function
     def sigmoid(x)
       1.0 / (1.0 + Math.exp(-x))
     end

+    def resolve_missing(resp)
+      return [resp, false] unless resp.nil?
+
+      case @missing_strategy
+      when :ignore
+        [nil, true]
+      when :treat_as_incorrect
+        [0, false]
+      when :treat_as_correct
+        [1, false]
+      end
+    end
+
+    def log_likelihood
+      ll = 0.0
+      @data_array.each_with_index do |row, i|
+        row.each_with_index do |resp, j|
+          value, skip = resolve_missing(resp)
+          next if skip
+
           prob = sigmoid(@discriminations[j] * (@abilities[i] - @difficulties[j]))
+          ll += if value == 1
+                  Math.log(prob + 1e-15)
+                else
+                  Math.log((1 - prob) + 1e-15)
+                end
         end
       end
+      ll
     end

+    def compute_gradient
+      grad_abilities = Array.new(@abilities.size, 0.0)
+      grad_difficulties = Array.new(@difficulties.size, 0.0)
+      grad_discriminations = Array.new(@discriminations.size, 0.0)
+
+      @data_array.each_with_index do |row, i|
+        row.each_with_index do |resp, j|
+          value, skip = resolve_missing(resp)
+          next if skip
+
+          prob = sigmoid(@discriminations[j] * (@abilities[i] - @difficulties[j]))
+          error = value - prob
+
+          grad_abilities[i] += error * @discriminations[j]
+          grad_difficulties[j] -= error * @discriminations[j]
+          grad_discriminations[j] += error * (@abilities[i] - @difficulties[j])
         end
+      end
+
+      [grad_abilities, grad_difficulties, grad_discriminations]
+    end
+
+    def apply_gradient_update(ga, gd, gdisc)
+      old_a = @abilities.dup
+      old_d = @difficulties.dup
+      old_disc = @discriminations.dup
+
+      @abilities.each_index do |i|
+        @abilities[i] += @learning_rate * ga[i]
+      end

+      @difficulties.each_index do |j|
+        @difficulties[j] += @learning_rate * gd[j]
       end
+
+      @discriminations.each_index do |j|
+        @discriminations[j] += @learning_rate * gdisc[j]
+        @discriminations[j] = 0.01 if @discriminations[j] < 0.01
+        @discriminations[j] = 5.0 if @discriminations[j] > 5.0
+      end
+
+      [old_a, old_d, old_disc]
+    end
+
+    def average_param_update(old_a, old_d, old_disc)
+      deltas = []
+      @abilities.each_with_index { |x, i| deltas << (x - old_a[i]).abs }
+      @difficulties.each_with_index { |x, j| deltas << (x - old_d[j]).abs }
+      @discriminations.each_with_index { |x, j| deltas << (x - old_disc[j]).abs }
+      deltas.sum / deltas.size
     end

-    # Fit the model to the data
     def fit
+      prev_ll = log_likelihood
+
+      @max_iter.times do
+        ga, gd, gdisc = compute_gradient
+        old_a, old_d, old_disc = apply_gradient_update(ga, gd, gdisc)
+
+        curr_ll = log_likelihood
+        param_delta = average_param_update(old_a, old_d, old_disc)
+
+        if curr_ll < prev_ll
+          @abilities = old_a
+          @difficulties = old_d
+          @discriminations = old_disc
+          @learning_rate *= @decay_factor
+        else
+          ll_diff = (curr_ll - prev_ll).abs
+          break if ll_diff < @tolerance && param_delta < @param_tolerance
+
+          prev_ll = curr_ll
+        end
+      end
+
+      {
+        abilities: @abilities,
+        difficulties: @difficulties,
+        discriminations: @discriminations
+      }
     end
   end
 end
data/lib/irt_ruby/version.rb
CHANGED
data/lib/irt_ruby.rb
CHANGED
metadata
CHANGED
@@ -1,15 +1,29 @@
 --- !ruby/object:Gem::Specification
 name: irt_ruby
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.2.0
 platform: ruby
 authors:
 - Alex Kholodniak
 autorequire:
 bindir: exe
 cert_chain: []
-date:
+date: 2025-03-01 00:00:00.000000000 Z
 dependencies:
+- !ruby/object:Gem::Dependency
+  name: matrix
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.4.2
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: 0.4.2
 - !ruby/object:Gem::Dependency
   name: bundler
   requirement: !ruby/object:Gem::Requirement
@@ -52,10 +66,12 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '3.0'
-description: IrtRuby
+description: "IrtRuby provides implementations of the Rasch model, Two-Parameter model,
+  \nand Three-Parameter model for Item Response Theory (IRT). \nIt allows you to estimate
+  the abilities of individuals and the difficulties, \ndiscriminations, and guessing
+  parameters of items based on their responses \nto a set of items. This version adds
+  support for multiple missing data \nstrategies (:ignore, :treat_as_incorrect, :treat_as_correct),
+  expanded \ntest coverage, and improved adaptive optimization.\n"
 email:
 - alexandrkholodniak@gmail.com
 executables: []
@@ -73,7 +89,7 @@ licenses:
 metadata:
   homepage_uri: https://github.com/SyntaxSpirits/irt_ruby
   source_code_uri: https://github.com/SyntaxSpirits/irt_ruby
-  changelog_uri: https://github.com/SyntaxSpirits/irt_ruby/CHANGELOG.md
+  changelog_uri: https://github.com/SyntaxSpirits/irt_ruby/blob/main/CHANGELOG.md
 post_install_message:
 rdoc_options: []
 require_paths:
@@ -89,9 +105,9 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.5.9
 signing_key:
 specification_version: 4
-summary: A Ruby gem that provides
+summary: A Ruby gem that provides Rasch, 2PL, and 3PL models for Item Response Theory
+  (IRT), with flexible missing data strategies.
 test_files: []
|