bio-statsample-timeseries 0.1.1

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,124 @@
1
#require 'debugger'
module Statsample
  module ARIMA
    class ARIMA < Statsample::Vector
      include Statsample::TimeSeries

      # Prototype ARIMA dispatcher.
      # ds - dataset/series, p - AR order, i - integration order, q - MA order
      # NOTE: mixed ARMA (p > 0 and q > 0) and the integration order +i+
      # are not handled yet.
      def arima(ds, p, i, q)
        if q.zero?
          self.ar(p)
        elsif p.zero?
          # FIX: the moving-average branch must receive the MA order q;
          # it previously passed p, which is always zero on this branch
          self.ma(q)
        end
      end

      def ar(p)
        #AutoRegressive part of model
        #http://en.wikipedia.org/wiki/Autoregressive_model#Definition
        #For finding parameters(to fit), we will use either Yule-walker
        #or Burg's algorithm(more efficient)
      end

      def yule_walker()
        #To be implemented
      end

      # Wraps a plain Array in a scale-type Statsample::Vector.
      def create_vector(arr)
        Statsample::Vector.new(arr, :scale)
      end

      # Tentative AR(p) simulator.
      # n     - number of observations to simulate
      # phi   - known AR model parameters (later to be obtained by
      #         Yule-Walker/Burg)
      # sigma - standard deviation of the white noise
      # Returns an Array of n simulated observations.
      def ar_sim(n, phi, sigma)
        #using random number generator for inclusion of white noise
        err_nor = Distribution::Normal.rng(0, sigma)
        # FIX: draw 10 distinct burn-in values; Array.new(10, err_nor.call)
        # evaluated the generator once and repeated the same number 10 times
        buffer = Array.new(10) { err_nor.call }
        x = buffer + Array.new(n, 0)

        # FIX: fill exactly the n slots after the burn-in; the previous
        # 11.upto(n + 11) left x[10] at 0 and wrote two values past the end
        10.upto(n + 9) do |i|
          if i <= phi.size
            #dependent on previous accumulation of x
            backshifts = create_vector(x[0...i].reverse)
          else
            #dependent on number of phi size/order
            backshifts = create_vector(x[(i - phi.size)...i].reverse)
          end
          parameters = create_vector(phi[0...backshifts.size])

          summation = (backshifts * parameters).inject(:+)
          x[i] = summation + err_nor.call
        end
        # FIX: drop the burn-in by position; `x - buffer` removed elements
        # by value and could discard legitimate simulated observations
        x.drop(10)
      end

      # Moving-average simulator.
      # n     - number of observations (eg: 1000)
      # theta - model parameters containing the q MA coefficients
      # sigma - standard deviation of the white noise
      def ma_sim(n, theta, sigma)
        mean = theta.to_ts.mean()
        whitenoise_gen = Distribution::Normal.rng(0, sigma)
        x = Array.new(n, 0)
        q = theta.size
        noise_arr = (n + 1).times.map { whitenoise_gen.call() }

        # FIX: iterate over indices 0...n; the previous 1.upto(n) skipped
        # x[0] (left at 0) and appended an extra (n + 1)th element
        n.times do |i|
          #take care that noise vector doesn't try to index -ve value:
          if i <= q
            noises = create_vector(noise_arr[0..i].reverse)
          else
            noises = create_vector(noise_arr[(i - q)..i].reverse)
          end
          weights = create_vector([1] + theta[0...noises.size - 1])

          summation = (weights * noises).inject(:+)
          x[i] = mean + summation
        end
        x
      end

      # ARMA simulator, represented by:
      # http://upload.wikimedia.org/math/2/e/d/2ed0485927b4370ae288f1bc1fe2fc8b.png
      # n     - number of observations
      # p     - array of AR coefficients
      # q     - array of MA coefficients
      # sigma - standard deviation of the white noise
      def arma_sim(n, p, q, sigma)
        whitenoise_gen = Distribution::Normal.rng(0, sigma)
        noise_arr = (n + 11).times.map { whitenoise_gen.call() }

        # FIX: distinct burn-in draws instead of one value repeated 10 times
        buffer = Array.new(10) { whitenoise_gen.call }
        x = buffer + Array.new(n, 0)

        # FIX: same off-by-one repair as ar_sim - fill x[10]..x[n + 9]
        10.upto(n + 9) do |i|
          if i <= p.size
            backshifts = create_vector(x[0...i].reverse)
          else
            backshifts = create_vector(x[(i - p.size)...i].reverse)
          end
          parameters = create_vector(p[0...backshifts.size])

          ar_summation = (backshifts * parameters).inject(:+)

          if i <= q.size
            noises = create_vector(noise_arr[0..i].reverse)
          else
            noises = create_vector(noise_arr[(i - q.size)..i].reverse)
          end
          weights = create_vector([1] + q[0...noises.size - 1])

          ma_summation = (weights * noises).inject(:+)

          x[i] = ar_summation + ma_summation
        end
        # FIX: positional drop instead of value-based Array subtraction
        x.drop(10)
      end
    end
  end
end
@@ -0,0 +1,181 @@
1
require 'bio-statsample-timeseries/timeseries/pacf'

# Shorthand conversions for turning plain collections into
# Statsample::TimeSeries::TimeSeries objects.
module Statsample::TimeSeriesShorthands
  # Builds a new Statsample::TimeSeries::TimeSeries from the receiver.
  # Any extra arguments are forwarded to TimeSeries.new.
  def to_time_series(*args)
    Statsample::TimeSeries::TimeSeries.new(self, :scale, *args)
  end

  alias_method :to_ts, :to_time_series
end

class Array
  include Statsample::TimeSeriesShorthands
end
15
+
16
module Statsample
  module TimeSeries
    # Collection of data indexed by time.
    # The order goes from earliest to latest.
    class TimeSeries < Statsample::Vector
      include Statsample::TimeSeries::Pacf

      # Calculates the autocorrelation coefficients of the series.
      #
      # The first element is always 1, since that is the correlation
      # of the series with itself.
      #
      # Usage:
      #
      #   ts = (1..100).map { rand }.to_time_series
      #
      #   ts.acf     # => array with first 21 autocorrelations
      #   ts.acf 3   # => array with first 3 autocorrelations
      #
      def acf max_lags = nil
        max_lags ||= (10 * Math.log10(size)).to_i

        (0..max_lags).map do |k|
          next 1.0 if k.zero?

          m = self.mean
          # Pearson's coefficient is unusable here: the lagged series must
          # share the mean of the regular series.
          ((self - m) * (self.lag(k) - m)).sum / self.variance_sample / (self.size - 1)
        end
      end

      # Partial autocorrelation of the series via Yule-Walker equations.
      # max_lags - maximum number of lags for the pacf
      # method   - autocovariance flavour used by yule_walker: 'yw' for the
      #            unbiased estimate, 'mle' for the biased maximum-likelihood one
      def pacf(max_lags = nil, method = 'yw')
        max_lags ||= (10 * Math.log10(size)).to_i
        Pacf::Pacf.pacf_yw(self, max_lags, method)
      end

      # Lags the series by k periods.
      #
      # The convention is to set the oldest observations (the first ones
      # in the series) to nil so that the size of the lagged series is the
      # same as the original.
      #
      # Usage:
      #
      #   ts = (1..10).map { rand }.to_time_series
      #        # => [0.69, 0.23, 0.44, 0.71, ...]
      #
      #   ts.lag   # => [nil, 0.69, 0.23, 0.44, ...]
      #   ts.lag 2 # => [nil, nil, 0.69, 0.23, ...]
      #
      def lag k = 1
        return self if k == 0

        dup.tap do |lagged|
          # shift values towards the tail, walking backwards so nothing
          # is overwritten before it has been copied
          (lagged.size - 1).downto(k) { |idx| lagged[idx] = lagged[idx - k] }

          # blank out the first k slots
          k.times { |idx| lagged[idx] = nil }
          lagged.set_valid_data
        end
      end

      # Performs a first difference of the series.
      #
      # The convention is to set the oldest observation (the first one in
      # the series) to nil so that the size of the diffed series is the
      # same as the original.
      #
      # Usage:
      #
      #   ts = (1..10).map { rand }.to_ts
      #        # => [0.69, 0.23, 0.44, 0.71, ...]
      #
      #   ts.diff # => [nil, -0.46, 0.21, 0.27, ...]
      #
      def diff
        self - self.lag
      end

      # Calculates a moving average of the series using the provided
      # lookback argument. The lookback defaults to 10 periods.
      #
      # The first n - 1 observations are nil; when the window does not fit
      # into the series the scalar mean is returned instead.
      #
      # Usage:
      #
      #   ts = (1..100).map { rand }.to_ts
      #        # => [0.69, 0.23, 0.44, 0.71, ...]
      #
      #   # first 9 observations are nil
      #   ts.ma # => [ ... nil, 0.484... , 0.445... , 0.513 ... , ... ]
      def ma n = 10
        return mean if n >= size

        padding = [nil] * (n - 1)
        averages = (0..(size - n)).map do |start|
          self[start...(start + n)].inject(&:+) / n
        end
        (padding + averages).to_time_series
      end

      # Calculates an exponential moving average of the series using a
      # specified parameter. If wilder is false (the default) then the EMA
      # uses a smoothing value of 2 / (n + 1), if it is true then it uses the
      # Welles Wilder smoother of 1 / n.
      #
      # Warning for EMA usage: EMAs are unstable for small series, as they
      # use a lot more than n observations to calculate. The series is stable
      # if the size of the series is >= 3.45 * (n + 1)
      #
      # Usage:
      #
      #   ts = (1..100).map { rand }.to_ts
      #        # => [0.69, 0.23, 0.44, 0.71, ...]
      #
      #   # first 9 observations are nil
      #   ts.ema # => [ ... nil, 0.509... , 0.433..., ... ]
      def ema n = 10, wilder = false
        smoother = wilder ? 1.0 / n : 2.0 / (n + 1)

        # everything must start from the first non-nil observation
        start = self.data.index { |obs| obs != nil }

        # the first n - 1 observations stay nil
        base = [nil] * (start + n - 1)

        # the nth observation seeds the EMA with a plain moving average
        base << self[start...(start + n)].inject(0.0) { |s, a| a.nil? ? s : s + a } / n

        (start + n).upto size - 1 do |idx|
          base << self[idx] * smoother + (1 - smoother) * base.last
        end

        base.to_time_series
      end

      # Calculates the MACD (moving average convergence-divergence) of the
      # time series - this is a comparison of a fast EMA with a slow EMA.
      def macd fast = 12, slow = 26, signal = 9
        series = ema(fast) - ema(slow)
        [series, series.ema(signal)]
      end

      # Borrow the operations from Vector, but convert back to a time series.
      def + series
        super.to_a.to_ts
      end

      def - series
        super.to_a.to_ts
      end

      def to_s
        sprintf("Time Series(type:%s, n:%d)[%s]", @type.to_s, @data.size,
                @data.collect { |d| d.nil? ? "nil" : d }.join(","))
      end
    end
  end
end
@@ -0,0 +1,100 @@
1
module Statsample
  module TimeSeries
    module Pacf
      class Pacf

        # Partial autocorrelation coefficients via Yule-Walker equations.
        # Inspiration: StatsModels
        # timeseries - the series under analysis
        # max_lags   - number of lags to compute (lag 0 is always 1.0)
        # method     - 'yw' (unbiased) or 'mle' (biased), see yule_walker
        def self.pacf_yw(timeseries, max_lags, method = 'yw')
          pacf = [1.0]
          # FIX: use each, not map - the mapped array was built and
          # discarded; accumulation happens through the << push
          (1..max_lags).each do |i|
            pacf << yule_walker(timeseries, i, method)[-1]
          end
          pacf
        end

        # From the series, estimates AR(p)(autoregressive) parameters
        # using Yule-Walker equations. See -
        # http://en.wikipedia.org/wiki/Autoregressive_moving_average_model
        #
        # parameters:
        # ts     = series
        # k      = order, default = 1
        # method = can be 'yw' or 'mle'. If 'yw' then it is unbiased and the
        #          denominator is (n - k); with 'mle' the denominator is n.
        #
        # returns:
        # rho => autoregressive coefficients
        def self.yule_walker(ts, k = 1, method='yw')
          ts = ts - ts.mean
          n = ts.size
          if method.downcase.eql? 'yw'
            #unbiased => denominator = (n - k)
            denom = ->(lag) { n - lag }
          else
            #mle => denominator = n
            denom = ->(lag) { n }
          end
          # r[l] holds the lag-l autocovariance estimate
          r = Array.new(k + 1) { 0.0 }
          r[0] = ts.map { |x| x ** 2 }.inject(:+).to_f / denom.call(0).to_f

          1.upto(k) do |l|
            r[l] = (ts[0...-l].zip(ts[l...ts.size])).map do |x|
              x.inject(:*)
            end.inject(:+).to_f / denom.call(l).to_f
          end

          r_R = toeplitz(r[0...-1])

          # a Toeplitz matrix is symmetric, so building it from columns
          # is equivalent to building it from rows
          mat = Matrix.columns(r_R).inverse()
          solve_matrix(mat, r[1..r.size])
        end

        # Generates a Toeplitz matrix -
        # http://en.wikipedia.org/wiki/Toeplitz_matrix
        # Toeplitz matrices are equal when they are stored in row &
        # column major
        # => arr = [0, 1, 2, 3]
        # => result:
        # [[0, 1, 2, 3],
        #  [1, 0, 1, 2],
        #  [2, 1, 0, 1],
        #  [3, 2, 1, 0]]
        def self.toeplitz(arr)
          eplitz_matrix = Array.new(arr.size) { Array.new(arr.size) }

          0.upto(arr.size - 1) do |i|
            j = 0
            index = i
            # walk arr[i] down to arr[0] for the leading part of row `index`
            while i >= 0 do
              eplitz_matrix[index][j] = arr[i]
              j += 1
              i -= 1
            end
            # then arr[1], arr[2], ... fill the remainder of the row
            i = index + 1; k = 1
            while i < arr.size do
              eplitz_matrix[index][j] = arr[k]
              i += 1; j += 1; k += 1
            end
          end
          eplitz_matrix
        end

        # Multiplies +matrix+ by +out_vector+. Despite the name this is a
        # plain matrix-vector product; yule_walker passes the *inverse* of
        # the autocovariance matrix, so the product solves the linear system.
        # Returns an Array of Floats.
        def self.solve_matrix(matrix, out_vector)
          solution_vector = Array.new(out_vector.size, 0)
          matrix = matrix.to_a
          k = 0
          matrix.each do |row|
            row.each_with_index do |element, i|
              solution_vector[k] += element * 1.0 * out_vector[i]
            end
            k += 1
          end
          solution_vector
        end

      end
    end
  end
end
@@ -0,0 +1,500 @@
1
+ 17.66
2
+ 17.65
3
+ 17.68
4
+ 17.66
5
+ 17.68
6
+ 17.67
7
+ 17.68
8
+ 17.68
9
+ 17.67
10
+ 17.67
11
+ 17.68
12
+ 17.71
13
+ 17.74
14
+ 17.72
15
+ 17.73
16
+ 17.76
17
+ 17.74
18
+ 17.69
19
+ 17.69
20
+ 17.67
21
+ 17.66
22
+ 17.67
23
+ 17.69
24
+ 17.69
25
+ 17.68
26
+ 17.65
27
+ 17.65
28
+ 17.64
29
+ 17.63
30
+ 17.64
31
+ 17.67
32
+ 17.68
33
+ 17.7
34
+ 17.68
35
+ 17.69
36
+ 17.69
37
+ 17.72
38
+ 17.71
39
+ 17.71
40
+ 17.71
41
+ 17.69
42
+ 17.69
43
+ 17.71
44
+ 17.72
45
+ 17.71
46
+ 17.68
47
+ 17.68
48
+ 17.68
49
+ 17.69
50
+ 17.68
51
+ 17.68
52
+ 17.69
53
+ 17.67
54
+ 17.69
55
+ 17.71
56
+ 17.7
57
+ 17.7
58
+ 17.71
59
+ 17.73
60
+ 17.74
61
+ 17.74
62
+ 17.74
63
+ 17.76
64
+ 17.77
65
+ 17.55
66
+ 17.55
67
+ 17.5
68
+ 17.46
69
+ 17.49
70
+ 17.54
71
+ 17.51
72
+ 17.54
73
+ 17.57
74
+ 17.54
75
+ 17.52
76
+ 17.53
77
+ 17.56
78
+ 17.55
79
+ 17.55
80
+ 17.54
81
+ 17.55
82
+ 17.55
83
+ 17.55
84
+ 17.54
85
+ 17.52
86
+ 17.53
87
+ 17.51
88
+ 17.52
89
+ 17.5
90
+ 17.5
91
+ 17.5
92
+ 17.49
93
+ 17.46
94
+ 17.47
95
+ 17.48
96
+ 17.45
97
+ 17.41
98
+ 17.39
99
+ 17.38
100
+ 17.43
101
+ 17.44
102
+ 17.43
103
+ 17.43
104
+ 17.46
105
+ 17.46
106
+ 17.47
107
+ 17.47
108
+ 17.45
109
+ 17.48
110
+ 17.49
111
+ 17.5
112
+ 17.49
113
+ 17.48
114
+ 17.49
115
+ 17.47
116
+ 17.47
117
+ 17.44
118
+ 17.44
119
+ 17.43
120
+ 17.45
121
+ 17.42
122
+ 17.43
123
+ 17.43
124
+ 17.44
125
+ 17.44
126
+ 17.43
127
+ 17.41
128
+ 17.41
129
+ 17.38
130
+ 17.38
131
+ 17.37
132
+ 17.37
133
+ 17.37
134
+ 17.3
135
+ 17.28
136
+ 17.27
137
+ 17.19
138
+ 16.41
139
+ 16.44
140
+ 16.48
141
+ 16.53
142
+ 16.51
143
+ 16.57
144
+ 16.54
145
+ 16.59
146
+ 16.64
147
+ 16.6
148
+ 16.65
149
+ 16.69
150
+ 16.69
151
+ 16.68
152
+ 16.64
153
+ 16.65
154
+ 16.66
155
+ 16.64
156
+ 16.61
157
+ 16.65
158
+ 16.67
159
+ 16.66
160
+ 16.65
161
+ 16.61
162
+ 16.59
163
+ 16.57
164
+ 16.55
165
+ 16.55
166
+ 16.57
167
+ 16.54
168
+ 16.6
169
+ 16.62
170
+ 16.6
171
+ 16.59
172
+ 16.61
173
+ 16.66
174
+ 16.69
175
+ 16.67
176
+ 16.65
177
+ 16.66
178
+ 16.65
179
+ 16.65
180
+ 16.68
181
+ 16.68
182
+ 16.67
183
+ 16.64
184
+ 16.73
185
+ 16.76
186
+ 16.75
187
+ 16.79
188
+ 16.8
189
+ 16.77
190
+ 16.74
191
+ 16.76
192
+ 16.83
193
+ 16.84
194
+ 16.82
195
+ 16.89
196
+ 16.93
197
+ 16.94
198
+ 16.9
199
+ 16.92
200
+ 16.88
201
+ 16.85
202
+ 16.87
203
+ 16.8
204
+ 16.79
205
+ 16.85
206
+ 16.85
207
+ 16.8
208
+ 16.82
209
+ 16.85
210
+ 16.9
211
+ 16.86
212
+ 16.79
213
+ 16.75
214
+ 16.78
215
+ 17.06
216
+ 17.05
217
+ 17.04
218
+ 17.02
219
+ 17.01
220
+ 17.02
221
+ 17.05
222
+ 17.07
223
+ 17.08
224
+ 17.09
225
+ 17.1
226
+ 17.11
227
+ 17.09
228
+ 17.1
229
+ 17.1
230
+ 17.12
231
+ 17.17
232
+ 17.16
233
+ 17.17
234
+ 17.18
235
+ 17.18
236
+ 17.18
237
+ 17.17
238
+ 17.15
239
+ 17.14
240
+ 17.13
241
+ 17.14
242
+ 17.13
243
+ 17.12
244
+ 17.12
245
+ 17.09
246
+ 17.09
247
+ 17.11
248
+ 17.06
249
+ 17.07
250
+ 17.06
251
+ 17.07
252
+ 17.06
253
+ 17.09
254
+ 17.05
255
+ 17.04
256
+ 17.04
257
+ 16.99
258
+ 17
259
+ 17.03
260
+ 17
261
+ 16.97
262
+ 16.96
263
+ 16.98
264
+ 16.98
265
+ 16.98
266
+ 17.03
267
+ 17
268
+ 17
269
+ 17
270
+ 17.02
271
+ 17
272
+ 17.02
273
+ 17.01
274
+ 17.02
275
+ 17.03
276
+ 17.03
277
+ 17.01
278
+ 17.03
279
+ 17.03
280
+ 17.03
281
+ 17.01
282
+ 17.03
283
+ 17.05
284
+ 17.05
285
+ 17.08
286
+ 17.04
287
+ 17.01
288
+ 17.03
289
+ 17.02
290
+ 17.03
291
+ 17.04
292
+ 17.05
293
+ 17.37
294
+ 17.35
295
+ 17.34
296
+ 17.32
297
+ 17.29
298
+ 17.29
299
+ 17.22
300
+ 17.26
301
+ 17.3
302
+ 17.34
303
+ 17.33
304
+ 17.39
305
+ 17.4
306
+ 17.39
307
+ 17.48
308
+ 17.5
309
+ 17.47
310
+ 17.43
311
+ 17.4
312
+ 17.42
313
+ 17.46
314
+ 17.48
315
+ 17.48
316
+ 17.46
317
+ 17.46
318
+ 17.45
319
+ 17.43
320
+ 17.44
321
+ 17.48
322
+ 17.43
323
+ 17.45
324
+ 17.47
325
+ 17.46
326
+ 17.46
327
+ 17.48
328
+ 17.48
329
+ 17.48
330
+ 17.46
331
+ 17.5
332
+ 17.55
333
+ 17.58
334
+ 17.57
335
+ 17.56
336
+ 17.59
337
+ 17.61
338
+ 17.62
339
+ 17.63
340
+ 17.62
341
+ 17.61
342
+ 17.61
343
+ 17.62
344
+ 17.64
345
+ 17.65
346
+ 17.61
347
+ 17.62
348
+ 17.66
349
+ 17.65
350
+ 17.64
351
+ 17.63
352
+ 17.64
353
+ 17.64
354
+ 17.64
355
+ 17.63
356
+ 17.61
357
+ 17.61
358
+ 17.62
359
+ 17.63
360
+ 17.64
361
+ 17.65
362
+ 17.66
363
+ 17.68
364
+ 17.69
365
+ 17.69
366
+ 17.69
367
+ 17.66
368
+ 17.69
369
+ 17.69
370
+ 17.62
371
+ 17.68
372
+ 17.64
373
+ 17.65
374
+ 17.61
375
+ 17.52
376
+ 17.56
377
+ 17.55
378
+ 17.55
379
+ 17.48
380
+ 17.45
381
+ 17.46
382
+ 17.46
383
+ 17.44
384
+ 17.47
385
+ 17.5
386
+ 17.49
387
+ 17.5
388
+ 17.53
389
+ 17.53
390
+ 17.54
391
+ 17.51
392
+ 17.51
393
+ 17.53
394
+ 17.53
395
+ 17.53
396
+ 17.55
397
+ 17.55
398
+ 17.54
399
+ 17.56
400
+ 17.59
401
+ 17.57
402
+ 17.58
403
+ 17.58
404
+ 17.57
405
+ 17.59
406
+ 17.57
407
+ 17.55
408
+ 17.51
409
+ 17.51
410
+ 17.52
411
+ 17.52
412
+ 17.53
413
+ 17.55
414
+ 17.59
415
+ 17.61
416
+ 17.61
417
+ 17.6
418
+ 17.6
419
+ 17.62
420
+ 17.65
421
+ 17.62
422
+ 17.6
423
+ 17.6
424
+ 17.62
425
+ 17.61
426
+ 17.62
427
+ 17.63
428
+ 17.64
429
+ 17.65
430
+ 17.61
431
+ 17.62
432
+ 17.64
433
+ 17.63
434
+ 17.62
435
+ 17.6
436
+ 17.57
437
+ 17.57
438
+ 17.6
439
+ 17.59
440
+ 17.6
441
+ 17.61
442
+ 17.61
443
+ 17.63
444
+ 17.63
445
+ 17.59
446
+ 17.58
447
+ 17.76
448
+ 17.79
449
+ 17.76
450
+ 17.73
451
+ 17.74
452
+ 17.73
453
+ 17.67
454
+ 17.66
455
+ 17.66
456
+ 17.64
457
+ 17.63
458
+ 17.62
459
+ 17.61
460
+ 17.6
461
+ 17.61
462
+ 17.61
463
+ 17.6
464
+ 17.6
465
+ 17.64
466
+ 17.65
467
+ 17.65
468
+ 17.63
469
+ 17.61
470
+ 17.6
471
+ 17.63
472
+ 17.63
473
+ 17.62
474
+ 17.63
475
+ 17.64
476
+ 17.62
477
+ 17.63
478
+ 17.65
479
+ 17.64
480
+ 17.6
481
+ 17.59
482
+ 17.59
483
+ 17.58
484
+ 17.58
485
+ 17.6
486
+ 17.6
487
+ 17.6
488
+ 17.6
489
+ 17.6
490
+ 17.58
491
+ 17.59
492
+ 17.6
493
+ 17.6
494
+ 17.6
495
+ 17.59
496
+ 17.59
497
+ 17.58
498
+ 17.58
499
+ 17.65
500
+ 17.65