gsl_extras 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.document +5 -0
- data/Gemfile +13 -0
- data/LICENSE.txt +20 -0
- data/README.md +4 -0
- data/README.rdoc +19 -0
- data/Rakefile +53 -0
- data/VERSION +1 -0
- data/gsl_extras.gemspec +60 -0
- data/lib/gsl_extras.rb +1215 -0
- data/test/helper.rb +18 -0
- data/test/test_gsl_extras.rb +7 -0
- metadata +127 -0
data/lib/gsl_extras.rb
ADDED
|
@@ -0,0 +1,1215 @@
|
|
|
1
|
+
####################################
|
|
2
|
+
# = GSL Tools
|
|
3
|
+
####################################
|
|
4
|
+
#
|
|
5
|
+
# A set of tools which extend the Ruby wrapper for the GNU Scientific Library
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
#######################
|
|
10
|
+
# Fixes for rb-gsl #
|
|
11
|
+
#######################
|
|
12
|
+
|
|
13
|
+
# Means that #inspect, eval, Marshal.dump etc all work in the expected way
|
|
14
|
+
|
|
15
|
+
# Reopen GSL::Vector so that it behaves more like a plain Ruby Array:
# #join delegates to Array#join and #inspect produces an eval-able string.
class GSL::Vector
  # Identity conversion: a GSL::Vector is already a GSL vector.
  def to_gslv
    self
  end

  # Join the elements into a string, exactly as Array#join would.
  def join(*args)
    to_a.join(*args)
  end

  # Eval-able representation, e.g. "GSL::Vector.alloc([1.0, 2.0])".
  def inspect
    "GSL::Vector.alloc(#{to_a.inspect})"
  end
end
|
|
27
|
+
# Marshal support for GSL::Vector: the underlying C storage cannot be
# dumped directly, so serialise through the plain Array form.
class GSL::Vector
  # Called by Marshal.dump; the depth argument is ignored.
  def _dump(_depth)
    Marshal.dump(to_a)
  end

  # Called by Marshal.load: rebuild the vector from its dumped Array.
  def self._load(string)
    alloc(Marshal.load(string))
  end
end
|
|
35
|
+
# Marshal and #inspect support for GSL::Matrix, round-tripping through
# nested Ruby arrays (an array of row arrays).
class GSL::Matrix
  # Serialise as an array of row arrays; depth is ignored.
  def _dump(_depth)
    Marshal.dump(to_a)
  end

  # Rebuild from the dumped rows: flatten the data and restore the
  # original row/column counts.
  def self._load(string)
    rows = Marshal.load(string)
    alloc(rows.flatten, rows.size, rows[0].size)
  end

  # Eval-able representation mirroring the reconstruction in ._load.
  def inspect
    rows = to_a
    "GSL::Matrix.alloc(#{rows.flatten.inspect}, #{rows.size}, #{rows[0].size})"
  end
end
|
|
48
|
+
|
|
49
|
+
# #to_a, Marshal and #inspect support for GSL::Complex.
class GSL::Complex
  # Represent the number as a two-element [real, imaginary] array.
  def to_a
    [real, imag]
  end

  # Serialise via the [real, imaginary] pair; depth is ignored.
  def _dump(_depth)
    Marshal.dump(to_a)
  end

  # Rebuild from the dumped [real, imaginary] pair.
  def self._load(string)
    alloc(Marshal.load(string))
  end

  # Eval-able representation, e.g. "GSL::Complex.alloc([1.0, 2.0])".
  def inspect
    "GSL::Complex.alloc(#{to_a.inspect})"
  end
end
|
|
63
|
+
# Marshal and #inspect support for GSL::Vector::Complex: dumped as a pair
# of real/imaginary arrays, reconstructed from an array of [re, im] pairs.
class GSL::Vector::Complex
  # Serialise as [real_parts, imaginary_parts]; depth is ignored.
  def _dump(_depth)
    Marshal.dump([real.to_a, imag.to_a])
  end

  # Rebuild by zipping the dumped real/imaginary arrays into pairs.
  def self._load(string)
    re, im = Marshal.load(string)
    alloc(re.zip(im))
  end

  # Eval-able representation built from [re, im] pairs.
  def inspect
    re = real.to_a
    im = imag.to_a
    "GSL::Vector::Complex.alloc(#{re.zip(im).inspect})"
  end
end
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class CodeRunner
  # Aliases for the GSL non-finite constants under the CodeRunner
  # namespace, so that inspected values containing NaN/Infinity can be
  # eval'ed back in a CodeRunner context.
  NaN = GSL::NAN
  Infinity = GSL::POSINF
end
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
module GSL
|
|
88
|
+
|
|
89
|
+
# A class for interpolation from scattered multidimensional data using radial basis functions.
|
|
90
|
+
#
|
|
91
|
+
# E.g.
|
|
92
|
+
# x = GSL::Vector.alloc([0,3,6])
|
|
93
|
+
# y = GSL::Vector.alloc([0,1,2])
|
|
94
|
+
# z = GSL::Vector.alloc([0,5,10])
|
|
95
|
+
# normalising_radius = GSL::Vector.alloc([3,1])
|
|
96
|
+
#
|
|
97
|
+
# int = GSL::ScatterInterp.alloc(:linear, [x,y,z], false, normalising_radius)
|
|
98
|
+
# puts int.eval(4.5, 1.7)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class ScatterInterp

  THIN_PLATE_SPLINES = :thin_plate_splines

  # Create a new interpolation object for interpolating a function of one
  # or more variables from a scattered dataset.
  #
  # datavecs:: an array of vectors; the last vector holds the function
  #            values, the preceding vectors the coordinates of each value.
  # norm::     boolean; whether the normalised basis functions should be
  #            used (default false).
  # func::     the radial basis function: one of :linear, :cubic,
  #            :thin_plate_splines, :multiquadratic,
  #            :inverse_multiquadratic (plus the :*_alt variants).
  # r0::       a normalising radius on the order of the scale length of
  #            the variation; either a single Numeric or a vector/array
  #            with one entry per coordinate.
  def self.alloc(func, datavecs, norm, r0=1.0)
    new(func, datavecs, norm, r0)
  end

  def initialize(func, datavecs, norm=false, r0=1.0)
    # NOTE(review): @norm is stored but not read anywhere in this class —
    # the normalised weighting below is commented out.
    @norm = norm
    @npoints = datavecs[0].size
    datavecs.map!{|v| v.to_a}
    datavecs.each{|vec| raise ArgumentError.new("Datavectors must all have the same size ") unless vec.size == @npoints}
    @func = func
    # The last vector carries the function values; the rest are coordinates.
    data = datavecs.pop
    @gridpoints = GSL::Matrix.alloc(*datavecs).transpose
    @dim = datavecs.size
    @r0 = r0
    # Promote a scalar normalising radius to a per-dimension vector.
    if @r0.kind_of? Numeric
      v = GSL::Vector.alloc(@dim)
      v.set_all(@r0)
      @r0 = v
    end
    # Build the (dense, symmetric) RBF collocation matrix and solve for
    # the weights of each data point.
    m = GSL::Matrix.alloc(@npoints, @npoints)
    for i in 0...@npoints
      for j in 0...@npoints
        m[i,j] = function(@gridpoints.row(i), @gridpoints.row(j))
      end
    end
    @weights = m.LU_solve(GSL::Vector.alloc(data))
  end

  # Euclidean distance between two coordinate vectors.
  def radius(vec1, vec2) # :nodoc:
    Math.sqrt((vec1 - vec2).square.sum)
  end

  # Euclidean distance with each coordinate scaled by its normalising
  # radius. (@r0 is always a vector after initialize; the Numeric branch
  # is defensive.) # :nodoc:
  def normalized_radius(vec1, vec2)
    case @r0
    when Numeric
      Math.sqrt(((vec1 - vec2) / @r0).square.sum)
    else
      Math.sqrt(((vec1/@r0.to_gslv - vec2/@r0.to_gslv)).square.sum)
    end
  end

  # Return the value of the interpolation kernel for the separation
  # between the two given vectors. For :linear this is just the
  # normalised distance between the two points.
  def function(vec1, vec2)
    case @func
    when :linear
      return normalized_radius(vec1, vec2)
    when :cubic_alt
      return normalized_radius(vec1, vec2)**(1.5)
    when :thin_plate_splines
      # r^2 log r; zero at coincident points (the limit of r^2 log r).
      return 0.0 if radius(vec1, vec2) == 0.0
      return normalized_radius(vec1, vec2)**2.0 * Math.log(normalized_radius(vec1, vec2))
    when :thin_plate_splines_alt
      rnorm = ((@r0.prod.abs)**(2.0/@r0.size)*(((vec1-vec2).square / @r0.square).sum + 1.0))
      return rnorm * Math.log(rnorm)
    when :multiquadratic
      (@r0.prod.abs)**(1.0/@r0.size)*Math.sqrt(((vec1-vec2).square / @r0.square).sum + 1.0)
    when :inverse_multiquadratic
      1.0 / ((@r0.prod.abs)**(1.0/@r0.size)*Math.sqrt(((vec1-vec2).square / @r0.square).sum + 1.0))
    when :cubic
      ((@r0.prod.abs)**(2.0/@r0.size)*(((vec1-vec2).square / @r0.square).sum + 1.0))**(1.5)
    else
      raise ArgumentError.new("Bad radial basis function: #{@func}")
    end
  end

  # Return the interpolated value for the given parameters.
  def eval(*pars)
    # FIX: was `raise ArgumentError("...")`, which calls ArgumentError as a
    # method and raises NoMethodError instead of the intended exception.
    raise ArgumentError.new("wrong number of points") if pars.size != @dim
    pars = GSL::Vector.alloc(pars)
    # Weighted sum of the kernel evaluated against every data point.
    return @npoints.times.inject(0.0) do |sum, i|
      sum + function(pars, @gridpoints.row(i))*@weights[i]
    end
  end

  # Evaluate the function smoothed by a Gaussian of width sigma_vec,
  # by sampling a small grid around the point and smoothing it.
  # Currently only implemented for two dimensions.
  def gaussian_smooth_eval(*vals, sigma_vec)
    npix = 7
    raise "npix must be odd" if npix%2==0
    case vals.size
    when 2
      # Sample +/- 3 sigma around the requested point in each direction.
      vals0 = GSL::Vector.linspace(vals[0] - 3.0* sigma_vec[0], vals[0] + 3.0* sigma_vec[0], npix)
      vals1 = GSL::Vector.linspace(vals[1] - 3.0* sigma_vec[1], vals[1] + 3.0* sigma_vec[1], npix)
      mat = GSL::Matrix.alloc(vals0.size, vals1.size)
      for i in 0...vals0.size
        for j in 0...vals1.size
          mat[i,j] = eval(vals0[i], vals1[j])
        end
      end
      mat.gaussian_smooth(*sigma_vec.to_a)
      # Return the centre pixel of the smoothed patch.
      cent = (npix - 1) / 2
      return mat[cent, cent]
    else
      raise 'Not supported for this number of dimensions yet'
    end
  end

  # Create a GSL::Contour object for making contours of the interpolated
  # function. Only works for functions of 2 variables.
  def to_contour(grid_size=@npoints)
    m = Matrix.alloc(grid_size, grid_size)
    # FIX: was `raise TypeError("...")` — same class-called-as-method bug
    # as in #eval.
    raise TypeError.new("Must be 3d data") unless @gridpoints.shape[1] == 2
    xmax, xmin = @gridpoints.col(0).max, @gridpoints.col(0).min
    ymax, ymin = @gridpoints.col(1).max, @gridpoints.col(1).min
    p 'x', xmax, 'y', ymax
    # Evenly spaced evaluation grid covering the data's bounding box.
    xvec = GSL::Vector.alloc((0...grid_size).to_a) * (xmax - xmin) / (grid_size - 1.0).to_f + xmin
    yvec = GSL::Vector.alloc((0...grid_size).to_a) * (ymax - ymin) / (grid_size - 1.0).to_f + ymin
    p 'x', xvec.max, 'y', yvec.max

    for i in 0...grid_size
      for j in 0...grid_size
        m[i,j] = eval(xvec[i], yvec[j])
      end
    end
    p 'm', m.max
    Contour.alloc(xvec, yvec, m)
  end

end
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
# A class for making contours of a function on a regular two dimensional
# grid. If contours of scattered data are required, see
# GSL::ScatterInterp#to_contour.
class Contour

  # Create a new Contour object. <tt>x</tt> and <tt>y</tt> are vectors of
  # coordinates, and <tt>grid</tt> is a matrix of values on those
  # coordinates.
  def self.alloc(x, y, grid)
    new(x, y, grid)
  end

  # When true, connect_contours keeps the internal [point, [i, j, edge]]
  # path data instead of stripping it down to plain [x, y] points.
  attr_accessor :keep_path_data

  def initialize(x, y, grid)
    @x = x; @y=y; @grid=grid
    raise ArgumentError.new("Unmatching data sizes: #{x.size}, #{y.size}, #{grid.shape}") unless [x.size, y.size] == grid.shape
    @adaptive = false
  end

  # Switch on adaptive refinement: any cell that contains a contour
  # crossing is re-contoured on a finer sub-grid evaluated via +func+.
  def set_adaptive(func, scale, multi_adaptive=false) # :nodoc:
    @func = func; @adaptive = true
    @multi_adaptive = multi_adaptive
    if @multi_adaptive
      @adaption_scale = 4
      # NOTE(review): the message demands a power of two, but the check
      # only rejects odd scales other than 1 — confirm which is intended.
      raise "Adaption scale should be a power of two for multi_adaptive contour generation" if scale % 2 == 1 and not scale == 1
      @next_adaption_scale = scale / 2
    else
      @adaption_scale = scale*2
    end
  end

  # Create a series of contours at the given values. Returns a hash of
  # {value => array_of_contours}. The array_of_contours is an array of
  # arrays, where each array is a list of [x, y] coordinates along the
  # contour. A single Integer argument is treated as a count of evenly
  # spaced levels strictly between @grid.min and @grid.max.
  def contours(*values)
    (values = (0..values[0]+1).to_a.map{|i| i.to_f * (@grid.max - @grid.min) / ( values[0]+1) + @grid.min}; values.pop; values.shift) if values.size==1 and values[0].kind_of? Integer
    cons = values.inject({}){|hash, val| hash[val] = []; hash}
    # Cells are 2x2 blocks of gridpoints, hence the stride of 2 below.
    for i in 0...((@x.size / 2.0).ceil - 1)
      for j in 0...((@y.size / 2.0).ceil - 1)
        analyse_cell(i*2, j*2, cons)
      end
    end
    cons.keys.each{|val| cons[val] = connect_contours(cons[val])}
    @last_contours = cons
  end

  # Create a GraphKit object of the contours. With no arguments the
  # contours from the previous #contours call are reused; otherwise the
  # arguments are forwarded to #contours.
  def graphkit(*args)
    if args.size == 0
      conts = @last_contours
    else
      conts = contours(*args)
    end
    graphs = conts.map do |val, cons|
      unless cons[0]
        nil
      else
        (cons.map do |con|
          contour = con.transpose
          kit = CodeRunner::GraphKit.autocreate({x: {data: contour[0]}, y: {data: contour[1], title: val.to_s}})
          kit.data[0].with = "l"
          kit
        end).sum
      end
    end
    graphs.compact.reverse.sum
  end

  # Edge numbering for a 2x2 cell (0-3 are the outer edges, 4-7 the
  # diagonals meeting at the centre point):
  #          1 __
  #    0 | 4 \ /5 | 2
  #      | 7 / \ 6 |
  #          3 __
  # Each entry is an ordered list of edges that a single contour segment
  # may cross while passing through one cell.
  VALID_CONNECTIONS = [[0,4,5,2], [0,4,1], [0,7,3], [0,7,6,2], [0,4,5,6,3],[0,7,6,5,1], [1,4,7,3], [1,5,6,3], [1,5,2], [1,4,7,6,2], [2,6,3],[2,5,4,7,3]]

  # For the cell whose lowest-index gridpoint is (i, j), find where each
  # requested contour level crosses each of the 8 cell edges.
  # Returns {value => {edge_number => [xcross, ycross]}}.
  def get_crossed_edges(i, j, cons)
    ce = {}
    # Each edge maps to its two endpoint gridpoint indices.
    edges = {0=>[[i, j], [i+2, j]],
      1=>[[i, j], [i, j+2]],
      2=>[[i, j+2], [i+2, j+2]],
      3=>[[i+2, j], [i+2, j+2]],
      4=>[[i, j], [i+1, j+1]],
      5=>[[i, j+2], [i+1, j+1]],
      6=>[[i+2, j+2], [i+1, j+1]],
      7=>[[i+2, j], [i+1, j+1]]
    }
    cons.keys.each do |val|
      ce[val] = {}
      edges.each do |edge, (start, fin)|
        bounds = [@grid[*start], @grid[*fin]]
        if val <= bounds.max and val > bounds.min
          # Linear interpolation along the edge to locate the crossing.
          dx = @x[fin[0]] - @x[start[0]]
          dy = @y[fin[1]] - @y[start[1]]
          df = bounds[1] - bounds[0]
          xcross = @x[start[0]] + (val-bounds[0]) / df * dx
          ycross = @y[start[1]] + (val-bounds[0]) / df * dy
          ce[val][edge] = [xcross, ycross]
        end

      end
    end
    ce
  end

  private :get_crossed_edges

  # Examine one 2x2 cell. In adaptive mode any cell containing a crossing
  # is re-contoured on a finer sub-grid evaluated via @func, and the fine
  # paths are relabelled into the coarse cell's edge scheme; otherwise
  # crossings are assembled directly into path segments using
  # VALID_CONNECTIONS. Paths are appended to cons[value].
  def analyse_cell(i, j, cons)
    crossed_edges = get_crossed_edges(i, j, cons)
    if @adaptive and crossed_edges.values.find_all{|crossings| crossings.size>0}.size > 0
      # Scratch buffers for the refined sub-grid, reused between cells.
      @adaptive_matrix ||= GSL::Matrix.alloc(@adaption_scale+1, @adaption_scale+1)
      @adaptive_x ||= GSL::Vector.alloc(@adaption_scale+1)
      @adaptive_y ||= GSL::Vector.alloc(@adaption_scale+1)
      dx = (@x[i+2] - @x[i])/@adaption_scale.to_f
      dy = (@y[j+2] - @y[j])/@adaption_scale.to_f
      x = @x[i]
      y = @y[j]
      for ii in 0...@adaption_scale+1
        for jj in 0...@adaption_scale+1
          @adaptive_x[ii] = x + ii * dx
          @adaptive_y[jj] = y + jj * dy
          @adaptive_matrix[ii, jj] = @func.eval(@adaptive_x[ii], @adaptive_y[jj])
        end
      end
      cell_contour = Contour.alloc(@adaptive_x, @adaptive_y, @adaptive_matrix)
      cell_contour.keep_path_data = true
      if @multi_adaptive and @next_adaption_scale > 1
        # Recurse with a halved adaption scale.
        cell_contour.set_adaptive(@func, @next_adaption_scale, true)
      end

      cell_cons = cell_contour.contours(*cons.keys)
      cell_cons.each do |val, cell_val_contours|
        cell_val_contours.each do |path|
          # We have to relabel the path from the fine scale contour so that
          # it fits with the coarse scale labelling system; only the
          # beginning and end of the path matter.
          path_start = path[0][0]
          if path_start[0] == @x[i]
            path[0][1] = [i, j, 1]
          elsif path_start[0] == @x[i+2]
            path[0][1] = [i, j, 3]
          else
            if path_start[1] == @y[j]
              path[0][1] = [i, j, 0]
            elsif path_start[1] == @y[j+2]
              path[0][1] = [i, j, 2]
            else
              raise "Could not find path_start; #{path_start}; x: #{@x[i]}, #{@x[i+2]}; y: #{@y[j]}, #{@y[j+2]} "
            end
          end
          path_end = path[-1][0]
          if path_end[0] == @x[i]
            path[-1][1] = [i, j, 1]
          elsif path_end[0] == @x[i+2]
            path[-1][1] = [i, j, 3]
          else
            if path_end[1] == @y[j]
              path[-1][1] = [i, j, 0]
            elsif path_end[1] == @y[j+2]
              path[-1][1] = [i, j, 2]
            else
              raise "Could not find path_end #{path_end}; x: #{@x[i]}, #{@x[i+2]}; y: #{@y[j]}, #{@y[j+2]} "
            end
          end

          cons[val].push path
        end
      end
    else
      crossed_edges.each do |val, crossings|
        outer = crossings.keys.find_all{|edge| edge < 4}
        # NOTE(review): `edge > 4` excludes diagonal edge 4 from `inner`
        # even though edges 4-7 are all diagonals — confirm intentional.
        inner = crossings.keys.find_all{|edge| edge > 4}
        next if outer.size == 0 and inner.size == 0
        VALID_CONNECTIONS.each do |connection|
          path = crossings.values_at(*connection).compact.zip(connection.map{|edge| [i, j, edge]})
          # Only accept a connection if every one of its edges was crossed.
          next if path.size != connection.size
          connection.each{|edge| crossings.delete(edge)}
          cons[val].push path

        end
      end

    end
  end

  private :analyse_cell

  # Merge path segments whose endpoints lie on facing edges of adjacent
  # cells into single continuous contours. Every successful join restarts
  # the scan from scratch via throw/catch; the loop exits through the
  # `return` once a full pass makes no join.
  def connect_contours(contours)
    return contours if contours.size == 1
    loop do
      catch(:restart) do
        joined = []
        for i in 0...contours.size
          break if i >= contours.size
          for j in i+1...contours.size
            break if j >= contours.size
            coni = contours[i]
            conj = contours[j]
            # Try the four possible end-to-end orientations.
            if joins?(coni[-1], conj[0])
              contours[i] = coni + conj
              contours.delete_at(j)
              throw(:restart)
            elsif joins?(coni[0], conj[-1])
              contours[i] = conj + coni
              contours.delete_at(j)
              throw(:restart)
            elsif joins?(coni[0], conj[0])
              contours[i] = coni.reverse + conj
              contours.delete_at(j)
              throw(:restart)
            elsif joins?(coni[-1], conj[-1])
              contours[i] = conj + coni.reverse
              contours.delete_at(j)
              throw(:restart)
            end

          end
        end
        contours.each{|con| con.uniq!}
        # Strip the [i, j, edge] labels unless a caller asked to keep them.
        contours.map!{|con| con.map{|point| point[0]}} unless @keep_path_data
        return contours
      end
    end
  end

  private :connect_contours

  # Do two path endpoints (each of the form [[x, y], [i, j, edge]]) lie on
  # facing outer edges of adjacent cells?
  def joins?(path, contour)
    pi, pj, pedge = path[1]
    ci, cj, cedge = contour[1]
    joins = false
    joins = ((pi == ci and pj == cj + 2 and pedge == 0 and cedge == 2) or
      (pi == ci and pj == cj - 2 and pedge == 2 and cedge == 0) or
      (pi == ci + 2 and pj == cj and pedge == 1 and cedge == 3) or
      (pi == ci - 2 and pj == cj and pedge == 3 and cedge == 1))
    return joins
  end

  private :joins?

end
|
|
556
|
+
class Contour2
|
|
557
|
+
|
|
558
|
+
# Create a new Contour object. <tt>x</tt> and <tt>y</tt> are vectors of coordinates, and <tt>grid</tt> is a matrix of values on those coordinates. If a function is given, it will be used to evaluate gridpoints rather than the matrix (though a matrix must still be provided). Provide a function if your contours only cover a small part of the domain and your function is expensive to evaluate.
|
|
559
|
+
|
|
560
|
+
def self.alloc(x, y, grid, function=nil)
|
|
561
|
+
new(x, y, grid)
|
|
562
|
+
end
|
|
563
|
+
|
|
564
|
+
|
|
565
|
+
def initialize(x, y, grid, function=nil)
|
|
566
|
+
@function = function
|
|
567
|
+
@evaluated = {}
|
|
568
|
+
@x = x; @y=y; @grid=grid
|
|
569
|
+
# p @grid, @x, @y
|
|
570
|
+
raise ArgumentError.new("Unmatching data sizes: #{x.size}, #{y.size}, #{grid.shape}") unless [x.size, y.size] == grid.shape
|
|
571
|
+
@adaptive = false
|
|
572
|
+
end
|
|
573
|
+
|
|
574
|
+
|
|
575
|
+
|
|
576
|
+
def set_adaptive(func) # :nodoc:
|
|
577
|
+
@func = func; @adaptive = true
|
|
578
|
+
end
|
|
579
|
+
|
|
580
|
+
# Create a series of contours at the given values. Returns a hash of {value => array_of_contours}. The array_of_contours is a an array of arrays, where each array is a list of [x, y] coordinates along the contour.
|
|
581
|
+
|
|
582
|
+
def contours(*values)
|
|
583
|
+
(values = (0..values[0]+1).to_a.map{|i| i.to_f * (@grid.max - @grid.min) / ( values[0]+1) + @grid.min}; values.pop; values.shift) if values.size==1 and values[0].kind_of? Integer
|
|
584
|
+
cons = values.inject({}){|hash, val| hash[val] = []; hash}
|
|
585
|
+
get_startpoints(cons)
|
|
586
|
+
#@analysed = {}
|
|
587
|
+
@found = {}
|
|
588
|
+
#p cons
|
|
589
|
+
starts = cons
|
|
590
|
+
|
|
591
|
+
cons = values.inject({}){|hash, val| hash[val] = []; hash}
|
|
592
|
+
temp_cons = values.inject({}){|hash, val| hash[val] = []; hash}
|
|
593
|
+
starts.each do |val, arr|
|
|
594
|
+
#p 'arr', arr
|
|
595
|
+
arr.each do |start_con|
|
|
596
|
+
#arr.map{|edges| edges[-1][1].slice(0..1)}.each do |starti, startj|
|
|
597
|
+
starti, startj = start_con[-1][1].slice(0..1)
|
|
598
|
+
temp_cons[val] = [start_con]
|
|
599
|
+
p 'startj', starti, startj
|
|
600
|
+
loop do
|
|
601
|
+
starti, startj = trace_contour(val, temp_cons, starti, startj)
|
|
602
|
+
break unless starti and startj
|
|
603
|
+
end
|
|
604
|
+
cons[val].push temp_cons[val][0]
|
|
605
|
+
end
|
|
606
|
+
end
|
|
607
|
+
cons.keys.each{|val| cons[val] = connect_contours(cons[val]);
|
|
608
|
+
cons[val].map!{|con| con.map{|point| point[0]}}}
|
|
609
|
+
|
|
610
|
+
@last_contours = cons
|
|
611
|
+
gk = graphkit
|
|
612
|
+
#gk.gp.style = 'data with linespoints'
|
|
613
|
+
gk.data.each{|dk| dk.with = 'lp'}
|
|
614
|
+
gk.gnuplot
|
|
615
|
+
@last_contours
|
|
616
|
+
end
|
|
617
|
+
|
|
618
|
+
def trace_contour(val, cons, starti, startj)
|
|
619
|
+
old_start = cons[val][0][0][1]; old_end = cons[val][0][-1][1]
|
|
620
|
+
#p 'old_start', old_start, 'old_end', old_end, 'starti', starti, 'startj', startj
|
|
621
|
+
for delti in -1..1
|
|
622
|
+
for deltj in -1..1
|
|
623
|
+
celli, cellj = [starti + 2 * delti, startj + deltj * 2]
|
|
624
|
+
#unless (@analysed[val] and @analysed[val][[celli, cellj]] ) or celli > (@x.size-3) or cellj >(@y.size - 3) or celli < 0 or cellj < 0
|
|
625
|
+
unless celli > (@x.size-3) or cellj >(@y.size - 3) or celli < 0 or cellj < 0
|
|
626
|
+
|
|
627
|
+
#p 'analysing', celli, cellj
|
|
628
|
+
analyse_cell(celli, cellj, cons, val)
|
|
629
|
+
end
|
|
630
|
+
|
|
631
|
+
end
|
|
632
|
+
end
|
|
633
|
+
cons[val] = connect_contours(cons[val])
|
|
634
|
+
#p cons[val]
|
|
635
|
+
new_contour = cons[val].find{|cont| old_start == cont[0][1] or old_end == cont[-1][1]}
|
|
636
|
+
unless new_contour
|
|
637
|
+
cons[val].map!{|con| con.reverse}
|
|
638
|
+
new_contour = cons[val].find{|cont| old_start == cont[0][1] or old_end == cont[-1][1]}
|
|
639
|
+
end
|
|
640
|
+
raise "no new contour" unless new_contour
|
|
641
|
+
cons[val] = [new_contour]
|
|
642
|
+
#p cons[val]
|
|
643
|
+
#newtails = cons[val].map{|con| con[-1]}
|
|
644
|
+
if cons[val][0][0][1] != old_start
|
|
645
|
+
return cons[val][0][0][1].slice(0..1)
|
|
646
|
+
elsif cons[val][0][-1][1] != old_end
|
|
647
|
+
return cons[val][0][-1][1].slice(0..1)
|
|
648
|
+
else
|
|
649
|
+
#p cons[val]
|
|
650
|
+
|
|
651
|
+
p 'old_start', old_start, 'old_end', old_end
|
|
652
|
+
#raise "Could not connect"
|
|
653
|
+
return nil, nil
|
|
654
|
+
end
|
|
655
|
+
end
|
|
656
|
+
|
|
657
|
+
def get_startpoints(cons)
|
|
658
|
+
[0,((@x.size / 2).floor - 1)-1].each do |i|
|
|
659
|
+
for j in 0...((@y.size / 2).floor - 1)
|
|
660
|
+
analyse_cell(i*2, j*2, cons)
|
|
661
|
+
end
|
|
662
|
+
end
|
|
663
|
+
for i in 0...((@x.size / 2).floor - 1)
|
|
664
|
+
[0,((@y.size / 2).floor - 1)-1].each do |j|
|
|
665
|
+
analyse_cell(i*2, j*2, cons)
|
|
666
|
+
end
|
|
667
|
+
end
|
|
668
|
+
end
|
|
669
|
+
|
|
670
|
+
|
|
671
|
+
|
|
672
|
+
# Create a GraphKit object of the contours.
|
|
673
|
+
|
|
674
|
+
def graphkit(*args)
|
|
675
|
+
if args.size == 0
|
|
676
|
+
conts = @last_contours
|
|
677
|
+
else
|
|
678
|
+
conts = contours(*args)
|
|
679
|
+
end
|
|
680
|
+
graphs = conts.map do |val, cons|
|
|
681
|
+
unless cons[0]
|
|
682
|
+
nil
|
|
683
|
+
else
|
|
684
|
+
(cons.map do |con|
|
|
685
|
+
# p con
|
|
686
|
+
contour = con.transpose
|
|
687
|
+
kit = CodeRunner::GraphKit.autocreate({x: {data: contour[0]}, y: {data: contour[1], title: val.to_s}})
|
|
688
|
+
kit.data[0].with = "l"
|
|
689
|
+
kit
|
|
690
|
+
end).sum
|
|
691
|
+
end
|
|
692
|
+
end
|
|
693
|
+
graphs.compact.reverse.sum
|
|
694
|
+
end
|
|
695
|
+
|
|
696
|
+
|
|
697
|
+
#Edges: 1 __
|
|
698
|
+
# 0 | 4 \ /5 | 2
|
|
699
|
+
# | 7 / \ 6 |
|
|
700
|
+
# 3 __
|
|
701
|
+
|
|
702
|
+
VALID_CONNECTIONS = [[0,4,5,2], [0,4,1], [0,7,3], [0,7,6,2], [0,4,5,6,3],[0,7,6,5,1], [1,4,7,3], [1,5,6,3], [1,5,2], [1,4,7,6,2], [2,6,3],[2,5,4,7,3]]
|
|
703
|
+
|
|
704
|
+
def get_crossed_edges(i, j, cons, specific_val = nil)
|
|
705
|
+
ce = {}
|
|
706
|
+
edges = {0=>[[i, j], [i+2, j]],
|
|
707
|
+
1=>[[i, j], [i, j+2]],
|
|
708
|
+
2=>[[i, j+2], [i+2, j+2]],
|
|
709
|
+
3=>[[i+2, j], [i+2, j+2]],
|
|
710
|
+
4=>[[i, j], [i+1, j+1]],
|
|
711
|
+
5=>[[i, j+2], [i+1, j+1]],
|
|
712
|
+
6=>[[i+2, j+2], [i+1, j+1]],
|
|
713
|
+
7=>[[i+2, j], [i+1, j+1]]
|
|
714
|
+
}
|
|
715
|
+
cons.keys.each do |val|
|
|
716
|
+
next if specific_val and specific_val != val
|
|
717
|
+
ce[val] = {}
|
|
718
|
+
edges.each do |edge, (start, fin)|
|
|
719
|
+
if @function
|
|
720
|
+
#p start, fin
|
|
721
|
+
(@evaluated[start]= true, (@grid[*start] = @function.eval(@x[start[0]], @y[start[1]]))) unless @evaluated[start]
|
|
722
|
+
(@evaluated[fin]= true, (@grid[*fin] = @function.eval(@x[fin[0]], @y[fin[1]]))) unless @evaluated[fin]
|
|
723
|
+
end
|
|
724
|
+
bounds = [@grid[*start], @grid[*fin]]
|
|
725
|
+
# p edge, bounds if bounds.max < 4
|
|
726
|
+
if val <= bounds.max and val > bounds.min
|
|
727
|
+
dx = @x[fin[0]] - @x[start[0]]
|
|
728
|
+
dy = @y[fin[1]] - @y[start[1]]
|
|
729
|
+
df = bounds[1] - bounds[0]
|
|
730
|
+
xcross = @x[start[0]] + (val-bounds[0]) / df * dx
|
|
731
|
+
ycross = @y[start[1]] + (val-bounds[0]) / df * dy
|
|
732
|
+
ce[val][edge] = [xcross, ycross]
|
|
733
|
+
end
|
|
734
|
+
|
|
735
|
+
end
|
|
736
|
+
end
|
|
737
|
+
ce
|
|
738
|
+
end
|
|
739
|
+
|
|
740
|
+
private :get_crossed_edges
|
|
741
|
+
|
|
742
|
+
def analyse_cell(i, j, cons, specific_val = nil)
|
|
743
|
+
crossed_edges = get_crossed_edges(i, j, cons, specific_val)
|
|
744
|
+
crossed_edges.each do |val, crossings|
|
|
745
|
+
if specific_val
|
|
746
|
+
next unless val == specific_val
|
|
747
|
+
#@analysed[val] ||= {}
|
|
748
|
+
#@analysed[val][[i,j]] = true
|
|
749
|
+
@found[[i,j]] ||= {}
|
|
750
|
+
end
|
|
751
|
+
outer = crossings.keys.find_all{|edge| edge < 4}
|
|
752
|
+
inner = crossings.keys.find_all{|edge| edge > 4}
|
|
753
|
+
next if outer.size == 0 and inner.size == 0
|
|
754
|
+
VALID_CONNECTIONS.each do |connection|
|
|
755
|
+
path = crossings.values_at(*connection).compact.zip(connection.map{|edge| [i, j, edge]})
|
|
756
|
+
# p path
|
|
757
|
+
next if path.size != connection.size
|
|
758
|
+
connection.each{|edge| crossings.delete(edge)}
|
|
759
|
+
if specific_val
|
|
760
|
+
next if @found[[i,j]][path]
|
|
761
|
+
@found[[i,j]][path] = true
|
|
762
|
+
|
|
763
|
+
end
|
|
764
|
+
cons[val].push path
|
|
765
|
+
|
|
766
|
+
end
|
|
767
|
+
|
|
768
|
+
# p val, crossings, cons
|
|
769
|
+
end
|
|
770
|
+
end
|
|
771
|
+
|
|
772
|
+
private :analyse_cell
|
|
773
|
+
|
|
774
|
+
# Repeatedly merge contour segments whose endpoints lie in adjacent,
# facing cells (as decided by #joins?) until no further merge is possible.
#
# contours - an array of segments; each segment is an array of points, and
#            each point carries a [cell_i, cell_j, edge] record in point[1].
#
# Returns the (mutated) contours array: joined segments are concatenated,
# reversing a segment where needed so the matching endpoints meet, and each
# segment has duplicate points removed.
def connect_contours(contours)
  return contours if contours.size == 1
  loop do
    catch(:restart) do
      for i in 0...contours.size
        # Defensive guards: the `for` ranges are fixed when each loop
        # starts, but contours only shrinks via delete_at, which always
        # throws :restart immediately, so stale indexes cannot be reached.
        break if i >= contours.size
        for j in i+1...contours.size
          break if j >= contours.size
          coni = contours[i]
          conj = contours[j]
          # Try the four ways the two segments' ends can touch; after any
          # merge restart the whole scan, since indexes have shifted.
          if joins?(coni[-1], conj[0])
            contours[i] = coni + conj
            contours.delete_at(j)
            throw(:restart)
          elsif joins?(coni[0], conj[-1])
            contours[i] = conj + coni
            contours.delete_at(j)
            throw(:restart)
          elsif joins?(coni[0], conj[0])
            contours[i] = coni.reverse + conj
            contours.delete_at(j)
            throw(:restart)
          elsif joins?(coni[-1], conj[-1])
            contours[i] = conj + coni.reverse
            contours.delete_at(j)
            throw(:restart)
          end
        end
      end
      # A full scan merged nothing: tidy up and finish.
      contours.each{|con| con.uniq!}
      return contours
    end
  end
end

private :connect_contours
|
|
821
|
+
|
|
822
|
+
|
|
823
|
+
|
|
824
|
+
# Do two contour-segment endpoints lie in adjacent cells with facing edges?
#
# Both +path+ and +contour+ are endpoint records whose second element is a
# triple [cell_i, cell_j, edge_index]; edges are numbered 0..3 around a
# cell. Two endpoints join when their cells sit exactly two units apart
# along one axis and their edge indexes point at each other.
def joins?(path, contour)
  a_i, a_j, a_edge = path[1]
  b_i, b_j, b_edge = contour[1]
  case [a_i - b_i, a_j - b_j]
  when [0, 2]  then a_edge == 0 && b_edge == 2
  when [0, -2] then a_edge == 2 && b_edge == 0
  when [2, 0]  then a_edge == 1 && b_edge == 3
  when [-2, 0] then a_edge == 3 && b_edge == 1
  else
    false
  end
end

private :joins?
|
|
851
|
+
|
|
852
|
+
|
|
853
|
+
|
|
854
|
+
|
|
855
|
+
end
|
|
856
|
+
|
|
857
|
+
module MultiFit
|
|
858
|
+
# Levenberg-Marquardt least-squares fitter for data defined on a
# multi-dimensional set of grid points, wrapping GSL::MultiFit::FdfSolver.
# The GSL solver only passes a single independent variable t to the user
# procs, so this class instead captures the grid points (stored by
# #set_data) in closures around the user-supplied procs.
class MultidLM
  # Conventional GSL-style constructor; forwards to .new.
  def self.alloc(*args)
    new(*args)
  end

  # yproc   - optional Proc giving the model value, called as
  #           yproc.call(params, *point); required by #eval and therefore
  #           by the my_chi2 cross-check in #solve.
  # fproc   - Proc filling the residual vector f, called as
  #           fproc.call(x, *gridpoints, y, sigma, f).
  # dfproc  - Proc filling the Jacobian jac, called as
  #           dfproc.call(x, *gridpoints, y, sigma, jac).
  # ndata   - number of data points.
  # ndims   - number of grid dimensions.
  # nparams - number of fit parameters.
  #
  # NOTE(review): #solve always calls #eval, which raises "yproc not set"
  # when yproc is nil — confirm callers always supply one.
  def initialize(yproc = nil, fproc, dfproc, ndata, ndims, nparams)
    # GSL hands us (x, t, y, sigma, f); t is a dummy (see #set_data) and
    # is replaced here by the grid points stored when the data were set.
    @fproc = Proc.new do |x, t, y, sigma, f|
      fproc.call(x, *@gridpoints, y, sigma, f)
    end
    @dfproc = Proc.new do |x, t, y, sigma, jac|
      dfproc.call(x, *@gridpoints, y, sigma, jac)
    end

    @yproc = yproc
    @ndata = ndata; @ndims = ndims; @nparams = nparams
    @f = GSL::MultiFit::Function_fdf.alloc(@fproc, @dfproc, @nparams)
    @solver = GSL::MultiFit::FdfSolver.alloc(FdfSolver::LMDER, @ndata, @nparams)
  end

  # Store the data to fit and initialise the solver.
  # xstart     - initial parameter guess (copied, not mutated);
  # gridpoints - one vector per dimension, each the same length as y;
  # y, sigma   - observed values and their errors.
  def set_data(xstart, *gridpoints, y, sigma)
    @gridpoints = gridpoints; @y = y; @x = xstart.dup; @sigma = sigma
    @t = GSL::Vector.alloc(@y.size)
    @t.set_all(0.0) # t should never be used: the closures above ignore it.
    @f.set_data(@t, @y, @sigma)
    @solver.set(@f, @x)
  end

  # Iterate the LM solver until test_delta converges (tolerance 1e-7) or
  # 500 iterations have passed. Fills in chi2, my_chi2, covar, position
  # and dof, and returns the final parameter vector.
  def solve(print_out = false)
    (puts "Warning: due to a bug, print out doesn't work with less than 3 params"; print_out = false) if @nparams < 3
    iter = 0
    @solver.print_state(iter) if print_out
    begin
      iter += 1
      status = @solver.iterate
      @solver.print_state(iter) if print_out
      status = @solver.test_delta(1e-7, 1e-7)
    end while status == GSL::CONTINUE and iter < 500

    @covar = @solver.covar(0.0)
    @position = @solver.position
    # Recompute chi-squared directly from yproc as a cross-check on the
    # solver's own residual norm (@chi2 below).
    @my_chi2 = 0.0
    for i in 0...@y.size
      @my_chi2 += (@y[i] - eval(*@gridpoints.map{|vec| vec[i]}))**2.0 / @sigma[i]**2.0
    end
    @chi2 = (@solver.f.dnrm2)**2
    @dof = @ndata - @nparams
    @solved = true
    @solver.position
  end

  attr_accessor :chi2, :my_chi2, :covar, :position, :dof

  # Evaluate the fitted model at the given grid point using the solver's
  # current best-fit parameters.
  # NOTE(review): this shadows Kernel#eval within this class.
  def eval(*points)
    raise "yproc not set" unless @yproc
    @yproc.call(@solver.position, *points)
  end

end
|
|
929
|
+
# As MultidLM, but builds the residual and Jacobian procs automatically
# from a single model proc +yproc+, using central finite differences for
# the derivatives.
class MultidLMNumDiff < MultidLM
  def self.alloc(*params)
    new(*params)
  end

  # yproc - Proc returning the model value, called as
  #         yproc.call(x, *point) with one coordinate per grid dimension.
  def initialize(yproc, ndata, ndims, nparams)
    # Residual vector: f_i = (model_i - y_i) / sigma_i.
    fproc = Proc.new do |x, *gridpoints, y, sigma, f|
      # BUGFIX: was `for i in 0...ndata.size` — ndata is an Integer count
      # and Integer#size is its byte width (typically 8), so only the
      # first 8 residuals were ever filled. Loop over the count itself.
      for i in 0...ndata do
        f[i] = (@yproc.call(x, *gridpoints.map{|vec| vec[i]}) - y[i])/sigma[i]
      end
    end
    # Jacobian by central differences: d f_i / d x_j.
    dfproc = Proc.new do |x, *gridpoints, y, sigma, jac|
      for j in 0...nparams do
        xj = x[j]
        xplus = x.dup
        xplus[j] = xj + @delt[j]
        xminus = x.dup
        xminus[j] = xj - @delt[j]

        for i in 0...ndata do
          gp = gridpoints.map{|vec| vec[i]}
          yplus = @yproc.call(xplus, *gp)
          yminus = @yproc.call(xminus, *gp)
          # BUGFIX: was `(yplus - yminus)/2*@delt[j]/sigma[i]`, which
          # (by precedence) MULTIPLIES by the step size. The central
          # difference is (f(x+h) - f(x-h)) / (2h).
          jac.set(i, j, (yplus - yminus) / (2.0 * @delt[j]) / sigma[i])
        end
      end
    end
    super(yproc, fproc, dfproc, ndata, ndims, nparams)
  end

  # delt - per-parameter finite-difference step (GSL::Vector); defaults to
  #        1e-5 for every parameter when nil.
  def set_data(xstart, delt, *gridpoints, y, sigma)
    @delt = (delt || GSL::Vector.alloc([1e-5]*xstart.size))
    super(xstart, *gridpoints, y, sigma)
  end
end
|
|
969
|
+
|
|
970
|
+
|
|
971
|
+
end
|
|
972
|
+
|
|
973
|
+
|
|
974
|
+
|
|
975
|
+
module SpectralAnalysis
|
|
976
|
+
|
|
977
|
+
# A Lomb periodogram is a method of spectrally analysing a set of data which are not evenly spaced, and thus cannot be Fourier transformed. The Lomb periodogram is something akin to a probability distribution function for a given set of frequencies.
|
|
978
|
+
|
|
979
|
+
class Lomb
  class << self
    alias :alloc :new
  end

  # Create a new Lomb object.
  # times, data - GSL::Vectors of equal length: sample times and values.
  # Raises if the sizes differ.
  def initialize(times, data)
    @times = times; @data = data
    raise "Times #{times.size} and data #{data.size} do not have the same size" unless @times.size == @data.size
    @n = data.size
    @dmean = data.mean
    @dvar = data.variance
  end

  attr_accessor :frequencies, :periodogram

  # Calculate the Lomb periodogram. Without studying the Lomb analysis it
  # is best to leave the defaults alone. frequency_factor is how far above
  # the estimated Nyquist frequency (net time interval divided by the
  # number of data points) the spectrum is calculated; oversampling
  # controls the frequency resolution. frequency_indexes, when given,
  # restricts the calculation to those indexes of the frequency grid.
  # Returns [frequencies, periodogram] (also stored on the accessors).
  def calculate_periodogram(frequency_factor=2.0, oversampling=4.0, frequency_indexes=nil)
    @frequency_factor = frequency_factor
    @oversampling = oversampling
    @nout = (@n * 0.5 * frequency_factor * oversampling).to_i
    t_window = @times.max - @times.min
    delta_f = 1.0 / t_window / oversampling

    @frequencies = GSL::Vector.linspace(delta_f, delta_f*@nout, @nout)
    if frequency_indexes
      @frequencies = GSL::Vector.alloc(frequency_indexes.map{|i| @frequencies[i]})
    end
    @periodogram = @frequencies.collect do |freq|
      p_n(freq)
    end
    [@frequencies, @periodogram]
  end

  # The Lomb normalised power at the given frequency: proportional to the
  # probability that the frequency is present in the data, roughly akin to
  # p(k) for a Fourier transform.
  def p_n(freq)
    omega = freq * Math::PI * 2.0
    # tau is the time offset that makes the cosine and sine sums
    # orthogonal: tan(2*omega*tau) = sum(sin)/sum(cos).
    twoomt = @times * 2.0 * omega
    tau = Math.atan(
      twoomt.sin.sum / twoomt.cos.sum
    )/ 2.0 / omega
    omttau = ((@times - tau) * omega)
    c = omttau.cos
    s = omttau.sin
    ddmean = @data - @dmean
    pn = 1 / 2.0 / @dvar * (
      (ddmean * c).sum ** 2.0 / c.square.sum +
      (ddmean * s).sum ** 2.0 / s.square.sum
    )
    pn
  end

  # Equal to 1.0 minus the probability that a power as large as pn could
  # have been generated by gaussian noise.
  def confidence(pn, frequency_factor = @frequency_factor)
    (1.0 - Math.exp(-pn)) ** (@n * frequency_factor)
  end

  # The probability that the value of pn could have been generated by
  # gaussian noise.
  def pnull(pn, frequency_factor = @frequency_factor)
    1.0 - confidence(pn, frequency_factor)
  end

  # Invert #confidence: the power threshold a peak must exceed to reach
  # the given confidence level.
  def p_n_from_confidence(confidence, frequency_factor = @frequency_factor)
    - Math.log(1.0 - confidence ** (1.0 / @n / frequency_factor))
  end

  # A GraphKit plot of the periodogram (requires a prior
  # #calculate_periodogram call to populate the data).
  def graphkit
    CodeRunner::GraphKit.autocreate(x: {title: "Frequency", data: @frequencies}, y: {title: "P_N", data: @periodogram})
  end

end
|
|
1063
|
+
end
|
|
1064
|
+
|
|
1065
|
+
# A cached, unit-sum 1-D Gaussian convolution kernel spanning +/- 3 sigma,
# built once per (sigma, delt) pair.
class GaussianSmoothKernel < Vector
  # Cache of previously built kernels, keyed by [sigma, delt].
  KERNELS = {}

  # sigma - Gaussian width; delt - grid spacing between kernel samples.
  # Returns the cached kernel when one exists.
  # NOTE(review): the cached vector is shared between callers, so it must
  # not be mutated.
  def self.alloc(sigma, delt = 1.0)
    key = [sigma, delt]
    return KERNELS[key] if KERNELS[key]
    # Was `npix ||= ...`: npix is a fresh local here, so the ||= guard was
    # a misleading no-op. Truncate the half-width to whole pixels.
    npix = (3.0 * sigma / delt).floor
    kernel = super(2*npix + 1)
    for i in 0...kernel.size
      j = (i - npix) * delt
      # The 1/(2 pi sigma^2) prefactor is irrelevant because the kernel is
      # renormalised to unit sum below.
      kernel[i] = Math.exp(- j**2 / 2.0 / sigma**2) / ( 2.0 * Math::PI * sigma**2)
    end
    KERNELS[key] = kernel / kernel.sum
  end

end
|
|
1080
|
+
|
|
1081
|
+
|
|
1082
|
+
# Smoothing and robust-statistics helpers for GSL vectors.
class Vector
  # Return a copy smoothed with a 3-point moving average; the two end
  # points are left unchanged.
  def rectangular_smooth
    smooth = dup
    for i in 1...(self.size-1)
      smooth[i] = (self[i-1] + self[i] + self[i+1]) / 3.0
    end
    smooth
  end

  # In-place Gaussian smooth; see #gaussian_smooth. Returns nil.
  def gaussian_smooth!(sigma)
    smoothed = gaussian_smooth(sigma)
    # (was `for i in 0...size do i` — the trailing `i` was a dead
    # expression; also renamed the local `new` to avoid shadowing)
    for i in 0...size do
      self[i] = smoothed[i]
    end
    return nil
  end

  # Return a copy convolved with a unit-sum Gaussian kernel of width
  # sigma (in units of grid points), truncated at +/- 3 sigma.
  def gaussian_smooth(sigma)
    npix = (3.0*sigma).floor
    smooth = dup
    smooth.set_all(0.0)
    kernel = GaussianSmoothKernel.alloc(sigma)

    for i in 0...smooth.size
      # Clip the kernel window at the vector's edges.
      range = [([i - npix, 0].max), ([i + npix, smooth.size - 1].min)]
      # The subvector is used only for its sum: near an edge part of the
      # kernel hangs off the vector, so the surviving weights are rescaled
      # to keep unit total. The indexing below stays on the full-length
      # (rescaled) kernel, where j - i + npix is the correct offset.
      ke = kernel.subvector(range[0] - i + npix, range[1] - range[0] + 1)
      ke = kernel / ke.sum
      for j in range[0]..range[1]
        smooth[i] += self[j] * ke[j - i + npix]
      end
    end
    smooth
  end

  # Mean with outliers more than nstd standard deviations from the mean
  # discarded. Does not modify the receiver.
  def mean_no_outliers(nstd=1.0)
    av = mean
    std = sd
    # Was `(val - mean)`: recomputed the mean for every element; using the
    # cached value gives the same result in O(n) rather than O(n^2).
    self.dup.delete_if{|val| (val - av).abs > nstd * std}.mean
  end
end
|
|
1125
|
+
|
|
1126
|
+
class Matrix
  # In-place separable Gaussian smooth: smooth every row with sigmai,
  # then every column with sigmaj (defaults to sigmai).
  # Relies on Vector#gaussian_smooth for the 1-D passes.
  def gaussian_smooth(sigmai, sigmaj = nil)
    sigmaj ||= sigmai
    for i in 0...shape[0]
      set_row i,row(i).gaussian_smooth(sigmai)
    end
    for i in 0...shape[1]
      set_col i,col(i).gaussian_smooth(sigmaj)
    end
  end
end
|
|
1137
|
+
|
|
1138
|
+
|
|
1139
|
+
end
|
|
1140
|
+
|
|
1141
|
+
|
|
1142
|
+
# Ad-hoc smoke test for the tools above: only runs when the magic argument
# is supplied on the command line from inside CodeRunner.
if ARGV.include? "test_coderunner_gsl_tools" and $has_put_startup_message_for_code_runner
  #
  if true

    # --- Scatter interpolation and contouring demo ----------------------
    x = []; y=[]; z=[]

    n = 10
    zmat = GSL::Matrix.alloc(n, n)
    for i in 0...n
      for j in 0...n
        # Sample z = i^2 - j^3 on a regular n x n grid.
        x.push i; y.push j; z.push (i-0.0)**2 - (j-0.0)**3

        zmat[i,j] = z[-1]

      end
    end

    xvec = GSL::Vector.alloc(x.uniq.sort)
    yvec = GSL::Vector.alloc(y.uniq.sort)

    # Thin-plate-spline interpolation; the two `p` lines print the
    # interpolated value next to the exact one for a quick eyeball check.
    int = GSL::ScatterInterp.alloc(:thin_plate_splines, [x, y, z], 1.0)
    p int.eval(2, 3), 4-27
    p int.eval(8,7), 8**2-7**3
    con = int.to_contour(60)
    contours = con.contours(10)
    # Build one GraphKit per contour level and overlay them all.
    graphs = contours.map do |val, cons|
      unless cons[0]
        nil
      else
        (cons.map do |con|
          contour = con.transpose
          kit = CodeRunner::GraphKit.autocreate({x: {data: contour[0]}, y: {data: contour[1], title: val.to_s}})
          kit.data[0].with = "lp"
          kit
        end).sum
      end
    end
    graphs.compact.reverse.sum.gnuplot({key: "off"})
  end

  # --- Lomb periodogram demo: two cosines sampled at random times -------
  times = GSL::Vector.alloc(400.times.inject([0]){|a,i| a[i+1] = a[i] + rand/1.0; a })
  data = (0.125*2.0*Math::PI*times).cos + (0.25*2.0*Math::PI*times).cos + times / times.max * 4.0
  kit = CodeRunner::GraphKit.autocreate(x: {data: times}, y: {data: data})
  kit.data[0].with = 'lp'
  kit.gnuplot
  lomb = GSL::SpectralAnalysis::Lomb.new(times, data)
  lomb.calculate_periodogram(0.4)
  kit = lomb.graphkit
  kit.data[0].with = 'lp'
  kit.gnuplot

  # --- Gaussian smoothing demos -----------------------------------------
  mat = GSL::Matrix.alloc(3, 3)
  mat.set_all 0.0
  mat[1,1] = 4.0
  mat.gaussian_smooth(0.8)
  p mat

  vec = GSL::Vector.linspace(0, 40, 100)
  vec = vec.gaussian_smooth(10.0)
  CodeRunner::GraphKit.quick_create([vec]).gnuplot

  # STDIN.gets

end
|
|
1215
|
+
|