ruby-dnn 0.13.4 → 0.14.0
- checksums.yaml +4 -4
- data/examples/cifar100_example.rb +1 -2
- data/examples/cifar10_example.rb +1 -2
- data/examples/dcgan/dcgan.rb +19 -9
- data/examples/dcgan/imgen.rb +9 -2
- data/examples/dcgan/train.rb +7 -22
- data/examples/iris_example.rb +1 -2
- data/examples/mnist_conv2d_example.rb +1 -2
- data/examples/mnist_define_by_run.rb +1 -2
- data/examples/mnist_example.rb +1 -2
- data/examples/mnist_lstm_example.rb +1 -2
- data/examples/xor_example.rb +2 -3
- data/lib/dnn.rb +2 -0
- data/lib/dnn/core/activations.rb +11 -18
- data/lib/dnn/core/callbacks.rb +136 -0
- data/lib/dnn/core/cnn_layers.rb +26 -33
- data/lib/dnn/core/embedding.rb +20 -2
- data/lib/dnn/core/error.rb +0 -2
- data/lib/dnn/core/initializers.rb +2 -8
- data/lib/dnn/core/iterator.rb +17 -13
- data/lib/dnn/core/layers.rb +38 -34
- data/lib/dnn/core/link.rb +1 -2
- data/lib/dnn/core/losses.rb +21 -14
- data/lib/dnn/core/merge_layers.rb +7 -8
- data/lib/dnn/core/models.rb +134 -125
- data/lib/dnn/core/normalizations.rb +2 -2
- data/lib/dnn/core/optimizers.rb +20 -25
- data/lib/dnn/core/regularizers.rb +6 -7
- data/lib/dnn/core/rnn_layers.rb +15 -21
- data/lib/dnn/core/savers.rb +9 -7
- data/lib/dnn/core/tensor.rb +11 -0
- data/lib/dnn/core/utils.rb +1 -1
- data/lib/dnn/image.rb +22 -1
- data/lib/dnn/version.rb +1 -1
- metadata +4 -2
data/lib/dnn/core/normalizations.rb
CHANGED
@@ -32,7 +32,7 @@ module DNN
         if DNN.learning_phase
           mean = x.mean(axis: @axis, keepdims: true)
           @xc = x - mean
-          var = (@xc ** 2).mean(axis: @axis, keepdims: true)
+          var = (@xc**2).mean(axis: @axis, keepdims: true)
           @std = Xumo::NMath.sqrt(var + @eps)
           xn = @xc / @std
           @xn = xn
@@ -53,7 +53,7 @@ module DNN
         end
         dxn = @gamma.data * dy
         dxc = dxn / @std
-        dstd = -((dxn * @xc) / (@std ** 2)).sum(axis: @axis, keepdims: true)
+        dstd = -((dxn * @xc) / (@std**2)).sum(axis: @axis, keepdims: true)
         dvar = 0.5 * dstd / @std
         dxc += (2.0 / batch_size) * @xc * dvar
         dmean = dxc.sum(axis: @axis, keepdims: true)
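For orientation, the two hunks above sit inside BatchNormalization's forward and backward passes. A minimal sketch of the forward statistics they touch, using Numo directly (ruby-dnn's Xumo resolves to Numo; the shapes and eps value here are illustrative):

  require "numo/narray"

  x = Numo::SFloat.new(4, 3).rand                 # toy mini-batch: 4 samples, 3 features
  eps = 1e-7
  mean = x.mean(axis: 0, keepdims: true)          # per-feature mean
  xc = x - mean                                   # centered input
  var = (xc**2).mean(axis: 0, keepdims: true)     # biased variance, as in the hunk
  std = Numo::NMath.sqrt(var + eps)
  xn = xc / std                                   # normalized activations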
data/lib/dnn/core/optimizers.rb
CHANGED
@@ -10,13 +10,14 @@ module DNN
         return nil unless hash
         optimizer_class = DNN.const_get(hash[:class])
         optimizer = optimizer_class.allocate
-        raise DNN_Error.new("#{optimizer.class} is not an instance of #{self} class.") unless optimizer.is_a?(self)
+        raise DNN_Error, "#{optimizer.class} is not an instance of #{self} class." unless optimizer.is_a?(self)
         optimizer.load_hash(hash)
         optimizer
       end
 
       def self.load(dumped)
         opt = from_hash(dumped[:hash])
+        return opt unless dumped[:status]
         dumped[:status].each do |key, state|
           state = state.clone
           opt.status[key] = state
@@ -34,7 +35,7 @@ module DNN
       def update(layers)
         target_params = layers.select { |layer| layer.is_a?(Layers::HasParamLayer) && layer.trainable }
                               .map { |layer| layer.get_params.values }.flatten.compact
-                              .select { |param| param.grad }
+                              .select(&:grad)
         clip_grads(target_params) if @clip_norm
         update_params(target_params)
         target_params.each do |param|
@@ -42,8 +43,9 @@ module DNN
         end
       end
 
-      def dump
-        { hash: to_hash, status: @status }
+      def dump(require_status = true)
+        status = require_status ? @status : nil
+        { hash: to_hash, status: status }
       end
 
       def to_hash(merge_hash = nil)
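The reworked dump/load pair means an optimizer can now be serialized without its per-parameter state, and load no longer crashes on a status-less dump. A usage sketch against the API in this diff (the SGD arguments are illustrative):

  require "dnn"

  opt = DNN::Optimizers::SGD.new(0.05, momentum: 0.9)
  dumped = opt.dump(false)    # status: nil, so momentum buffers are dropped
  restored = DNN::Optimizers::Optimizer.load(dumped)
  restored.lr                 # => 0.05; load returned early because status was nil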
@@ -54,12 +56,13 @@ module DNN
 
       # Update params.
       private def update_params(params)
-        raise NotImplementedError.new("Class '#{self.class.name}' has implement method 'update_params'")
+        raise NotImplementedError, "Class '#{self.class.name}' has implement method 'update_params'"
       end
 
       private def clip_grads(params)
-        norm = Math.sqrt(params.reduce(0) { |total, param| total + (param.grad ** 2).sum })
+        norm = Math.sqrt(params.reduce(0) { |total, param| total + (param.grad**2).sum })
         return if norm <= @clip_norm
+
         rate = @clip_norm / (norm + 1e-7)
         params.each do |param|
           param.grad *= rate
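clip_grads implements global-norm clipping: the norm is the square root of the summed squared gradients across all parameters, and when it exceeds @clip_norm every gradient is scaled by clip_norm / (norm + 1e-7). The same arithmetic as a standalone sketch, with Numo arrays standing in for the params:

  require "numo/narray"

  grads = [Numo::SFloat[1.0, 2.0], Numo::SFloat[2.0, 4.0]]
  clip_norm = 2.0
  norm = Math.sqrt(grads.reduce(0) { |total, g| total + (g**2).sum })  # => 5.0
  if norm > clip_norm
    rate = clip_norm / (norm + 1e-7)
    grads.map! { |g| g * rate }   # every gradient scaled by ~0.4
  end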
@@ -71,7 +74,6 @@ module DNN
         end
       end
 
-
     class SGD < Optimizer
       attr_accessor :lr
       attr_accessor :momentum
@@ -107,7 +109,6 @@ module DNN
         end
       end
 
-
     class Nesterov < SGD
       def initialize(lr = 0.01, momentum: 0.9, clip_norm: nil)
         super(lr, momentum: momentum, clip_norm: clip_norm)
@@ -118,12 +119,11 @@ module DNN
           @v[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
           amount = param.grad * @lr
           @v[param.name] = @v[param.name] * @momentum - amount
-          param.data = (param.data + @momentum ** 2 * @v[param.name]) - (1 + @momentum) * amount
+          param.data = (param.data + @momentum**2 * @v[param.name]) - (1 + @momentum) * amount
         end
       end
     end
 
-
     class AdaGrad < Optimizer
       attr_accessor :lr
       attr_accessor :eps
@@ -141,7 +141,7 @@ module DNN
       private def update_params(params)
         params.each do |param|
           @g[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
-          @g[param.name] += param.grad ** 2
+          @g[param.name] += param.grad**2
           param.data -= (@lr / Xumo::NMath.sqrt(@g[param.name] + @eps)) * param.grad
         end
       end
@@ -155,7 +155,6 @@ module DNN
       end
     end
 
-
     class RMSProp < Optimizer
       attr_accessor :lr
       attr_accessor :alpha
@@ -180,7 +179,7 @@ module DNN
       private def update_params(params)
         params.each do |param|
           @g[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
-          @g[param.name] = @alpha * @g[param.name] + (1 - @alpha) * param.grad ** 2
+          @g[param.name] = @alpha * @g[param.name] + (1 - @alpha) * param.grad**2
           param.data -= (@lr / Xumo::NMath.sqrt(@g[param.name] + @eps)) * param.grad
         end
       end
@@ -190,7 +189,6 @@ module DNN
       end
     end
 
-
     class AdaDelta < Optimizer
       attr_accessor :rho
       attr_accessor :eps
@@ -214,9 +212,9 @@ module DNN
         params.each do |param|
           @h[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
           @s[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
-          @h[param.name] = @rho * @h[param.name] + (1 - @rho) * param.grad ** 2
+          @h[param.name] = @rho * @h[param.name] + (1 - @rho) * param.grad**2
           v = (Xumo::NMath.sqrt(@s[param.name] + @eps) / Xumo::NMath.sqrt(@h[param.name] + @eps)) * param.grad
-          @s[param.name] = @rho * @s[param.name] + (1 - @rho) * v ** 2
+          @s[param.name] = @rho * @s[param.name] + (1 - @rho) * v**2
           param.data -= v
         end
       end
@@ -226,7 +224,6 @@ module DNN
       end
     end
 
-
     class RMSPropGraves < Optimizer
      attr_accessor :lr
      attr_accessor :alpha
@@ -254,8 +251,8 @@ module DNN
           @m[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
           @v[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
           @m[param.name] = @alpha * @m[param.name] + (1 - @alpha) * param.grad
-          @v[param.name] = @alpha * @v[param.name] + (1 - @alpha) * param.grad ** 2
-          param.data -= (@lr / Xumo::NMath.sqrt(@v[param.name] - @m[param.name] ** 2 + @eps)) * param.grad
+          @v[param.name] = @alpha * @v[param.name] + (1 - @alpha) * param.grad**2
+          param.data -= (@lr / Xumo::NMath.sqrt(@v[param.name] - @m[param.name]**2 + @eps)) * param.grad
         end
       end
 
@@ -264,7 +261,6 @@ module DNN
       end
     end
 
-
     class Adam < Optimizer
       attr_accessor :alpha
       attr_accessor :beta1
@@ -300,12 +296,12 @@ module DNN
 
       private def update_params(params)
         @t += 1
-        lr = @alpha * Math.sqrt(1 - @beta2 ** @t) / (1 - @beta1 ** @t)
+        lr = @alpha * Math.sqrt(1 - @beta2**@t) / (1 - @beta1**@t)
         params.each do |param|
           @m[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
           @v[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
           @m[param.name] += (1 - @beta1) * (param.grad - @m[param.name])
-          @v[param.name] += (1 - @beta2) * (param.grad ** 2 - @v[param.name])
+          @v[param.name] += (1 - @beta2) * (param.grad**2 - @v[param.name])
           if @amsgrad
             @s[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
             @s[param.name] = Xumo::SFloat.maximum(@s[param.name], @v[param.name])
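Adam's corrected step folds both bias corrections into the step size: lr = alpha * Math.sqrt(1 - beta2**t) / (1 - beta1**t). A quick check of the first step with the customary defaults:

  alpha = 0.001
  beta1 = 0.9
  beta2 = 0.999
  t = 1
  lr = alpha * Math.sqrt(1 - beta2**t) / (1 - beta1**t)
  # => 0.001 * sqrt(0.001) / 0.1, roughly 3.16e-4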
@@ -322,7 +318,6 @@ module DNN
       end
     end
 
-
     class AdaBound < Adam
       attr_accessor :final_lr
       attr_accessor :gamma
@@ -344,7 +339,7 @@ module DNN
 
       private def update_params(params)
         @t += 1
-        lr = @alpha * Math.sqrt(1 - @beta2 ** @t) / (1 - @beta1 ** @t)
+        lr = @alpha * Math.sqrt(1 - @beta2**@t) / (1 - @beta1**@t)
         final_lr = @final_lr * lr / @alpha
         lower_bound = final_lr * (1 - 1 / (@gamma * @t + 1))
         upper_bound = final_lr * (1 + 1 / (@gamma * @t))
@@ -352,7 +347,7 @@ module DNN
           @m[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
           @v[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
           @m[param.name] += (1 - @beta1) * (param.grad - @m[param.name])
-          @v[param.name] += (1 - @beta2) * (param.grad ** 2 - @v[param.name])
+          @v[param.name] += (1 - @beta2) * (param.grad**2 - @v[param.name])
           if @amsgrad
             @s[param.name] ||= Xumo::SFloat.zeros(*param.data.shape)
             @s[param.name] = Xumo::SFloat.maximum(@s[param.name], @v[param.name])
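AdaBound keeps Adam's moment updates but clamps each step between bounds that tighten toward final_lr as t grows. Evaluating the two bound formulas from the hunk above (the final_lr and gamma values are illustrative):

  final_lr = 0.1
  gamma = 1e-3
  [1, 10_000].each do |t|
    lower = final_lr * (1 - 1 / (gamma * t + 1))   # ~0.0001 at t=1, ~0.0909 at t=10_000
    upper = final_lr * (1 + 1 / (gamma * t))       # ~100.1  at t=1, ~0.11   at t=10_000
    puts format("t=%-6d lower=%.5f upper=%.5f", t, lower, upper)
  end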
data/lib/dnn/core/regularizers.rb
CHANGED
@@ -8,17 +8,17 @@ module DNN
       return nil unless hash
       regularizer_class = DNN.const_get(hash[:class])
       regularizer = regularizer_class.allocate
-      raise DNN_Error.new("#{regularizer.class} is not an instance of #{self} class.") unless regularizer.is_a?(self)
+      raise DNN_Error, "#{regularizer.class} is not an instance of #{self} class." unless regularizer.is_a?(self)
       regularizer.load_hash(hash)
       regularizer
     end
 
     def forward(x)
-      raise NotImplementedError.new("Class '#{self.class.name}' has implement method 'forward'")
+      raise NotImplementedError, "Class '#{self.class.name}' has implement method 'forward'"
     end
 
     def backward
-      raise NotImplementedError.new("Class '#{self.class.name}' has implement method 'backward'")
+      raise NotImplementedError, "Class '#{self.class.name}' has implement method 'backward'"
     end
 
     def to_hash(merge_hash)
@@ -28,7 +28,7 @@ module DNN
     end
 
     def load_hash(hash)
-      raise NotImplementedError.new("Class '#{self.class.name}' has implement method 'load_hash'")
+      raise NotImplementedError, "Class '#{self.class.name}' has implement method 'load_hash'"
     end
   end
 
@@ -59,7 +59,6 @@ module DNN
     end
   end
 
-
   class L2 < Regularizer
     attr_accessor :l2_lambda
 
@@ -69,7 +68,7 @@ module DNN
     end
 
     def forward(x)
-      x + 0.5 * @l2_lambda * (@param.data ** 2).sum
+      x + 0.5 * @l2_lambda * (@param.data**2).sum
     end
 
     def backward
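The L2 penalty added to the loss is the usual weight-decay term 0.5 * lambda * sum(w**2). A standalone check of the forward computation with toy weights:

  require "numo/narray"

  l2_lambda = 0.01
  weights = Numo::SFloat[[1.0, -2.0], [0.5, 0.0]]
  penalty = 0.5 * l2_lambda * (weights**2).sum   # 0.5 * 0.01 * 5.25 => 0.02625
  loss = 3.0 + penalty                           # penalty is simply added to the loss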
@@ -98,7 +97,7 @@ module DNN
 
     def forward(x)
       l1 = @l1_lambda * @param.data.abs.sum
-      l2 = 0.5 * @l2_lambda * (@param.data ** 2).sum
+      l2 = 0.5 * @l2_lambda * (@param.data**2).sum
       x + l1 + l2
     end
 
data/lib/dnn/core/rnn_layers.rb
CHANGED
@@ -40,7 +40,7 @@ module DNN
 
       def build(input_shape)
         unless input_shape.length == 2
-          raise DNN_ShapeError.new("Input shape is #{input_shape}. But input shape must be 2 dimensional.")
+          raise DNN_ShapeError, "Input shape is #{input_shape}. But input shape must be 2 dimensional."
         end
         super
         @time_length = @input_shape[0]
@@ -129,7 +129,6 @@ module DNN
       end
     end
 
-
     class SimpleRNNDense
       attr_accessor :trainable
 
@@ -162,7 +161,6 @@ module DNN
       end
     end
 
-
     class SimpleRNN < RNN
       attr_reader :activation
 
@@ -170,7 +168,7 @@ module DNN
       def initialize(num_nodes,
                      stateful: false,
                      return_sequences: true,
-                     activation: Activations::Tanh.new,
+                     activation: Layers::Tanh.new,
                      weight_initializer: Initializers::RandomNormal.new,
                      recurrent_weight_initializer: Initializers::RandomNormal.new,
                      bias_initializer: Initializers::Zeros.new,
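With the activation classes now living under Layers, passing an explicit activation to a recurrent layer looks like the following (a sketch against the 0.14.0 namespaces shown in this diff; the other keyword arguments mirror the defaults above):

  require "dnn"

  rnn = DNN::Layers::SimpleRNN.new(64,
                                   stateful: false,
                                   return_sequences: true,
                                   activation: DNN::Layers::Tanh.new)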
@@ -222,7 +220,6 @@ module DNN
       end
     end
 
-
     class LSTMDense
       attr_accessor :trainable
 
@@ -230,11 +227,11 @@ module DNN
         @weight = weight
         @recurrent_weight = recurrent_weight
         @bias = bias
-        @tanh = Activations::Tanh.new
-        @g_tanh = Activations::Tanh.new
-        @forget_sigmoid = Activations::Sigmoid.new
-        @in_sigmoid = Activations::Sigmoid.new
-        @out_sigmoid = Activations::Sigmoid.new
+        @tanh = Layers::Tanh.new
+        @g_tanh = Layers::Tanh.new
+        @forget_sigmoid = Layers::Sigmoid.new
+        @in_sigmoid = Layers::Sigmoid.new
+        @out_sigmoid = Layers::Sigmoid.new
         @trainable = true
       end
 
@@ -280,7 +277,6 @@ module DNN
       end
     end
 
-
     class LSTM < RNN
       attr_reader :cell
 
@@ -359,7 +355,6 @@ module DNN
       end
     end
 
-
     class GRUDense
       attr_accessor :trainable
 
@@ -367,9 +362,9 @@ module DNN
         @weight = weight
         @recurrent_weight = recurrent_weight
         @bias = bias
-        @update_sigmoid = Activations::Sigmoid.new
-        @reset_sigmoid = Activations::Sigmoid.new
-        @tanh = Activations::Tanh.new
+        @update_sigmoid = Layers::Sigmoid.new
+        @reset_sigmoid = Layers::Sigmoid.new
+        @tanh = Layers::Tanh.new
        @trainable = true
      end
 
@@ -387,11 +382,11 @@ module DNN
         @weight_h = @weight.data[true, (num_nodes * 2)..-1]
         @weight2_h = @recurrent_weight.data[true, (num_nodes * 2)..-1]
         @tanh_h = if @bias
-                    bias_h = @bias.data[(num_nodes * 2)..-1]
-                    @tanh.forward(x.dot(@weight_h) + (h * @reset).dot(@weight2_h) + bias_h)
-                  else
-                    @tanh.forward(x.dot(@weight_h) + (h * @reset).dot(@weight2_h))
-                  end
+          bias_h = @bias.data[(num_nodes * 2)..-1]
+          @tanh.forward(x.dot(@weight_h) + (h * @reset).dot(@weight2_h) + bias_h)
+        else
+          @tanh.forward(x.dot(@weight_h) + (h * @reset).dot(@weight2_h))
+        end
         h2 = (1 - @update) * @tanh_h + @update * h
         h2
       end
@@ -428,7 +423,6 @@ module DNN
       end
     end
 
-
     class GRU < RNN
       def initialize(num_nodes,
                      stateful: false,
data/lib/dnn/core/savers.rb
CHANGED
@@ -17,7 +17,7 @@ module DNN
       private
 
       def load_bin(bin)
-        raise NotImplementedError.new("Class '#{self.class.name}' has implement method 'load_bin'")
+        raise NotImplementedError, "Class '#{self.class.name}' has implement method 'load_bin'"
       end
 
       def set_all_params_data(params_data)
@@ -30,13 +30,16 @@ module DNN
       end
     end
 
-
     class MarshalLoader < Loader
       private def load_bin(bin)
         data = Marshal.load(Zlib::Inflate.inflate(bin))
+        unless @model.class.name == data[:class]
+          raise DNN_Error, "Class name is not mismatch. Target model is #{@model.class.name}. But loading model is #{data[:class]}."
+        end
         opt = Optimizers::Optimizer.load(data[:optimizer])
         loss_func = Losses::Loss.from_hash(data[:loss_func])
         @model.setup(opt, loss_func)
+        @model.instance_variable_set(:@built, false)
         @model.predict1(Xumo::SFloat.zeros(*data[:input_shape]))
         set_all_params_data(data[:params])
       end
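The loader now fails fast when the dump was produced by a different model class, and it clears @built so the warm-up predict1 call rebuilds the graph before parameters are copied in. A hedged usage sketch (the public load(path) entry point and the Loaders module name are inferred from the structure shown here):

  require "dnn"

  model = MyModel.new   # hypothetical model class matching the saved dump
  DNN::Loaders::MarshalLoader.new(model).load("trained.marshal")
  # raises DNN_Error if "trained.marshal" was dumped from another model class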
@@ -50,6 +53,7 @@ module DNN
         opt = Optimizers::Optimizer.from_hash(data[:optimizer])
         loss_func = Losses::Loss.from_hash(data[:loss_func])
         @model.setup(opt, loss_func)
+        @model.instance_variable_set(:@built, false)
         @model.predict1(Xumo::SFloat.zeros(*data[:input_shape]))
         base64_to_params_data(data[:params])
       end
@@ -65,7 +69,6 @@ module DNN
 
   end
 
-
   module Savers
 
     class Saver
@@ -87,7 +90,7 @@ module DNN
       private
 
       def dump_bin
-        raise NotImplementedError.new("Class '#{self.class.name}' has implement method 'dump_bin'")
+        raise NotImplementedError, "Class '#{self.class.name}' has implement method 'dump_bin'"
       end
 
       def get_all_params_data
@@ -98,7 +101,6 @@ module DNN
       end
     end
 
-
     class MarshalSaver < Saver
       def initialize(model, include_optimizer: true)
         super(model)
@@ -106,10 +108,10 @@ module DNN
       end
 
       private def dump_bin
-
+        require_status = @include_optimizer ? true : false
         data = {
           version: VERSION, class: @model.class.name, input_shape: @model.layers.first.input_shape, params: get_all_params_data,
-          optimizer:
+          optimizer: @model.optimizer.dump(require_status), loss_func: @model.loss_func.to_hash
         }
         Zlib::Deflate.deflate(Marshal.dump(data))
       end
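On the saving side, include_optimizer now maps onto Optimizer#dump(require_status): the optimizer's hash is always stored, but its per-parameter status is dropped when the flag is false. A sketch in the same hedged spirit as the loader example (the public save(path) entry point is inferred):

  require "dnn"

  saver = DNN::Savers::MarshalSaver.new(model, include_optimizer: false)
  saver.save("weights_only.marshal")   # optimizer status is nil in the dump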