machine_learning_workbench 0.1.2 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +11 -0
- data/examples/image_compression.rb +35 -0
- data/lib/machine_learning_workbench/compressor/online_vector_quantization.rb +27 -0
- data/lib/machine_learning_workbench/compressor/vector_quantization.rb +63 -37
- data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/base.rb +8 -7
- data/lib/machine_learning_workbench/tools/normalization.rb +2 -2
- data/machine_learning_workbench.gemspec +28 -14
- metadata +45 -37
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8107a7a0b51fc50d183e08c6ef60e43bd96d4ea1
+  data.tar.gz: 82f7af0fd0938433d749696122e9ddc588f49aa0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 07a342404b87066ab60217ad7e2bde8cbf89d23bb7d49b504a280ce1142456a70eb24b44b6f6c76b4a4f10dfddf219acd8c7f37e85e7f91548b24771752a798c
+  data.tar.gz: bc99aec41e7f9365d47068e69398462e04bcc8dd38dda090ba46e00d27a1e5485bf96a6d55f270c1ffcd6fe8d93223c52014d479634f8e0d48673ebe2eb00bb6
data/README.md
CHANGED
@@ -48,3 +48,14 @@ Bug reports and pull requests are welcome on GitHub at https://github.com/[USERN
 ## License

 The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
+
+## References
+
+Please feel free to contribute to this list (see `Contributing` above).
+
+- NES stands for Natural Evolution Strategies. Check its [Wikipedia page](https://en.wikipedia.org/wiki/Natural_evolution_strategy) for more info.
+- CMA-ES stands for Covariance Matrix Adaptation Evolution Strategy. Check its [Wikipedia page](https://en.wikipedia.org/wiki/CMA-ES) for more info.
+- UL-ELR stands for Unsupervised Learning plus Evolutionary Reinforcement Learning, from the paper _"Intrinsically Motivated Neuroevolution for Vision-Based Reinforcement Learning" (ICDL2011)_. Check [here](https://exascale.info/members/giuseppe-cuccu/) for citation reference and pdf.
+- BD-NES stands for Block Diagonal Natural Evolution Strategies, from the homonymous paper _"Block Diagonal Natural Evolution Strategies" (PPSN2012)_. Check [here](https://exascale.info/members/giuseppe-cuccu/) for citation reference and pdf.
+- **Online VQ** stands for Online Vector Quantization, from the paper _"Intrinsically Motivated Neuroevolution for Vision-Based Reinforcement Learning" (ICDL2011)_. Check [here](https://exascale.info/members/giuseppe-cuccu/) for citation reference and pdf.
+
data/examples/image_compression.rb
ADDED
@@ -0,0 +1,35 @@
+require 'rmagick'
+require 'machine_learning_workbench'
+VectorQuantization = MachineLearningWorkbench::Compressor::VectorQuantization
+Img = MachineLearningWorkbench::Tools::Imaging
+Norm = MachineLearningWorkbench::Tools::Normalization
+
+ncentrs = 1
+image_files = Dir[ENV['HOME']+'/jaffe/KA.HA*.png']
+raise "Download the JAFFE dataset in your home dir" if image_files&.empty?
+# ... and convert the `.tiff` in `.png`: `mogrify -format png jaffe/*.tiff`
+centr_range = [-1, 1]
+orig_shape = [256, 256]
+img_range = [0, 2**16-1]
+
+puts "Loading images"
+images = image_files.map do |fname|
+  nmat = Img.nmat_from_png fname, flat: true, dtype: :float64
+  ret = Norm.feature_scaling nmat, from: img_range, to: centr_range
+end
+
+puts "Initializing VQ"
+vq = VectorQuantization.new ncentrs: ncentrs,
+  dims: images.first.shape, lrate: 0.3,
+  dtype: images.first.dtype, vrange: centr_range
+
+puts "Training"
+vq.train images, debug: true
+
+puts "Done!"
+begin
+  vq.centrs.map { |c| Img.display c, shape: orig_shape }
+  require 'pry'; binding.pry
+ensure
+  MachineLearningWorkbench::Tools::Execution.kill_forks
+end
data/lib/machine_learning_workbench/compressor/online_vector_quantization.rb
ADDED
@@ -0,0 +1,27 @@
+module MachineLearningWorkbench::Compressor
+  # Online Vector Quantization: VQ with per-centroid decaying learning rates.
+  # Optimized for online training.
+  class OnlineVectorQuantization < VectorQuantization
+
+    attr_reader :min_lrate, :ntrains
+
+    def initialize min_lrate: 0.01, **opts
+      super **opts.merge({lrate: nil})
+      @min_lrate = min_lrate
+      @ntrains = [0]*ncentrs
+    end
+
+    # Decaying per-centroid learning rate.
+    # @param centr_idx [Integer] index of the centroid
+    # @param lower_bound [Float] minimum learning rate
+    def lrate centr_idx, lower_bound: min_lrate
+      [1/ntrains[centr_idx], lower_bound].max
+    end
+
+    # Train on one image
+    # @return [Integer] index of trained centroid
+    def train_one *args, **opts
+      super.tap { |trg_idx| ntrains[trg_idx] += 1 }
+    end
+  end
+end
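The online variant drops the fixed `lrate` and instead decays a per-centroid learning rate as that centroid accumulates updates, bounded below by `min_lrate`. A minimal usage sketch (not part of the gem), assuming the same pipeline as `examples/image_compression.rb` above has already produced `ncentrs`, `images`, and `centr_range`:

```ruby
# Hypothetical swap-in for the fixed-lrate compressor of the example above.
OnlineVQ = MachineLearningWorkbench::Compressor::OnlineVectorQuantization

ovq = OnlineVQ.new ncentrs: ncentrs,
  dims: images.first.shape, min_lrate: 0.01,
  dtype: images.first.dtype, vrange: centr_range

ovq.train images, debug: true

# Each centroid tracks its own update count; its learning rate decays
# towards `min_lrate` as that count grows.
ovq.centrs.each_index do |i|
  puts "centroid #{i}: #{ovq.ntrains[i]} updates, lrate #{ovq.lrate(i)}"
end
```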
data/lib/machine_learning_workbench/compressor/vector_quantization.rb
CHANGED
@@ -1,4 +1,6 @@
 module MachineLearningWorkbench::Compressor
+
+  # Standard Vector Quantization
   class VectorQuantization
     attr_reader :ncentrs, :centrs, :dims, :vrange, :dtype, :lrate, :rng
     Verification = MachineLearningWorkbench::Tools::Verification
@@ -7,68 +9,92 @@ module MachineLearningWorkbench::Compressor
       @rng = Random.new rseed
       @ncentrs = ncentrs
       @dtype = dtype
-      @dims = dims
+      @dims = Array(dims)
       @lrate = lrate
       @vrange = case vrange
-
-
-
-
-
-        else
-          raise ArgumentError, "vrange: unrecognized type: #{vrange.class}"
+        when Array
+          raise ArgumentError, "vrange size not 2: #{vrange}" unless vrange.size == 2
+          vrange.map &method(:Float)
+        when Range then [vrange.first, vrange.last].map &method(:Float)
+        else raise ArgumentError, "vrange: unrecognized type: #{vrange.class}"
       end
       @centrs = ncentrs.times.map { new_centr }
     end

     # Creates a new (random) centroid
     def new_centr
-
+      # TODO: this is too slow, find another way to use the rng
+      # NMatrix.new(dims, dtype: dtype) { rng.rand Range.new *vrange }
+      NMatrix.random dims, dtype: dtype
     end

-    # Computes similarities between
-    def similarities
-      raise NotImplementedError if
-
-      require 'parallel'
-      Parallel.map(centrs) { |c| c.dot(
+    # Computes similarities between vector and all centroids
+    def similarities vec
+      raise NotImplementedError if vec.shape.size > 1
+      centrs.map { |c| c.dot(vec).first }
+      # require 'parallel'
+      # Parallel.map(centrs) { |c| c.dot(vec).first }
     end
-    # The list of similarities also constitutes the encoding of the image
-    alias encode similarities

-    #
-    def
-      simils = similarities
+    # Encode a vector
+    def encode vec, type: :most_similar
+      simils = similarities vec
+      case type
+      when :most_similar
+        simils.index simils.max
+      when :ensemble
+        simils
+      when :ensemble_norm
+        tot = simils.reduce(:+)
+        simils.map { |s| s/tot }
+      else raise ArgumentError, "unrecognized encode type: #{type}"
+      end
+    end
+
+    # Reconstruct vector from its code (encoding)
+    def reconstruction code, type: :most_similar
+      case type
+      when :most_similar
+        centrs[code]
+      when :ensemble
+        tot = code.reduce :+
+        centrs.zip(code).map { |centr, contr| centr*contr/tot }.reduce :+
+      when :ensemble_norm
+        centrs.zip(code).map { |centr, contr| centr*contr }.reduce :+
+      else raise ArgumentError, "unrecognized reconstruction type: #{type}"
+      end
+    end
+
+    # Returns index and similitude of most similar centroid to vector
+    def most_similar_centr vec
+      simils = similarities vec
       max_simil = simils.max
       max_idx = simils.index max_simil
       [max_idx, max_simil]
     end

-    #
-    def
-
-    end
-
-    # Per-pixel errors in reconstructing image
-    def reconstr_error img
-      reconstruction(img) - img
+    # Per-pixel errors in reconstructing vector
+    def reconstr_error vec
+      reconstruction(vec) - vec
     end

-    # Train on one
-
-
-
+    # Train on one vector
+    # @param vec [NMatrix]
+    # @return [Integer] index of trained centroid
+    def train_one vec, simils: nil
+      trg_idx, _simil = simils || most_similar_centr(vec)
+      centrs[trg_idx] = centrs[trg_idx] * (1-lrate) + vec * lrate
       Verification.in_range! centrs[trg_idx], vrange
-
+      trg_idx
     end

-    # Train on
-    def train
+    # Train on vector list
+    def train vec_lst, debug: false
       # Two ways here:
-      # - Batch: canonical, centrs updated with each
+      # - Batch: canonical, centrs updated with each vec
       # - Parallel: could be parallel either on simils or on training (?)
       # Unsure on the correctness of either Parallel, let's stick with Batch
-
+      vec_lst.each { |vec| train_one vec; print '.' if debug }
     end
   end
 end
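The compressor now exposes an explicit encode/reconstruction API with three modes (`:most_similar`, `:ensemble`, `:ensemble_norm`). A hypothetical round trip (not from the gem), reusing a trained `vq` such as the one built in `examples/image_compression.rb` above:

```ruby
vec = images.first

# Default mode: the code is just the index of the most similar centroid,
# and reconstruction returns that centroid.
idx = vq.encode vec                         # type: :most_similar
approx = vq.reconstruction idx

# Ensemble modes: the code is the (optionally normalized) list of similarities,
# and reconstruction is a similarity-weighted sum of all centroids.
code = vq.encode vec, type: :ensemble_norm
approx = vq.reconstruction code, type: :ensemble_norm
```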
data/lib/machine_learning_workbench/optimizer/natural_evolution_strategies/base.rb
CHANGED
@@ -2,7 +2,7 @@
 module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
   # Natural Evolution Strategies base class
   class Base
-    attr_reader :ndims, :mu, :sigma, :opt_type, :obj_fn, :parallel_fit, :id, :rng, :last_fits, :best
+    attr_reader :ndims, :mu, :sigma, :opt_type, :obj_fn, :parallel_fit, :id, :rng, :last_fits, :best, :rescale_popsize, :rescale_lrate

     # NES object initialization
     # @param ndims [Integer] number of parameters to optimize
@@ -11,15 +11,16 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     # @param rseed [Integer] allow for deterministic execution on rseed provided
     # @param mu_init [Numeric] values to initalize the distribution's mean
     # @param sigma_init [Numeric] values to initialize the distribution's covariance
-    # @param parallel_fit [boolean] whether the `obj_fn` should be passed all the individuals
+    # @param parallel_fit [boolean] whether the `obj_fn` should be passed all the individuals
     #   together. In the canonical case the fitness function always scores a single individual;
     #   in practical cases though it is easier to delegate the scoring parallelization to the
     #   external fitness function. Turning this to `true` will make the algorithm pass _an
     #   Array_ of individuals to the fitness function, rather than a single instance.
-    def initialize ndims, obj_fn, opt_type, rseed: nil, mu_init: 0, sigma_init: 1, parallel_fit: false
+    def initialize ndims, obj_fn, opt_type, rseed: nil, mu_init: 0, sigma_init: 1, parallel_fit: false, rescale_popsize: 1, rescale_lrate: 1
       raise ArgumentError unless [:min, :max].include? opt_type
       raise ArgumentError unless obj_fn.respond_to? :call
       @ndims, @opt_type, @obj_fn, @parallel_fit = ndims, opt_type, obj_fn, parallel_fit
+      @rescale_popsize, @rescale_lrate = rescale_popsize, rescale_lrate
       @id = NMatrix.identity(ndims, dtype: :float64)
       rseed ||= Random.new_seed
       # puts "NES rseed: #{s}" # currently disabled
@@ -40,11 +41,11 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies

     # Memoized automatic magic numbers
     # NOTE: Doubling popsize and halving lrate often helps
-    def utils; @utilities ||= cmaes_utilities
+    def utils; @utilities ||= cmaes_utilities end
     # (see #utils)
-    def popsize; @popsize ||= cmaes_popsize *
+    def popsize; @popsize ||= cmaes_popsize * rescale_popsize end
     # (see #utils)
-    def lrate; @lrate ||= cmaes_lrate
+    def lrate; @lrate ||= cmaes_lrate * rescale_lrate end

     # Magic numbers from CMA-ES (TODO: add proper citation)
     # @return [NMatrix] scale-invariant utilities
@@ -116,4 +117,4 @@ module MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies
     end
   end
 end
-end
+end
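The new `rescale_popsize` and `rescale_lrate` keywords scale the memoized CMA-ES "magic numbers" without having to subclass or monkey-patch. A hypothetical sketch of how they could be used (the diff only shows the `Base` class; `XNES` stands in for whichever concrete NES subclass is actually instantiated, and the objective below is an illustrative sphere function):

```ruby
require 'machine_learning_workbench'

NES = MachineLearningWorkbench::Optimizer::NaturalEvolutionStrategies

# Minimize the sum of squares over 5 parameters; `ind` is assumed to respond
# to #to_a as the NMatrix individuals in this gem do.
objective = -> (ind) { ind.to_a.flatten.map { |x| x * x }.reduce(:+) }

# As the NOTE in the code suggests, doubling popsize and halving lrate often
# helps; the new keywords express exactly that tweak.
nes = NES::XNES.new 5, objective, :min, rescale_popsize: 2, rescale_lrate: 0.5
```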
data/lib/machine_learning_workbench/tools/normalization.rb
CHANGED
@@ -4,7 +4,7 @@ module MachineLearningWorkbench::Tools
       from ||= nmat.minmax
       old_min, old_max = from
       new_min, new_max = to
-
+      (nmat-old_min)*(new_max-new_min)/(old_max-old_min)+new_min
     end

     # @param per_column [bool] wheather to compute stats per-column or matrix-wise
@@ -16,7 +16,7 @@ module MachineLearningWorkbench::Tools
       stddevs.map! { |v| v.zero? ? 1 : v }
       mean_mat = means.repeat nmat.rows, 0
       stddev_mat = stddevs.repeat nmat.rows, 0
-
+      (nmat - mean_mat) / stddev_mat
     end
   end
 end
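The restored `feature_scaling` body is a plain linear rescale, `(x - old_min) * (new_max - new_min) / (old_max - old_min) + new_min`. A small worked sketch (not from the gem), mirroring how `examples/image_compression.rb` maps 16-bit pixel values into the centroid range:

```ruby
require 'machine_learning_workbench'

Norm = MachineLearningWorkbench::Tools::Normalization

# Three pixels: black, mid-gray, white in a 16-bit range.
pixels = NMatrix.new [1, 3], [0, 32_768, 65_535], dtype: :float64
scaled = Norm.feature_scaling pixels, from: [0, 2**16 - 1], to: [-1, 1]
# => roughly [-1.0, 0.0, 1.0]
```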
data/machine_learning_workbench.gemspec
CHANGED
@@ -5,32 +5,46 @@ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
 Gem::Specification.new do |spec|
   spec.name = "machine_learning_workbench"
   spec.version = `git describe`
-  spec.
-  spec.email =
+  spec.author = "Giuseppe Cuccu"
+  spec.email = "giuseppe.cuccu@gmail.com"

-  spec.summary = %q
-  spec.description = %q
+  spec.summary = %q[Workbench for practical machine learning in Ruby.]
+  spec.description = %q[\
+    This workbench holds a collection of machine learning
+    methods in Ruby. Rather than specializing on a single task or method, this
+    gem aims at providing an encompassing framework for any machine learning
+    application.].gsub(' ', '')
   spec.homepage = "https://github.com/giuse/machine_learning_workbench"
   spec.license = "MIT"
+  spec.post_install_message = %Q[\
+    Thanks for installing the machine learning workbench!
+    It is still a work in progress, feel free to open an issue or drop me an email
+    and start a discussion if you are using this gem. Cheers!
+  ].gsub(' ', '')

-  spec.files
-    f.match(%r{^(test|spec|features)/})
-  end
-  spec.bindir = "exe"
-  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
-  spec.require_paths = ["lib"]
+  spec.files = `git ls-files -z`.split("\x0").reject { |f| f.start_with? "spec" }

-  spec.
+  # spec.bindir = "exe"
+  # spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
+  spec.require_paths = ["lib"]
+  spec.required_ruby_version = '>= 2.4.0'

+  # Install
   spec.add_development_dependency "bundler", "~> 1.16"
   spec.add_development_dependency "rake", "~> 10.0"
+
+  # Test
   spec.add_development_dependency "rspec", "~> 3.0"
+  spec.add_development_dependency "rmagick" # only used for one example
+
+  # Debug
   spec.add_development_dependency "pry", "~> 0.10"
   spec.add_development_dependency "pry-nav", "~> 0.2"
   spec.add_development_dependency "pry-rescue", "~> 1.4"
   spec.add_development_dependency "pry-stack_explorer", "~> 0.4"

-  #
-  spec.
-  spec.
+  # Run
+  spec.requirements << "libatlas-base-dev" # library for following dependency
+  spec.add_dependency "nmatrix-atlas", "~> 0.2"
+  spec.add_dependency "parallel", "~> 1.12"
 end
metadata
CHANGED
@@ -1,29 +1,15 @@
 --- !ruby/object:Gem::Specification
 name: machine_learning_workbench
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.2.0
 platform: ruby
 authors:
 - Giuseppe Cuccu
 autorequire:
-bindir:
+bindir: bin
 cert_chain: []
-date: 2018-03-
+date: 2018-03-14 00:00:00.000000000 Z
 dependencies:
-- !ruby/object:Gem::Dependency
-  name: nmatrix-atlas
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: '0.2'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - "~>"
-      - !ruby/object:Gem::Version
-        version: '0.2'
 - !ruby/object:Gem::Dependency
   name: bundler
   requirement: !ruby/object:Gem::Requirement
@@ -66,6 +52,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '3.0'
+- !ruby/object:Gem::Dependency
+  name: rmagick
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: pry
   requirement: !ruby/object:Gem::Requirement
@@ -123,38 +123,40 @@ dependencies:
       - !ruby/object:Gem::Version
         version: '0.4'
 - !ruby/object:Gem::Dependency
-  name:
+  name: nmatrix-atlas
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
       - !ruby/object:Gem::Version
-        version: '0'
-  type: :
+        version: '0.2'
+  type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
       - !ruby/object:Gem::Version
-        version: '0'
+        version: '0.2'
 - !ruby/object:Gem::Dependency
   name: parallel
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
       - !ruby/object:Gem::Version
-        version: '
-  type: :
+        version: '1.12'
+  type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - "~>"
       - !ruby/object:Gem::Version
-        version: '
-description:
-
-
-
-
+        version: '1.12'
+description: |-
+  \
+  This workbench holds a collection of machine learning
+  methods in Ruby. Rather than specializing on a single task or method, this
+  gem aims at providing an encompassing framework for any machine learning
+  application.
+email: giuseppe.cuccu@gmail.com
 executables: []
 extensions: []
 extra_rdoc_files: []
@@ -169,9 +171,11 @@ files:
 - Rakefile
 - bin/console
 - bin/setup
+- examples/image_compression.rb
 - examples/neuroevolution.rb
 - lib/machine_learning_workbench.rb
 - lib/machine_learning_workbench/compressor.rb
+- lib/machine_learning_workbench/compressor/online_vector_quantization.rb
 - lib/machine_learning_workbench/compressor/vector_quantization.rb
 - lib/machine_learning_workbench/monkey.rb
 - lib/machine_learning_workbench/neural_network.rb
@@ -195,7 +199,10 @@ homepage: https://github.com/giuse/machine_learning_workbench
 licenses:
 - MIT
 metadata: {}
-post_install_message:
+post_install_message: |
+  Thanks for installing the machine learning workbench!
+  It is still a work in progress, feel free to open an issue or drop me an email
+  and start a discussion if you are using this gem. Cheers!
 rdoc_options: []
 require_paths:
 - lib
@@ -203,15 +210,16 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version:
+      version: 2.4.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
       version: '0'
-requirements:
+requirements:
+- libatlas-base-dev
 rubyforge_project:
-rubygems_version: 2.6.
+rubygems_version: 2.6.13
 signing_key:
 specification_version: 4
 summary: Workbench for practical machine learning in Ruby.