ruby-opencv 0.0.10 → 0.0.11.pre
- checksums.yaml +6 -14
- data/Manifest.txt +32 -8
- data/README.md +1 -1
- data/examples/alpha_blend.rb +2 -2
- data/examples/contours/bounding-box-detect-canny.rb +0 -0
- data/examples/contours/contour_retrieval_modes.rb +0 -0
- data/examples/convexhull.rb +0 -0
- data/examples/face_detect.rb +0 -0
- data/examples/facerec/create_csv.rb +43 -0
- data/examples/facerec/facerec_eigenfaces.rb +132 -0
- data/examples/facerec/facerec_fisherfaces.rb +131 -0
- data/examples/facerec/facerec_lbph.rb +116 -0
- data/examples/facerec/readme.md +111 -0
- data/examples/find_obj.rb +2 -2
- data/examples/houghcircle.rb +1 -1
- data/examples/{box.png → images/box.png} +0 -0
- data/examples/{box_in_scene.png → images/box_in_scene.png} +0 -0
- data/examples/{inpaint.png → images/inpaint.png} +0 -0
- data/examples/images/lena-256x256.jpg +0 -0
- data/examples/images/lena-eyes.jpg +0 -0
- data/examples/{lenna-rotated.jpg → images/lenna-rotated.jpg} +0 -0
- data/examples/{lenna.jpg → images/lenna.jpg} +0 -0
- data/examples/{stuff.jpg → images/stuff.jpg} +0 -0
- data/examples/{tiffany.jpg → images/tiffany.jpg} +0 -0
- data/examples/inpaint.rb +1 -1
- data/examples/match_kdtree.rb +2 -2
- data/examples/match_template.rb +26 -0
- data/examples/{matching_to_many_images.rb → matching_to_many_images/matching_to_many_images.rb} +3 -3
- data/examples/matching_to_many_images/query.png +0 -0
- data/examples/matching_to_many_images/train/1.png +0 -0
- data/examples/matching_to_many_images/train/2.png +0 -0
- data/examples/matching_to_many_images/train/3.png +0 -0
- data/examples/matching_to_many_images/train/trainImages.txt +0 -0
- data/examples/paint.rb +0 -0
- data/examples/snake.rb +0 -0
- data/ext/opencv/algorithm.cpp +286 -0
- data/ext/opencv/algorithm.h +38 -0
- data/ext/opencv/cvmat.cpp +205 -76
- data/ext/opencv/cvmat.h +8 -1
- data/ext/opencv/eigenfaces.cpp +67 -0
- data/ext/opencv/eigenfaces.h +30 -0
- data/ext/opencv/extconf.rb +0 -0
- data/ext/opencv/facerecognizer.cpp +174 -0
- data/ext/opencv/facerecognizer.h +46 -0
- data/ext/opencv/fisherfaces.cpp +67 -0
- data/ext/opencv/fisherfaces.h +30 -0
- data/ext/opencv/lbph.cpp +70 -0
- data/ext/opencv/lbph.h +30 -0
- data/ext/opencv/opencv.cpp +51 -1
- data/ext/opencv/opencv.h +6 -0
- data/lib/opencv.rb +0 -0
- data/lib/opencv/version.rb +1 -1
- data/ruby-opencv.gemspec +8 -7
- data/test/eigenfaces_save.xml +7524 -0
- data/test/fisherfaces_save.xml +7530 -0
- data/test/helper.rb +0 -0
- data/test/lbph_save.xml +4304 -0
- data/test/runner.rb +0 -0
- data/test/test_curve.rb +0 -0
- data/test/test_cvavgcomp.rb +0 -0
- data/test/test_cvbox2d.rb +0 -0
- data/test/test_cvcapture.rb +0 -0
- data/test/test_cvchain.rb +0 -0
- data/test/test_cvcircle32f.rb +0 -0
- data/test/test_cvconnectedcomp.rb +0 -0
- data/test/test_cvcontour.rb +0 -0
- data/test/test_cvcontourtree.rb +0 -0
- data/test/test_cverror.rb +0 -0
- data/test/test_cvfeaturetree.rb +0 -0
- data/test/test_cvfont.rb +0 -0
- data/test/test_cvhaarclassifiercascade.rb +0 -0
- data/test/test_cvhistogram.rb +0 -0
- data/test/test_cvhumoments.rb +0 -0
- data/test/test_cvline.rb +0 -0
- data/test/test_cvmat.rb +72 -16
- data/test/test_cvmat_drawing.rb +0 -0
- data/test/test_cvmat_dxt.rb +0 -0
- data/test/test_cvmat_imageprocessing.rb +72 -2
- data/test/test_cvmat_matching.rb +1 -1
- data/test/test_cvmoments.rb +0 -0
- data/test/test_cvpoint.rb +0 -0
- data/test/test_cvpoint2d32f.rb +0 -0
- data/test/test_cvpoint3d32f.rb +0 -0
- data/test/test_cvrect.rb +0 -0
- data/test/test_cvscalar.rb +0 -0
- data/test/test_cvseq.rb +0 -0
- data/test/test_cvsize.rb +0 -0
- data/test/test_cvsize2d32f.rb +0 -0
- data/test/test_cvslice.rb +0 -0
- data/test/test_cvsurfparams.rb +0 -0
- data/test/test_cvsurfpoint.rb +0 -0
- data/test/test_cvtermcriteria.rb +0 -0
- data/test/test_cvtwopoints.rb +0 -0
- data/test/test_cvvideowriter.rb +0 -0
- data/test/test_eigenfaces.rb +93 -0
- data/test/test_fisherfaces.rb +93 -0
- data/test/test_iplconvkernel.rb +0 -0
- data/test/test_iplimage.rb +0 -4
- data/test/test_lbph.rb +152 -0
- data/test/test_mouseevent.rb +0 -0
- data/test/test_opencv.rb +33 -4
- data/test/test_pointset.rb +7 -5
- data/test/test_preliminary.rb +0 -0
- data/test/test_trackbar.rb +0 -0
- data/test/test_window.rb +0 -0
- metadata +84 -56
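The bulk of this release is a new face-recognition binding (ext/opencv/facerecognizer.cpp with eigenfaces.cpp, fisherfaces.cpp and lbph.cpp) plus the facerec examples and tests listed above. A minimal sketch of how that API is used, distilled from the facerec examples shown further down; the image paths are placeholders, and LBPH usage is assumed to mirror EigenFaces/FisherFaces:

require 'opencv'
include OpenCV

# Two tiny training images and their integer class labels (placeholder paths).
images = ['s1/1.pgm', 's2/1.pgm'].map { |f| CvMat.load(f, CV_LOAD_IMAGE_GRAYSCALE) }
labels = [0, 1]

model = EigenFaces.new          # FisherFaces.new is used the same way in the samples
model.train(images, labels)

# predict returns the recognized label and a confidence value.
label, confidence = model.predict(CvMat.load('probe.pgm', CV_LOAD_IMAGE_GRAYSCALE))
puts "predicted class #{label} (confidence #{confidence})"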
checksums.yaml
CHANGED
@@ -1,15 +1,7 @@
 ---
-metadata.gz: !binary |-
-  ODM3MzU3NTQ4YzczMGM3M2U1YzllNjc3ZjJmMTg5ZTY0YTUzZGY2NGMwMzNh
-  ZWRmODkwNDFhYzMyY2QzZDcyZDcxYzMyNjBkN2JkZDBjMDhmNDk3NzBiMzQz
-  ZmZkNjk1NTMwNDIzZGYzNGNmMTQ5NDg5ZTljZGYyMTQ1N2Y3ZTk=
-data.tar.gz: !binary |-
-  NmM2ZTNlZjUyMmU4OWI2NjUzMjcwYmU4ZTJjN2ZiY2VkNjAzMmM4OTJkNTAz
-  YzBjYTYwMTczNjRiY2EyZDA4ZWY5ZWZkY2M5ZmZmYjJiM2JhNDJkMmQ5YzRh
-  MmRhNmM2ZjI0NTczZGM0M2YxZDZiNzViNDZhZWUzZTAwM2QwZjY=
+SHA1:
+  metadata.gz: 6d80b6b271b5f36988e8ee0cf78dabcc363638e5
+  data.tar.gz: 5a1d2921ecee9e26c65c030463040db4a60fd61b
+SHA512:
+  metadata.gz: 1fc2b4f7a49ca855b07af6cf4ea5379858de4123d58e177011aa58f9c48ba5fdc5bc05d1e8adafa15864318497a7ec4b881174d4874c451c76a800ee24948208
+  data.tar.gz: dc97af78d5c411cbb42e480c58e9e8fd330849c8ea04198320bebba80e49ec52e5828f0a98bf958870442c632aa697df32e16a8cb4b39064de5aa3d912203eb0
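The 0.0.10 gem shipped base64-encoded !binary digests; 0.0.11.pre switches to the plain-hex SHA1/SHA512 layout shown above. A hedged sketch, not part of the gem, of verifying those digests; it assumes checksums.yaml, metadata.gz and data.tar.gz have already been unpacked into the current directory:

require 'yaml'
require 'digest'

# Compare the recorded SHA512 digests against the actual archive members.
sums = YAML.load_file('checksums.yaml')
%w[metadata.gz data.tar.gz].each do |name|
  actual = Digest::SHA512.file(name).hexdigest
  puts "#{name}: #{actual == sums['SHA512'][name] ? 'OK' : 'MISMATCH'}"
end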
data/Manifest.txt
CHANGED
@@ -8,8 +8,6 @@ README.md
 Rakefile
 config.yml
 examples/alpha_blend.rb
-examples/box.png
-examples/box_in_scene.png
 examples/contours/bitmap-contours-with-labels.png
 examples/contours/bitmap-contours.png
 examples/contours/bounding-box-detect-canny.rb
@@ -17,14 +15,26 @@ examples/contours/contour_retrieval_modes.rb
 examples/contours/rotated-boxes.jpg
 examples/convexhull.rb
 examples/face_detect.rb
+examples/facerec/create_csv.rb
+examples/facerec/facerec_eigenfaces.rb
+examples/facerec/facerec_fisherfaces.rb
+examples/facerec/facerec_lbph.rb
+examples/facerec/readme.md
 examples/find_obj.rb
 examples/houghcircle.rb
-examples/inpaint.png
+examples/images/box.png
+examples/images/box_in_scene.png
+examples/images/inpaint.png
+examples/images/lena-256x256.jpg
+examples/images/lena-eyes.jpg
+examples/images/lenna-rotated.jpg
+examples/images/lenna.jpg
+examples/images/stuff.jpg
+examples/images/tiffany.jpg
 examples/inpaint.rb
-examples/lenna-rotated.jpg
-examples/lenna.jpg
 examples/match_kdtree.rb
-examples/matching_to_many_images.rb
+examples/match_template.rb
+examples/matching_to_many_images/matching_to_many_images.rb
 examples/matching_to_many_images/query.png
 examples/matching_to_many_images/train/1.png
 examples/matching_to_many_images/train/2.png
@@ -32,8 +42,8 @@ examples/matching_to_many_images/train/3.png
 examples/matching_to_many_images/train/trainImages.txt
 examples/paint.rb
 examples/snake.rb
-examples/stuff.jpg
-examples/tiffany.jpg
+ext/opencv/algorithm.cpp
+ext/opencv/algorithm.h
 ext/opencv/curve.cpp
 ext/opencv/curve.h
 ext/opencv/cvavgcomp.cpp
@@ -110,13 +120,21 @@ ext/opencv/cvutils.cpp
 ext/opencv/cvutils.h
 ext/opencv/cvvideowriter.cpp
 ext/opencv/cvvideowriter.h
+ext/opencv/eigenfaces.cpp
+ext/opencv/eigenfaces.h
 ext/opencv/extconf.rb
+ext/opencv/facerecognizer.cpp
+ext/opencv/facerecognizer.h
+ext/opencv/fisherfaces.cpp
+ext/opencv/fisherfaces.h
 ext/opencv/gui.cpp
 ext/opencv/gui.h
 ext/opencv/iplconvkernel.cpp
 ext/opencv/iplconvkernel.h
 ext/opencv/iplimage.cpp
 ext/opencv/iplimage.h
+ext/opencv/lbph.cpp
+ext/opencv/lbph.h
 ext/opencv/mouseevent.cpp
 ext/opencv/mouseevent.h
 ext/opencv/opencv.cpp
@@ -135,7 +153,10 @@ lib/opencv.rb
 lib/opencv/psyched_yaml.rb
 lib/opencv/version.rb
 ruby-opencv.gemspec
+test/eigenfaces_save.xml
+test/fisherfaces_save.xml
 test/helper.rb
+test/lbph_save.xml
 test/runner.rb
 test/samples/airplane.jpg
 test/samples/baboon.jpg
@@ -217,8 +238,11 @@ test/test_cvsurfpoint.rb
 test/test_cvtermcriteria.rb
 test/test_cvtwopoints.rb
 test/test_cvvideowriter.rb
+test/test_eigenfaces.rb
+test/test_fisherfaces.rb
 test/test_iplconvkernel.rb
 test/test_iplimage.rb
+test/test_lbph.rb
 test/test_mouseevent.rb
 test/test_opencv.rb
 test/test_pointset.rb
data/README.md
CHANGED
data/examples/alpha_blend.rb
CHANGED
@@ -6,8 +6,8 @@
 require 'opencv'
 include OpenCV
 
-img1 = IplImage.load('lenna.jpg', CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH)
-img2 = IplImage.load('tiffany.jpg', CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH)
+img1 = IplImage.load('images/lenna.jpg', CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH)
+img2 = IplImage.load('images/tiffany.jpg', CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH)
 
 window = GUI::Window.new('Alpha blend')
 max = 100.0
data/examples/contours/bounding-box-detect-canny.rb
CHANGED
File without changes
data/examples/contours/contour_retrieval_modes.rb
CHANGED
File without changes
data/examples/convexhull.rb
CHANGED
File without changes
data/examples/face_detect.rb
CHANGED
File without changes
data/examples/facerec/create_csv.rb
ADDED
@@ -0,0 +1,43 @@
+#!/usr/bin/env ruby
+# -*- mode: ruby; coding: utf-8 -*-
+
+# This is a tiny script to help you creating a CSV file from a face
+# database with a similar hierarchie:
+#
+# philipp@mango:~/facerec/data/at$ tree
+# .
+# |-- README
+# |-- s1
+# |   |-- 1.pgm
+# |   |-- ...
+# |   |-- 10.pgm
+# |-- s2
+# |   |-- 1.pgm
+# |   |-- ...
+# |   |-- 10.pgm
+# ...
+# |-- s40
+# |   |-- 1.pgm
+# |   |-- ...
+# |   |-- 10.pgm
+#
+# See http://docs.opencv.org/trunk/modules/contrib/doc/facerec/facerec_tutorial.html
+#
+if ARGV.size != 1
+  puts "usage: ruby #{__FILE__} <base_path>"
+  exit
+end
+
+BASE_PATH = ARGV[0]
+SEPARATOR = ';'
+
+label = 0
+Dir.glob("#{BASE_PATH}/*").each { |dir|
+  if FileTest::directory? dir
+    Dir.glob("#{dir}/*") { |filename|
+      puts "#{filename}#{SEPARATOR}#{label}"
+    }
+    label += 1
+  end
+}
+
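create_csv.rb only prints lines of the form <path>;<label>; the read_csv helpers in the facerec samples below parse that format back into parallel image and label arrays. A tiny hedged sketch of consuming such a file, where faces.csv is a hypothetical name for the script's redirected output:

# Each line is "<image path>;<integer label>", as emitted by create_csv.rb.
File.foreach('faces.csv') do |line|
  path, label = line.chomp.split(';')
  puts "#{path} -> class #{label.to_i}"
end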
data/examples/facerec/facerec_eigenfaces.rb
ADDED
@@ -0,0 +1,132 @@
+#!/usr/bin/env ruby
+# -*- mode: ruby; coding: utf-8 -*-
+
+# Eigenfaces sample in ruby-opencv, equivalent to http://docs.opencv.org/trunk/_downloads/facerec_eigenfaces.cpp
+# See http://docs.opencv.org/trunk/modules/contrib/doc/facerec/facerec_tutorial.html
+require 'opencv'
+include OpenCV
+
+def norm_0_255(src)
+  dst = nil
+  case src.channel
+  when 1
+    dst = src.normalize(0, 255, CV_NORM_MINMAX, CV_8UC1)
+  when 2
+    dst = src.normalize(0, 255, CV_NORM_MINMAX, CV_8UC3)
+  else
+    dst = src.copy
+  end
+
+  dst
+end
+
+def read_csv(filename, sepalator = ';')
+  images = []
+  labels = []
+  open(filename, 'r') { |f|
+    f.each { |line|
+      path, label = line.chomp.split(sepalator)
+      images << CvMat.load(path, CV_LOAD_IMAGE_GRAYSCALE)
+      labels << label.to_i
+    }
+  }
+
+  [images, labels]
+end
+
+if ARGV.size < 1
+  puts "usage: ruby #{__FILE__} <csv.ext> <output_folder>"
+  exit 1
+end
+fn_csv = ARGV.shift
+output_folder = ARGV.shift
+
+images, labels = read_csv(fn_csv);
+
+height = images[0].rows;
+
+# The following lines simply get the last images from your dataset and remove it
+# from the vector. This is done, so that the training data (which we learn the
+# cv::FaceRecognizer on) and the test data we test the model with, do not overlap.
+test_sample = images.pop
+test_label = labels.pop
+
+# The following lines create an Eigenfaces model for
+# face recognition and train it with the images and
+# labels read from the given CSV file.
+# This here is a full PCA, if you just want to keep
+# 10 principal components (read Eigenfaces), then call
+# the factory method like this:
+#
+# EigenFaces.new(10)
+#
+# If you want to create a FaceRecognizer with a
+# confidence threshold (e.g. 123.0), call it with:
+#
+# EigenFaces.new(10, 123.0)
+#
+# If you want to use _all_ Eigenfaces and have a threshold,
+# then call the method like this:
+#
+# EigenFaces.new(0, 123.0)
+#
+model = EigenFaces.new
+model.train(images, labels)
+
+# The following line predicts the label of a given test image:
+predicted_label, predicted_confidence = model.predict(test_sample)
+
+puts "Predicted class: #{predicted_label} / Actual class: #{test_label}"
+
+eigenvalues = model.get_mat('eigenvalues')
+w = model.get_mat('eigenvectors');
+mean = model.get_mat('mean')
+
+if output_folder
+  norm_0_255(mean.reshape(1, images[0].rows)).save("#{output_folder}/mean.png")
+else
+  w1 = GUI::Window.new('Predicted')
+  w2 = GUI::Window.new('Actual')
+  w3 = GUI::Window.new('mean')
+
+  w1.show images[predicted_label]
+  w2.show images[test_label]
+  w3.show norm_0_255(mean.reshape(1, images[0].rows))
+end
+
+# Display or save the Eigenfaces:
+[w.cols, 10].min.times { |i|
+  puts "Eigenvalue ##{i} = #{eigenvalues[i][0]}"
+  ev = w.get_cols(i).clone()
+  grayscale = norm_0_255(ev.reshape(1, height))
+
+  # Show the image & apply a Jet colormap for better sensing.
+  cgrayscale = grayscale.apply_color_map(COLORMAP_JET)
+  if output_folder
+    norm_0_255(cgrayscale).save("#{output_folder}/eigenface_#{i}.png")
+  else
+    w4 = GUI::Window.new("eigenface_#{i}")
+    w4.show norm_0_255(cgrayscale)
+  end
+}
+
+[w.cols, 10].min.step([w.cols, 300].min, 15) { |num_components|
+  # slice the eigenvectors from the model
+  evs = w.get_cols(0..num_components)
+  projection = images[0].reshape(1, 1).subspace_project(evs, mean)
+  reconstruction = projection.subspace_reconstruct(evs, mean)
+
+  # Normalize the result:
+  reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows))
+
+  # Display or save:
+  if output_folder
+    norm_0_255(reconstruction).save("#{output_folder}/eigenface_reconstruction_#{num_components}.png")
+  else
+    w5 = GUI::Window.new("eigenface_reconstruction_#{num_components}")
+    w5.show norm_0_255(reconstruction)
+  end
+}
+
+GUI::wait_key unless output_folder
+
data/examples/facerec/facerec_fisherfaces.rb
ADDED
@@ -0,0 +1,131 @@
+#!/usr/bin/env ruby
+# -*- mode: ruby; coding: utf-8 -*-
+
+# Fisherfaces sample in ruby-opencv, equivalent to http://docs.opencv.org/trunk/_downloads/facerec_fisherfaces.cpp
+# See http://docs.opencv.org/trunk/modules/contrib/doc/facerec/facerec_tutorial.html
+require 'opencv'
+include OpenCV
+
+def norm_0_255(src)
+  dst = nil
+  case src.channel
+  when 1
+    dst = src.normalize(0, 255, CV_NORM_MINMAX, CV_8UC1)
+  when 2
+    dst = src.normalize(0, 255, CV_NORM_MINMAX, CV_8UC3)
+  else
+    dst = src.copy
+  end
+
+  dst
+end
+
+def read_csv(filename, sepalator = ';')
+  images = []
+  labels = []
+  open(filename, 'r') { |f|
+    f.each { |line|
+      path, label = line.chomp.split(sepalator)
+      images << CvMat.load(path, CV_LOAD_IMAGE_GRAYSCALE)
+      labels << label.to_i
+    }
+  }
+
+  [images, labels]
+end
+
+if ARGV.size < 1
+  puts "usage: ruby #{__FILE__} <csv.ext> <output_folder>"
+  exit 1
+end
+fn_csv = ARGV.shift
+output_folder = ARGV.shift
+
+images, labels = read_csv(fn_csv);
+
+height = images[0].rows;
+
+# The following lines simply get the last images from your dataset and remove it
+# from the vector. This is done, so that the training data (which we learn the
+# cv::FaceRecognizer on) and the test data we test the model with, do not overlap.
+test_sample = images.pop
+test_label = labels.pop
+
+# The following lines create an Fisherfaces model for
+# face recognition and train it with the images and
+# labels read from the given CSV file.
+# If you just want to keep 10 Fisherfaces, then call
+# the factory method like this:
+#
+# FisherFaces.new(10)
+#
+# However it is not useful to discard Fisherfaces! Please
+# always try to use _all_ available Fisherfaces for
+# classification.
+#
+# If you want to create a FaceRecognizer with a
+# confidence threshold (e.g. 123.0) and use _all_
+# Fisherfaces, then call it with:
+#
+# FisherFaces.new(0, 123.0);
+#
+model = FisherFaces.new
+model.train(images, labels)
+
+# The following line predicts the label of a given test image:
+predicted_label, predicted_confidence = model.predict(test_sample)
+
+puts "Predicted class: #{predicted_label} / Actual class: #{test_label}"
+
+eigenvalues = model.get_mat('eigenvalues')
+w = model.get_mat('eigenvectors');
+mean = model.get_mat('mean')
+
+if output_folder
+  norm_0_255(mean.reshape(1, images[0].rows)).save("#{output_folder}/mean.png")
+else
+  w1 = GUI::Window.new('Predicted')
+  w2 = GUI::Window.new('Actual')
+  w3 = GUI::Window.new('mean')
+
+  w1.show images[predicted_label]
+  w2.show images[test_label]
+  w3.show norm_0_255(mean.reshape(1, images[0].rows))
+end
+
+# Display or save the first, at most 16 Fisherfaces
+[w.cols, 16].min.times { |i|
+  puts "Eigenvalue ##{i} = #{eigenvalues[i][0]}"
+  ev = w.get_cols(i).clone()
+  grayscale = norm_0_255(ev.reshape(1, height))
+
+  # Show the image & apply a Bone colormap for better sensing.
+  cgrayscale = grayscale.apply_color_map(COLORMAP_BONE)
+  if output_folder
+    norm_0_255(cgrayscale).save("#{output_folder}/fisherface_#{i}.png")
+  else
+    w4 = GUI::Window.new("fisherface_#{i}")
+    w4.show norm_0_255(cgrayscale)
+  end
+}
+
+[w.cols, 16].min.times { |num_component|
+  # Slice the Fisherface from the model
+  ev = w.get_cols(num_component)
+  projection = images[0].reshape(1, 1).subspace_project(ev, mean)
+  reconstruction = projection.subspace_reconstruct(ev, mean)
+
+  # Normalize the result:
+  reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows))
+
+  # Display or save:
+  if output_folder
+    norm_0_255(reconstruction).save("#{output_folder}/fisherface_reconstruction_#{num_component}.png")
+  else
+    w5 = GUI::Window.new("fisherface_reconstruction_#{num_component}")
+    w5.show norm_0_255(reconstruction)
+  end
+}
+
+GUI::wait_key unless output_folder
+