feldtruby 0.2.0
Sign up to get free protection for your applications and to get access to all the features.
- data/.autotest +23 -0
- data/.gemtest +0 -0
- data/History.txt +4 -0
- data/Manifest.txt +44 -0
- data/README.md +63 -0
- data/README.txt +59 -0
- data/Rakefile +19 -0
- data/TODO +6 -0
- data/lib/feldtruby/array/basic_stats.rb +88 -0
- data/lib/feldtruby/array/count_by.rb +7 -0
- data/lib/feldtruby/array.rb +34 -0
- data/lib/feldtruby/file/file_change_watcher.rb +88 -0
- data/lib/feldtruby/file/tempfile.rb +25 -0
- data/lib/feldtruby/float.rb +17 -0
- data/lib/feldtruby/math/rand.rb +5 -0
- data/lib/feldtruby/net/html_doc_getter.rb +31 -0
- data/lib/feldtruby/optimize/differential_evolution.rb +186 -0
- data/lib/feldtruby/optimize/max_steps_termination_criterion.rb +24 -0
- data/lib/feldtruby/optimize/objective.rb +302 -0
- data/lib/feldtruby/optimize/optimizer.rb +145 -0
- data/lib/feldtruby/optimize/random_search.rb +9 -0
- data/lib/feldtruby/optimize/search_space.rb +69 -0
- data/lib/feldtruby/optimize/stdout_logger.rb +138 -0
- data/lib/feldtruby/optimize.rb +28 -0
- data/lib/feldtruby/string/to_iso.rb +7 -0
- data/lib/feldtruby/time.rb +22 -0
- data/lib/feldtruby/vector.rb +14 -0
- data/lib/feldtruby/visualization/circos.rb +25 -0
- data/lib/feldtruby/word_counter.rb +100 -0
- data/lib/feldtruby.rb +6 -0
- data/test/helper.rb +7 -0
- data/test/test_array.rb +71 -0
- data/test/test_array_basic_stats.rb +130 -0
- data/test/test_array_count_by.rb +13 -0
- data/test/test_float.rb +20 -0
- data/test/test_html_doc_getter.rb +16 -0
- data/test/test_optimize.rb +55 -0
- data/test/test_optimize_differential_evolution.rb +42 -0
- data/test/test_optimize_objective.rb +157 -0
- data/test/test_optimize_populationbasedoptimizer.rb +24 -0
- data/test/test_optimize_random_search.rb +46 -0
- data/test/test_optimize_search_space.rb +97 -0
- data/test/test_time.rb +27 -0
- data/test/test_vector.rb +98 -0
- data/test/test_word_counter.rb +57 -0
- metadata +149 -0
@@ -0,0 +1,186 @@
|
|
1
|
+
require 'feldtruby/optimize/optimizer'
|
2
|
+
require 'feldtruby/math/rand'
|
3
|
+
require 'feldtruby/vector'
|
4
|
+
|
5
|
+
module FeldtRuby::Optimize
|
6
|
+
|
7
|
+
# Common to many Evolutionary Computation optimizers.
# Empty marker subclass: concrete EC optimizers (e.g. DE) inherit from this
# rather than directly from PopulationBasedOptimizer (defined in optimizer.rb).
class EvolutionaryOptimizer < PopulationBasedOptimizer; end
|
9
|
+
|
10
|
+
# Base class for Differential Evolution (DE) for continuous, real-valued optimization.
# Since there are many different DE variants this is the base class
# from which we can then include different strategy parts and create complete DE classes.
#
# A DE strategy generates a new trial vector as a candidate to replace a parent vector.
# It is composed of four parts:
#  - a mutation strategy that samples a set of parents to create a donor vector
#  - a crossover strategy which takes a donor and parent vector and creates a trial vector
#  - a bounding strategy which ensures the trial vector is within the search space
#  - an update strategy which can be used to self-adapt parameters based on feedback on improvements
#
# A strategy gets feedback on whether the latest trial vector was an improvement. It
# can use this feedback to adapt its operation over time.
#
# We implement strategies as Ruby Module's that we can include in different DE optimizer classes
# that inherit from the base one above. For maximum flexibility, each of the four parts of
# a DE strategy are implemented in separate Module's so we can mix and match them.
class DEOptimizerBase < EvolutionaryOptimizer
  # Default DE control parameters: scale factor F, crossover rate CR, and the
  # number of parents sampled per step.
  DefaultOptions = {
    :DE_F_ScaleFactor => 0.7,
    :DE_CR_CrossoverRate => 0.5,
    :DE_NumParentsToSample => 4,
  }

  # Merge DE defaults under the user-supplied options and cache the control
  # parameters in instance variables (short and long names for convenience).
  def initialize_options(options)
    super
    @options = DefaultOptions.clone.update(options)
    @f = @scale_factor = @options[:DE_F_ScaleFactor]
    @cr = @crossover_rate = @options[:DE_CR_CrossoverRate]
    @num_parents_to_sample = @options[:DE_NumParentsToSample]
  end

  # Create a population of a given size by randomly sampling candidates from the search space
  # and converting them to Vector's so we can more easily calculate on them later.
  def initialize_population(sizeOfPopulation)
    @population = Array.new(sizeOfPopulation).map {Vector.elements(search_space.gen_candidate())}
  end

  # Create a candidate from an array. By default we represent candidates with Ruby
  # vectors since they allow vector-based arithmetic.
  def candidate_from_array(ary)
    Vector.elements(ary)
  end

  # One step of the optimization is to (try to) update one vector. Thus, this is more of
  # a steady-state than a generational EC. DE is typically a generational EC but it is hard
  # to see any reason why. The default DE here is the classic DE/rand/1/*.
  # Returns a one-element array with the better of trial/target (the Optimizer
  # superclass expects an array of new candidates).
  def optimization_step()
    trial, target, target_index = generate_trial_candidate_and_target()

    # We get [candidate, qualityValue, subQualityValues] for each vector;
    # only the best entry is used below (worst is intentionally discarded).
    best, worst = objective.rank_candidates([target, trial])

    # Supplant the target vector with the trial vector if better
    if best.first != target
      @logger.note_new_better("Trial vector was better", *best)
      trial_better = true
      update_candidate_in_population(target_index, trial)
    else
      trial_better = false
    end

    # Give feedback to strategy since some strategies use this to self-adapt
    feedback_on_trial_vs_target(trial, target, trial_better)

    [best.first]
  end

  #####################################
  # Strategy-related methods. Can be overridden by strategies later. Below are the defaults.
  #####################################

  # Number of parents to sample. Default is that this is constant but can be overridden by
  # a mutation strategy.
  # NOTE(review): reads via an `options` reader — confirm Optimizer exposes one;
  # only the @options ivar is visible in this file.
  def num_parents_to_sample; options[:DE_NumParentsToSample]; end

  # Scale factor F.
  # Default is to use the one set in the optimizer, regardless of target vector.
  def scale_factor(targetVectorIndex); @f; end

  # Crossover rate. Default is to use the one set in the optimizer, regardless of position
  # of the crossover position.
  def crossover_rate(position); @cr; end

  # Sample parents from the population and return their indices.
  def sample_parents()
    sample_population_indices_without_replacement(num_parents_to_sample)
  end

  # Main entry point for a DEStrategy. Generates a new trial vector and the parent
  # it targets. Returns [trial, target, target_parent_index].
  def generate_trial_candidate_and_target()
    # Sample parents. The first parent returned is used as target parent to cross-over with.
    # Rest of the sampled parents is/can be used in mutation.
    target_parent_index, *parent_indices = sample_parents()
    target = get_candidate(target_parent_index)

    # The three main steps. We get feedback from optimizer at a later stage.
    donor = mutate(target_parent_index, parent_indices) # Should be implemented by a MutationStrategy
    trial = crossover_donor_and_target(target, donor,
      target_parent_index)                              # Should be implemented by a CrossoverStrategy
    trial = bound_trial_candidate(trial)                # Should be implemented by a BoundingStrategy

    return trial, target, target_parent_index
  end
end
|
116
|
+
|
117
|
+
module DE_BoundingStrategy_RandomWithinSearchSpace
  # Default bounding strategy: delegate to the search space, which maps any
  # out-of-bounds trial candidate back inside its limits.
  def bound_trial_candidate(candidate)
    space = search_space
    space.bound(candidate)
  end
end
|
123
|
+
|
124
|
+
module DE_UpdateStrategy_NoFeedbackUpdates
  # Strategies may self-adapt from optimizer feedback; this default variant
  # deliberately ignores the feedback and changes nothing.
  def feedback_on_trial_vs_target(trial, target, trialBetter)
  end
end
|
128
|
+
|
129
|
+
# This is the classic binomial DE/*/*/bin crossover.
module DE_CrossoverStrategy_Binomial
  # Build a trial vector by copying donor genes over the target with
  # probability crossover_rate(j); one randomly chosen position is always
  # taken from the donor so trial != target is guaranteed.
  def crossover_donor_and_target(targetVector, donorVector, targetVectorIndex)
    length = donorVector.size
    forced = rand_int(length)                # this position always comes from the donor
    trial = targetVector.clone.to_a          # start from the target's values
    trial[forced] = donorVector[forced]
    length.times do |position|
      copy_from_donor = rand() <= crossover_rate(position)
      trial[position] = donorVector[position] if copy_from_donor
    end
    candidate_from_array(trial)
  end
end
|
142
|
+
|
143
|
+
# Building block for mutation strategies.
module DE_X_1_StrategyBuildingBlock
  # We need 0 target parents and 2 other parents. Note that we must sample a
  # target parent also even though it is not used in the mutation.
  def num_parents_to_sample; 3; end

  # Vector difference between the two donor parents at the given indices.
  def difference_vector(donorParentsIndices)
    first_parent, second_parent = get_candidates_with_indices(donorParentsIndices)
    first_parent - second_parent
  end
end
|
154
|
+
|
155
|
+
module DE_Rand_X_StrategyBuildingBlock
  # DE/rand mutation: a randomly sampled base parent (the last sampled index)
  # plus a scaled difference vector built from the remaining parents.
  def mutate(targetIndex, donorParentsIndices)
    base = get_candidate(donorParentsIndices.last)
    scale = scale_factor(targetIndex)
    base + scale * difference_vector(donorParentsIndices[0...-1])
  end
end
|
161
|
+
|
162
|
+
# The most-used DE/rand/1 mutation strategy.
module DE_MutationStrategy_Rand_1
  include DE_X_1_StrategyBuildingBlock

  # We need one more parent in the Rand strategy than in the others, but
  # we can reuse the difference vector generation. So partial reuse here.
  # NOTE! Order of inclusion is critical!!! This override must appear here,
  # between the two includes, exactly as written, so it shadows the
  # `num_parents_to_sample` from DE_X_1_StrategyBuildingBlock.
  def num_parents_to_sample; 4; end

  include DE_Rand_X_StrategyBuildingBlock
end
|
173
|
+
|
174
|
+
# The default DEOptimizer composes the four strategy parts:
#   Bounding  = random bounding within the search space
#   Update    = no updates based on feedback
#   Crossover = classic binomial
#   Mutation  = Rand-1
class DEOptimizer < DEOptimizerBase
  include DE_BoundingStrategy_RandomWithinSearchSpace
  include DE_UpdateStrategy_NoFeedbackUpdates
  include DE_CrossoverStrategy_Binomial
  include DE_MutationStrategy_Rand_1
end
|
185
|
+
|
186
|
+
end
|
@@ -0,0 +1,24 @@
|
|
1
|
+
require 'feldtruby/optimize'
|
2
|
+
|
3
|
+
# Base termination criterion: decides when an optimizer should stop.
class FeldtRuby::Optimize::TerminationCriterion
  # Default termination criterion is to never terminate.
  # optimizer:: the optimizer asking whether it should stop (unused here).
  def terminate?(optimizer)
    false
  end

  # Inverse of terminate?, i.e. should we continue optimizing?
  # Bug fix: terminate? takes the optimizer as a required argument, so it must
  # be accepted and forwarded here; the previous zero-arg call to terminate?
  # raised ArgumentError. The parameter defaults to nil so any existing
  # zero-arg callers keep working with this base class.
  def continue_optimization?(optimizer = nil)
    !terminate?(optimizer)
  end
end
|
14
|
+
|
15
|
+
# Terminate after a fixed number of optimizer steps.
class FeldtRuby::Optimize::MaxStepsTerminationCriterion < FeldtRuby::Optimize::TerminationCriterion
  # Step budget after which terminate? becomes true.
  attr_accessor :max_steps

  # maxSteps:: number of optimization steps allowed (default 1000).
  def initialize(maxSteps = 1000)
    @max_steps = maxSteps
  end

  # True once the optimizer has performed at least max_steps steps.
  def terminate?(optimizer)
    optimizer.num_optimization_steps >= max_steps
  end
end
|
@@ -0,0 +1,302 @@
|
|
1
|
+
|
2
|
+
require 'feldtruby/optimize'
|
3
|
+
require 'feldtruby/float'
|
4
|
+
|
5
|
+
# An Objective captures one or more objectives into a single object
# and supports a large number of ways to utilize basic objective
# functions in a single framework. You subclass and add instance
# methods named as
#  objective_min_qualityAspectName (for an objective/aspect to be minimized), or
#  objective_max_qualityAspectName (for an objective/aspect to be maximized).
# There can be multiple aspects (sub-objectives) for a single objective.
# This base class uses mean-weighted-global-ratios (MWGR) as the default mechanism
# for handling multi-objectives i.e. with more than one sub-objective.
# An objective has version numbers to indicate the number of times the scale
# for the calculation of the ratios has been changed.
class FeldtRuby::Optimize::Objective
  attr_accessor :current_version, :logger

  def initialize
    @logger = nil # To avoid getting warnings that logger has not been initialized
    @current_version = 0
    # One slot per aspect; filled with the candidate that set the current
    # extreme value for that aspect (see reset_quality_scale).
    @pareto_front = Array.new(num_aspects)
  end

  # Called when a new global extreme was seen for aspectIndex. Records the
  # candidate on the Pareto front when the extreme is an improvement for that
  # aspect (new min on a min-aspect, new max on a max-aspect), invalidates the
  # cached best, and bumps the version so cached quality values go stale.
  def reset_quality_scale(candidate, aspectIndex, typeOfReset)
    if (typeOfReset == :min && is_min_aspect?(aspectIndex)) ||
      (typeOfReset == :max && !is_min_aspect?(aspectIndex))
      @pareto_front[aspectIndex] = candidate
    end

    # Reset the best object since we have a new scale
    @best_candidate = nil
    @best_qv = nil

    inc_version_number
  end

  # Cache the given candidate (and its quality value) as the best seen.
  def update_best_candidate(candidate)
    @best_candidate = candidate
    @best_qv = candidate._quality_value
  end

  # Bump the scale version; invalidates every previously attached quality value.
  def inc_version_number
    @current_version += 1
  end

  # Return the number of aspects/sub-objectives of this objective.
  def num_aspects
    @num_aspects ||= aspect_methods.length
  end

  # Class for representing multi-objective qualities: an aggregate quality
  # value plus the per-aspect sub-values, stamped with the objective's
  # version at creation time.
  class QualityValue
    attr_reader :qv, :sub_qvs

    def initialize(qv, subQvs, objective)
      @qv, @sub_qvs, @objective = qv, subQvs, objective
      @version = objective.current_version
    end

    # Order by the aggregate quality value only.
    def <=>(other)
      @qv <=> other.qv
    end

    # Two quality values are the same if they have the same qv, regardless of their
    # sub qualities.
    def ==(other)
      other = other.qv if QualityValue === other
      @qv == other
    end

    # Human-readable report of this value's improvement over +other+.
    # NOTE(review): relies on ratio_diff_vs from feldtruby/float — confirm it
    # is loaded wherever this is called.
    def improvement_in_relation_to(other)
      if QualityValue === other
        pdiff = @qv.ratio_diff_vs(other.qv)
        subpdiffs = @sub_qvs.zip(other.sub_qvs).map {|s, os| s.ratio_diff_vs(os)}
        qinspect(pdiff, subpdiffs, "Difference", "SubQ. differences", true) + ", #{report_on_num_differences(subpdiffs)}"
      else
        @qv.improvement_in_relation_to(other)
      end
    end

    # Summarize how many sub-objective ratios went up, down, or stayed equal.
    def report_on_num_differences(subQvRatioDiffs)
      num_inc = subQvRatioDiffs.select {|v| v > 0}.length
      num_dec = subQvRatioDiffs.select {|v| v < 0}.length
      num_same = subQvRatioDiffs.length - num_inc - num_dec
      "#{num_inc} increased, #{num_dec} decreased, #{num_same} same"
    end

    # Format a quality value and its sub-values for display. Ratios are
    # rendered as percentages.
    def qinspect(qv, subQvs, qvDesc = "Quality", subQvDesc = "SubQualities", subQvsAreRatios = false, qvIsRatio = true)
      subQvs = subQvs.map {|v| v*100.0} if subQvsAreRatios
      sqs = subQvs.map do |sqv|
        s = (Float === sqv ? sqv.round_to_decimals(4) : sqv).inspect
        s += "%" if subQvsAreRatios
        s
      end.join(", ")
      if qvIsRatio
        qstr = ("%.4f" % (100.0 * qv)) + "%"
      else
        qstr = "%.4f" % qv
      end
      "#{qvDesc}: #{qstr}, #{subQvDesc}: [#{sqs}]"
    end

    def inspect
      qinspect(@qv, @sub_qvs) + ", Obj. version: #{@version}"
    end

    # Refer all other methods to the main quality value.
    # NOTE(review): no matching respond_to_missing? override, so respond_to?
    # will not reflect the delegated methods.
    def method_missing(meth, *args, &block)
      @qv.send(meth, *args, &block)
    end
  end

  # Return a single quality value for the whole objective for a given candidate.
  # By default this uses a variant of Bentley and Wakefield's sum-of-weighted-global-ratios (SWGR)
  # called mean-of-weighted-global-ratios (MWGR) which always returns a fitness value
  # in the range (0.0, 1.0) with 1.0 signaling the best fitness seen so far. The scale is adaptive
  # though so that the best candidate so far always has a fitness value of 1.0.
  def quality_value(candidate, weights = nil)
    return candidate._quality_value_without_check if quality_value_is_up_to_date?(candidate)
    num_aspects == 1 ? qv_single(candidate) : qv_mwgr(candidate, weights)
  end

  # A cached quality value is valid only if it was computed by this objective
  # at the current scale version.
  def quality_value_is_up_to_date?(candidate)
    candidate._objective == self && candidate._objective_version == current_version
  end

  # Stamp the object with this objective, the current version, and its qv
  # (uses the accessors monkey-patched onto Object below).
  def update_quality_value_in_object(object, qv)
    object._objective = self
    object._objective_version = current_version
    object._quality_value_without_check = qv
  end

  # Recompute the candidate's quality value only if its cache is stale.
  def ensure_updated_quality_value(candidate)
    return if quality_value_is_up_to_date?(candidate)
    quality_value(candidate)
  end

  def rank_candidates(candidates, weights = nil)
    mwgr_rank_candidates(candidates, weights)
  end

  # Rank candidates from best to worst. NOTE! We do the steps of MWGR separately since we must
  # update the global mins and maxs before calculating the SWG ratios.
  # Returns an array of [candidate, QualityValue, sub_quality_values] triples,
  # sorted best first.
  def mwgr_rank_candidates(candidates, weights = nil)
    sub_qvss = candidates.map {|c| sub_objective_values(c)}
    sub_qvss.zip(candidates).each {|sub_qvs, c| update_global_mins_and_maxs(sub_qvs, c)}
    sub_qvss.each_with_index.map do |sub_qvs, i|
      qv = mwgr_ratios(sub_qvs).weighted_mean(weights)
      qv = QualityValue.new(qv, sub_qvs, self)
      update_quality_value_in_object(candidates[i], qv)
      [candidates[i], qv, sub_qvs]
    end.sort_by {|a| -a[1]} # sort by the ratio values in descending order
  end

  # Hook called by the optimizer when a run finishes; logs the Pareto front.
  def note_end_of_optimization(optimizer)
    log("Objective reporting the Pareto front", info_pareto_front())
  end

  def info_pareto_front
    @pareto_front.each_with_index.map do |c, i|
      "Pareto front candidate for objective #{aspect_methods[i]}: #{map_candidate_vector_to_candidate_to_be_evaluated(c).inspect}"
    end.join("\n")
  end

  # Return the quality value assuming this is a single objective.
  def qv_single(candidate)
    qv = self.send(aspect_methods.first,
      map_candidate_vector_to_candidate_to_be_evaluated(candidate))
    update_quality_value_in_object(candidate, qv)
    qv
  end

  # Mean-of-weighted-global-ratios (MWGR) quality value.
  def qv_mwgr(candidate, weights = nil)
    mwgr_rank_candidates([candidate], weights).first[1]
  end

  # Calculate the SWGR ratios, one per sub-objective value.
  def mwgr_ratios(subObjectiveValues)
    subObjectiveValues.each_with_index.map {|v,i| ratio_for_aspect(i, v)}
  end

  # Normalize a raw aspect value to [0, 1] against the global min/max seen so
  # far; 1.0 is the best direction for that aspect (min or max).
  # protected_division_with guards the max == min case.
  def ratio_for_aspect(aspectIndex, value)
    min, max = global_min_values_per_aspect[aspectIndex], global_max_values_per_aspect[aspectIndex]
    if is_min_aspect?(aspectIndex)
      numerator = max - value
    else
      numerator = value - min
    end
    numerator.to_f.protected_division_with(max - min)
  end

  # The vectors can be mapped to a more complex candidate object before we call
  # the sub objectives to calc their quality values. Default is no mapping but subclasses
  # can override this.
  def map_candidate_vector_to_candidate_to_be_evaluated(vector)
    vector
  end

  # Evaluate every aspect method on the (mapped) candidate.
  def sub_objective_values(candidateVector)
    candidate = map_candidate_vector_to_candidate_to_be_evaluated(candidateVector)
    aspect_methods.map {|omethod| self.send(omethod, candidate)}
  end

  def update_global_mins_and_maxs(aspectValues, candidate = nil)
    aspectValues.each_with_index {|v, i| update_global_min_and_max(i, v, candidate)}
  end

  # Widen the global scale for one aspect when a new extreme is seen; each
  # widening resets the quality scale (and thus the version number).
  def update_global_min_and_max(aspectIndex, value, candidate)
    min = global_min_values_per_aspect[aspectIndex]
    if value < min
      reset_quality_scale(candidate, aspectIndex, :min)
      global_min_values_per_aspect[aspectIndex] = value
      log_new_min_max(aspectIndex, value, min, "min")
    end
    max = global_max_values_per_aspect[aspectIndex]
    if value > max
      reset_quality_scale(candidate, aspectIndex, :max)
      global_max_values_per_aspect[aspectIndex] = value
      log_new_min_max(aspectIndex, value, max, "max")
    end
  end

  def log_new_min_max(index, newValue, oldValue, description)
    log("New global #{description} for sub-objective #{aspect_methods[index]}",
      ("a %.3f" % (100.0 * (newValue - oldValue).protected_division_with(oldValue))) + "% difference",
      "new = #{newValue}, old = #{oldValue}",
      "scale is now [#{global_min_values_per_aspect[index]}, #{global_max_values_per_aspect[index]}]",
      "objective version = #{current_version}")
  end

  # Log via the attached logger, if any (silently skipped otherwise).
  def log(msg, *values)
    @logger.anote(msg, *values) if @logger
  end

  # Global min values for each aspect. Needed for SWGR. Updated every time we see a new
  # quality value for an aspect.
  # All are plus infinity when we have not seen any values yet (so any first
  # value becomes the new min).
  def global_min_values_per_aspect
    @global_min_values_per_aspect ||= Array.new(num_aspects).map {Float::INFINITY}
  end

  # Global max values for each aspect. Needed for SWGR. Updated every time we see a new
  # quality value for an aspect.
  # All are minus infinity when we have not seen any values yet.
  def global_max_values_per_aspect
    @global_max_values_per_aspect ||= Array.new(num_aspects).map {-Float::INFINITY}
  end

  private

  # All public methods matching the objective_(min|max)_* naming convention.
  def aspect_methods
    @aspect_methods ||= self.methods.select {|m| is_aspect_method?(m)}
  end

  # Memoized per-aspect flags: true when the aspect is to be minimized.
  def is_min_aspect?(aspectIndex)
    (@is_min_aspect ||= (aspect_methods.map {|m| is_min_aspect_method?(m)}))[aspectIndex]
  end

  def is_aspect_method?(methodNameAsSymbolOrString)
    methodNameAsSymbolOrString.to_s =~ /^objective_(min|max)_([\w_]+)$/
  end

  def is_min_aspect_method?(methodNameAsSymbolOrString)
    methodNameAsSymbolOrString.to_s =~ /^objective_min_([\w_]+)$/
  end
end
|
269
|
+
|
270
|
+
# We add strangely named accessor methods so we can attach the quality values to objects.
# We use strange names to minimize risk of method name conflicts.
class Object
  attr_accessor :_quality_value_without_check, :_objective, :_objective_version

  # The candidate's quality value, refreshed first via the attached objective
  # (when one is present) so a stale cached value is never returned.
  def _quality_value
    if defined?(@_objective) && @_objective
      @_objective.ensure_updated_quality_value(self)
    end
    @_quality_value_without_check ||= nil # To avoid warning if unset
  end
end
|
279
|
+
|
280
|
+
# Short hand for when the objective function is given as a block that should be minimized.
class FeldtRuby::Optimize::ObjectiveMinimizeBlock < FeldtRuby::Optimize::Objective
  # Store the block as the single sub-objective to minimize.
  def initialize(&costBlock)
    super()
    @objective_function = costBlock
  end

  # Single minimized aspect: splat the candidate's elements into the block.
  def objective_min_cost_function(candidate)
    @objective_function.(*candidate.to_a)
  end
end
|
291
|
+
|
292
|
+
# Short hand for when the objective function is given as a block that should be maximized.
class FeldtRuby::Optimize::ObjectiveMaximizeBlock < FeldtRuby::Optimize::Objective
  # Store the block as the single sub-objective to maximize.
  def initialize(&rewardBlock)
    super()
    @objective_function = rewardBlock
  end

  # Single maximized aspect: splat the candidate's elements into the block.
  def objective_max_cost_function(candidate)
    @objective_function.(*candidate.to_a)
  end
end
|
@@ -0,0 +1,145 @@
|
|
1
|
+
require 'feldtruby/optimize'
|
2
|
+
require 'feldtruby/optimize/objective'
|
3
|
+
require 'feldtruby/optimize/search_space'
|
4
|
+
require 'feldtruby/optimize/stdout_logger'
|
5
|
+
require 'feldtruby/optimize/max_steps_termination_criterion'
|
6
|
+
require 'feldtruby/math/rand'
|
7
|
+
|
8
|
+
module FeldtRuby::Optimize
  # Default options for every Optimizer unless the caller overrides them.
  DefaultOptimizationOptions = {
    :logger => FeldtRuby::Optimize::StdOutLogger,   # logger CLASS, instantiated per optimizer
    :maxNumSteps => 10_000,                          # passed to the termination criterion
    :terminationCriterionClass => FeldtRuby::Optimize::MaxStepsTerminationCriterion,
    :verbose => false,
    :populationSize => 100,
  }

  # Merge user options over the defaults and instantiate the termination
  # criterion from the (possibly overridden) class and step budget.
  # Returns the merged options hash with :terminationCriterion set.
  def self.override_default_options_with(options)
    o = DefaultOptimizationOptions.clone.update(options)
    o[:terminationCriterion] = o[:terminationCriterionClass].new(o[:maxNumSteps])
    o
  end
end
|
23
|
+
|
24
|
+
# Find a vector of float values that optimizes a given objective.
class FeldtRuby::Optimize::Optimizer
  attr_reader :objective, :search_space, :best, :best_quality_value, :best_sub_quality_values, :num_optimization_steps, :termination_criterion

  # objective::   the Objective to optimize
  # searchSpace:: where candidates are sampled from
  # options::     overrides for DefaultOptimizationOptions
  def initialize(objective, searchSpace = FeldtRuby::Optimize::DefaultSearchSpace, options = {})
    @best = nil # To avoid warnings if not set
    @objective, @search_space = objective, searchSpace
    @options = FeldtRuby::Optimize.override_default_options_with(options)
    initialize_options(@options)
    # Share our logger with the objective so its scale changes get logged too.
    @objective.logger = @logger
  end

  # Subclasses extend this to pull their own settings out of the options hash.
  def initialize_options(options)
    @logger = options[:logger].new(self, options[:verbose])
    @termination_criterion = options[:terminationCriterion]
  end

  # Optimize the objective in the given search space.
  # Returns the best candidate found.
  def optimize()
    @num_optimization_steps = 0
    # Set up a random best since other methods require it
    update_best([search_space.gen_candidate()])
    begin
      @logger.note_optimization_starts()
      while !termination_criterion.terminate?(self)
        new_candidates = optimization_step()
        @num_optimization_steps += 1
        @logger.note_another_optimization_step(@num_optimization_steps)
        update_best(new_candidates)
      end
    rescue Exception => e
      # NOTE(review): rescuing Exception also swallows SignalException/SystemExit;
      # StandardError is likely the intent — confirm before changing, since tests
      # may rely on optimize never raising.
      @logger.note_termination("!!! - Optimization FAILED with exception: #{e.message} - !!!" + e.backtrace.join("\n"))
    ensure
      @logger.note_termination("!!! - Optimization FINISHED after #{@num_optimization_steps} steps - !!!")
    end
    @objective.note_end_of_optimization(self)
    @logger.note_end_of_optimization(self)
    @best # return the best
  end

  # Run one optimization step. Default is to do nothing, i.e. this is just a superclass,
  # but subclasses need to implement this. Must return an array of new candidates.
  def optimization_step()
  end

  # Rank all candidates (together with the current best, if any), then update
  # the best one if a new best was found. Returns true iff the best changed.
  def update_best(candidates)
    if @best
      ranked = objective.rank_candidates(candidates + [@best])
    else
      ranked = objective.rank_candidates(candidates)
    end
    new_best, new_quality_value, new_sub_qvalues = ranked.first
    # Since some objectives are not deterministic the best candidate's rank may
    # shift, so we re-read the old best's quality from the fresh ranking.
    if new_best != @best
      if @best
        old_best, new_qv_old_best, sub_qv_old_best = ranked.select {|a| a.first == @best}.first
      end
      @logger.note_new_best(new_best, new_quality_value, new_sub_qvalues,
        @best, new_qv_old_best, sub_qv_old_best)
      @best = new_best
      @best_quality_value = new_quality_value
      @best_sub_quality_values = new_sub_qvalues
      true
    else
      false
    end
  end
end
|
94
|
+
|
95
|
+
# Optimizer superclass for algorithms that maintain a population of candidates.
class FeldtRuby::Optimize::PopulationBasedOptimizer < FeldtRuby::Optimize::Optimizer
  attr_reader :population

  def initialize_options(options)
    super
    initialize_population(@options[:populationSize])
    initialize_all_indices()
  end

  # Create a population of a given size by randomly sampling candidates from the search space.
  def initialize_population(sizeOfPopulation)
    @population = Array.new(sizeOfPopulation).map {search_space.gen_candidate()}
  end

  def population_size
    @population.length
  end

  def initialize_all_indices
    # We set up an array of the indices to all candidates of the population so we can later sample from it
    # This should always contain all indices even if they might be out of order. This is because we
    # only swap! elements in this array, never delete any.
    @all_indices = (0...population_size).to_a
  end

  # Sample indices from the population without replacement.
  # Partial Fisher-Yates shuffle over @all_indices: each draw picks a random
  # index from the not-yet-drawn tail and swaps it to the front.
  def sample_population_indices_without_replacement(numSamples)
    sampled_indices = []
    numSamples.times do |i|
      index = i + rand_int(population_size - i)
      # NOTE(review): Array#swap! is a feldtruby extension (lib/feldtruby/array.rb),
      # not core Ruby — it appears to return the swapped pair; confirm there.
      sampled_index, skip = @all_indices.swap!(i, index)
      sampled_indices << sampled_index
    end
    sampled_indices
  end

  # Get candidates from population at given indices.
  def get_candidates_with_indices(indices)
    indices.map {|i| @population[i]}
  end

  # Get candidate from population at given index.
  def get_candidate(index)
    @population[index]
  end

  # Update population with candidate at given index.
  def update_candidate_in_population(index, candidate)
    @population[index] = candidate
  end
end
|
@@ -0,0 +1,9 @@
|
|
1
|
+
require 'feldtruby/optimize/optimizer'
|
2
|
+
|
3
|
+
# Random search that optimizes a given objective function.
class FeldtRuby::Optimize::RandomSearcher < FeldtRuby::Optimize::Optimizer
  # Each step simply proposes one fresh random candidate from the search
  # space; the Optimizer superclass keeps it if it beats the current best.
  def optimization_step()
    fresh_candidate = search_space.gen_candidate()
    [fresh_candidate]
  end
end
|