ironfan 3.1.0.rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +51 -0
- data/.rspec +3 -0
- data/CHANGELOG.md +130 -0
- data/Gemfile +26 -0
- data/LICENSE.md +201 -0
- data/README.md +328 -0
- data/Rakefile +104 -0
- data/TODO.md +16 -0
- data/VERSION +1 -0
- data/chefignore +41 -0
- data/cluster_chef-knife.gemspec +123 -0
- data/cluster_chef.gemspec +111 -0
- data/config/client.rb +59 -0
- data/config/proxy.pac +12 -0
- data/config/ubuntu10.04-ironfan.erb +157 -0
- data/config/ubuntu11.10-ironfan.erb +145 -0
- data/ironfan.gemspec +121 -0
- data/lib/chef/knife/bootstrap/ubuntu10.04-ironfan.erb +157 -0
- data/lib/chef/knife/bootstrap/ubuntu11.10-ironfan.erb +145 -0
- data/lib/chef/knife/cluster_bootstrap.rb +74 -0
- data/lib/chef/knife/cluster_kick.rb +94 -0
- data/lib/chef/knife/cluster_kill.rb +73 -0
- data/lib/chef/knife/cluster_launch.rb +164 -0
- data/lib/chef/knife/cluster_list.rb +50 -0
- data/lib/chef/knife/cluster_proxy.rb +126 -0
- data/lib/chef/knife/cluster_show.rb +61 -0
- data/lib/chef/knife/cluster_ssh.rb +141 -0
- data/lib/chef/knife/cluster_start.rb +40 -0
- data/lib/chef/knife/cluster_stop.rb +43 -0
- data/lib/chef/knife/cluster_sync.rb +77 -0
- data/lib/chef/knife/generic_command.rb +66 -0
- data/lib/chef/knife/knife_common.rb +195 -0
- data/lib/ironfan.rb +143 -0
- data/lib/ironfan/chef_layer.rb +299 -0
- data/lib/ironfan/cloud.rb +412 -0
- data/lib/ironfan/cluster.rb +118 -0
- data/lib/ironfan/compute.rb +153 -0
- data/lib/ironfan/deprecated.rb +33 -0
- data/lib/ironfan/discovery.rb +177 -0
- data/lib/ironfan/dsl_object.rb +124 -0
- data/lib/ironfan/facet.rb +144 -0
- data/lib/ironfan/fog_layer.rb +150 -0
- data/lib/ironfan/private_key.rb +130 -0
- data/lib/ironfan/role_implications.rb +58 -0
- data/lib/ironfan/security_group.rb +119 -0
- data/lib/ironfan/server.rb +281 -0
- data/lib/ironfan/server_slice.rb +260 -0
- data/lib/ironfan/volume.rb +157 -0
- data/spec/ironfan/cluster_spec.rb +13 -0
- data/spec/ironfan/facet_spec.rb +69 -0
- data/spec/ironfan/server_slice_spec.rb +19 -0
- data/spec/ironfan/server_spec.rb +112 -0
- data/spec/ironfan_spec.rb +193 -0
- data/spec/spec_helper.rb +50 -0
- data/spec/spec_helper/dummy_chef.rb +25 -0
- data/spec/test_config.rb +20 -0
- data/tasks/chef_config.rake +38 -0
- data/tasks/jeweler_use_alt_branch.rake +53 -0
- metadata +217 -0
data/lib/ironfan/server_slice.rb
@@ -0,0 +1,260 @@
module Ironfan
  #
  # A server group is a set of actual or implied servers.
  #
  # The idea is we want to be able to smoothly roll up settings
  #
  #
  class ServerSlice < Ironfan::DslObject
    attr_reader :name, :servers, :cluster

    def initialize cluster, servers
      super()
      @name    = "#{cluster.name} slice"
      @cluster = cluster
      @servers = servers
    end

    #
    # Enumerable
    #
    include Enumerable
    def each(&block)
      @servers.each(&block)
    end
    def length
      @servers.length
    end
    def empty?
      length == 0
    end
    [:select, :find_all, :reject, :detect, :find, :drop_while].each do |method|
      define_method(method) do |*args, &block|
        ServerSlice.new cluster, @servers.send(method, *args, &block)
      end
    end

    # Return the collection of servers that are not yet 'created'
    def uncreated_servers
      select{|svr| not svr.created? }
    end

    def bogus_servers
      select(&:bogus?)
    end

    #
    # Info!
    #

    def chef_nodes
      servers.map(&:chef_node).compact
    end

    def fog_servers
      servers.map(&:fog_server).compact
    end

    def security_groups
      sg = {}
      servers.each{|svr| sg.merge!(svr.cloud.security_groups) }
      sg
    end

    def facets
      servers.map(&:facet)
    end

    def chef_roles
      [ cluster.chef_roles, facets.map(&:chef_roles) ].flatten.compact.uniq
    end

    # hack -- take the ssh_identity_file from the first server.
    def ssh_identity_file
      return if servers.empty?
      servers.first.cloud.ssh_identity_file
    end

    #
    # Actions!
    #

    def start
      delegate_to_fog_servers( :start  )
      delegate_to_fog_servers( :reload )
    end

    def stop
      delegate_to_fog_servers( :stop   )
      delegate_to_fog_servers( :reload )
    end

    def destroy
      delegate_to_fog_servers( :destroy )
      delegate_to_fog_servers( :reload  )
    end

    def reload
      delegate_to_fog_servers( :reload )
    end

    def create_servers
      delegate_to_servers( :create_server )
    end

    def delete_chef
      delegate_to_servers( :delete_chef, true )
    end

    def sync_to_cloud
      sync_keypairs
      sync_security_groups
      delegate_to_servers( :sync_to_cloud )
    end

    def sync_to_chef
      sync_roles
      delegate_to_servers( :sync_to_chef )
    end

    #
    # Display!
    #

    # FIXME: this is a jumble. we need to pass it in some other way.

    MINIMAL_HEADINGS  = ["Name", "Chef?", "State", "InstanceID", "Public IP", "Private IP", "Created At"].to_set.freeze
    DEFAULT_HEADINGS  = (MINIMAL_HEADINGS + ['Flavor', 'AZ', 'Env']).freeze
    EXPANDED_HEADINGS = DEFAULT_HEADINGS + ['Image', 'Volumes', 'Elastic IP', 'SSH Key']

    MACHINE_STATE_COLORS = {
      'running'       => :green,
      'pending'       => :yellow,
      'stopping'      => :magenta,
      'shutting-down' => :magenta,
      'stopped'       => :cyan,
      'terminated'    => :blue,
      'not running'   => :blue,
    }

    #
    # This is a generic display routine for cluster-like sets of nodes. If you
    # call it with no args, you get the basic table that knife cluster show
    # draws. If you give it an array of strings, you can override the order and
    # headings displayed. If you also give it a block you can add your own logic
    # for generating content. The block is given a Ironfan::Server instance
    # for each item in the collection and should return a hash of Name,Value
    # pairs to merge into the minimal fields.
    #
    def display hh = :default
      headings =
        case hh
        when :minimal  then MINIMAL_HEADINGS
        when :default  then DEFAULT_HEADINGS
        when :expanded then EXPANDED_HEADINGS
        else hh.to_set end
      headings += ["Bogus"] if servers.any?(&:bogus?)
      # probably not necessary any more
      # servers = servers.sort{ |a,b| (a.facet_name <=> b.facet_name)*9 + (a.facet_index.to_i <=> b.facet_index.to_i)*3 + (a.facet_index <=> b.facet_index) }
      defined_data = servers.map do |svr|
        hsh = {
          "Name"  => svr.fullname,
          "Facet" => svr.facet_name,
          "Index" => svr.facet_index,
          "Chef?" => (svr.chef_node? ? "yes" : "[red]no[reset]"),
          "Bogus" => (svr.bogus? ? "[red]#{svr.bogosity}[reset]" : ''),
          "Env"   => svr.environment,
        }
        # if (cs = svr.chef_server)
        #   hsh.merge!(
        #     "Env" => cs.environment,
        #   )
        # end
        if (fs = svr.fog_server)
          hsh.merge!(
            "InstanceID" => (fs.id && fs.id.length > 0) ? fs.id : "???",
            "Flavor"     => fs.flavor_id,
            "Image"      => fs.image_id,
            "AZ"         => fs.availability_zone,
            "SSH Key"    => fs.key_name,
            "State"      => "[#{MACHINE_STATE_COLORS[fs.state] || 'white'}]#{fs.state}[reset]",
            "Public IP"  => fs.public_ip_address,
            "Private IP" => fs.private_ip_address,
            "Created At" => fs.created_at.strftime("%Y%m%d-%H%M%S")
          )
        else
          hsh["State"] = "not running"
        end
        hsh['Volumes'] = []
        svr.composite_volumes.each do |name, vol|
          if    vol.ephemeral_device? then next
          elsif vol.volume_id         then hsh['Volumes'] << vol.volume_id
          elsif vol.create_at_launch? then hsh['Volumes'] << vol.snapshot_id
          end
        end
        hsh['Volumes']    = hsh['Volumes'].join(',')
        hsh['Elastic IP'] = svr.cloud.public_ip if svr.cloud.public_ip
        if block_given?
          extra_info = yield(svr)
          hsh.merge!(extra_info)
          headings += extra_info.keys
        end
        hsh
      end
      if defined_data.empty?
        ui.info "Nothing to report"
      else
        Formatador.display_compact_table(defined_data, headings.to_a)
      end
    end

    def to_s
      str = super
      str[0..-2] + " #{@servers.map(&:fullname)}>"
    end

    def joined_names
      map(&:name).join(", ").gsub(/, ([^,]*)$/, ' and \1')
    end

    # Calls block on each server in parallel, each in its own thread
    #
    # @example
    #   target = Ironfan::Cluster.slice('web_server')
    #   target.parallelize{|svr| svr.launch }
    #
    # @yield each server, in turn
    #
    # @return array (in same order as servers) of each block's result
    def parallelize
      servers.map do |svr|
        sleep(0.1) # avoid hammering with simultaneous requests
        Thread.new(svr){|svr| yield(svr) }
      end
    end

  protected

    # Helper methods for iterating through the servers to do things
    #
    # @param [Symbol]  method   -- method to call on each server
    # @param [Boolean] threaded -- execute each call in own thread
    #
    # @return array (in same order as servers) of results for that method
    def delegate_to_servers method, threaded = true
      if threaded                  # Call in threads
        threads = parallelize{|svr| svr.send(method) }
        threads.map{|t| t.join.value } # Wait, returning array of results
      else                         # Call the method for each server sequentially
        servers.map{|svr| svr.send(method) }
      end
    end

    def delegate_to_fog_servers method
      fog_servers.compact.map do |fs|
        fs.send(method)
      end
    end

  end
end
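Note: the display doc comment above describes three calling modes (default headings, an explicit heading list, and a block adding computed columns). A minimal usage sketch follows; the :gibbon cluster mirrors the facet spec further down, and the extra 'Full Name' column is purely illustrative, not part of the package:

    # Build a slice the same way the facet spec does, then render it.
    slice = Ironfan.cluster(:gibbon).facet(:namenode).slice('0-2')

    slice.display               # default table, as `knife cluster show` draws it
    slice.display(:expanded)    # adds Image, Volumes, Elastic IP, SSH Key columns

    # Explicit headings plus a block: the block receives each Ironfan::Server
    # and returns extra Name => Value pairs merged into that server's row.
    slice.display(['Name', 'State', 'Public IP']) do |svr|
      { 'Full Name' => svr.fullname }
    end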
data/lib/ironfan/volume.rb
@@ -0,0 +1,157 @@
module Ironfan
  #
  # Internal or external storage
  #
  class Volume < Ironfan::DslObject
    attr_reader   :parent
    attr_accessor :fog_volume
    has_keys(
      :name,
      # mountable volume attributes
      :device, :mount_point, :mount_options, :fstype, :mount_dump, :mount_pass,
      :mountable, :formattable, :resizable, :in_raid,
      # cloud volume attributes
      :attachable, :create_at_launch, :volume_id, :snapshot_id, :size, :keep, :availability_zone,
      # arbitrary tags
      :tags
    )

    VOLUME_DEFAULTS = {
      :fstype           => 'xfs',
      :mount_options    => 'defaults,nouuid,noatime',
      :keep             => true,
      :attachable       => :ebs,
      :create_at_launch => false,
      #
      :mountable        => true,
      :resizable        => false,
      :formattable      => false,
      :in_raid          => false,
    }

    # Snapshot for snapshot_name method.
    # Set your own by adding
    #
    #   VOLUME_IDS = Mash.new unless defined?(VOLUME_IDS)
    #   VOLUME_IDS.merge!({ :your_id => 'snap-whatever' })
    #
    # to your organization's knife.rb
    #
    VOLUME_IDS = Mash.new unless defined?(VOLUME_IDS)
    VOLUME_IDS.merge!({
      :blank_xfs => 'snap-d9c1edb1',
    })

    # Describes a volume
    #
    # @example
    #   Ironfan::Volume.new( :name => 'redis',
    #     :device => '/dev/sdp', :mount_point => '/data/redis', :fstype => 'xfs', :mount_options => 'defaults,nouuid,noatime'
    #     :size => 1024, :snapshot_id => 'snap-66494a08', :volume_id => 'vol-12312',
    #     :tags => {}, :keep => true )
    #
    def initialize attrs={}
      @parent = attrs.delete(:parent)
      super(attrs)
      @settings[:tags] ||= {}
    end

    # human-readable description for logging messages and such
    def desc
      "#{name} on #{parent.fullname} (#{volume_id} @ #{device})"
    end

    def defaults
      self.configure(VOLUME_DEFAULTS)
    end

    def ephemeral_device?
      volume_id =~ /^ephemeral/
    end

    # Named snapshots, as defined in Ironfan::Volume::VOLUME_IDS
    def snapshot_name(name)
      snap_id = VOLUME_IDS[name.to_sym]
      raise "Unknown snapshot name #{name} - is it defined in Ironfan::Volume::VOLUME_IDS?" unless snap_id
      self.snapshot_id(snap_id)
    end

    # With snapshot specified but volume missing, have it auto-created at launch
    #
    # Be careful with this -- you can end up with multiple volumes claiming to
    # be the same thing.
    #
    def create_at_launch?
      volume_id.blank? && self.create_at_launch
    end

    def in_cloud?
      !! fog_volume
    end

    def has_server?
      in_cloud? && fog_volume.server_id.present?
    end

    def reverse_merge!(other_hsh)
      super(other_hsh)
      self.tags.reverse_merge!(other_hsh.tags) if other_hsh.respond_to?(:tags) && other_hsh.tags.present?
      self
    end

    # An array of hashes with dorky-looking keys, just like Fog wants it.
    def block_device_mapping
      hsh = { 'DeviceName' => device }
      if ephemeral_device?
        hsh['VirtualName'] = volume_id
      elsif create_at_launch?
        raise "Must specify a size or a snapshot ID for #{self}" if snapshot_id.blank? && size.blank?
        hsh['Ebs.SnapshotId'] = snapshot_id if snapshot_id.present?
        hsh['Ebs.VolumeSize'] = size.to_s   if size.present?
        hsh['Ebs.DeleteOnTermination'] = (! keep).to_s
      else
        return
      end
      hsh
    end

  end


  #
  # Consider raising the chunk size to 256 and setting read_ahead 65536 if you are raid'ing EBS volumes
  #
  # * http://victortrac.com/EC2_Ephemeral_Disks_vs_EBS_Volumes
  # * http://orion.heroku.com/past/2009/7/29/io_performance_on_ebs/
  # * http://tech.blog.greplin.com/aws-best-practices-and-benchmarks
  # * http://stu.mp/2009/12/disk-io-and-throughput-benchmarks-on-amazons-ec2.html
  #
  class RaidGroup < Volume
    has_keys(
      :sub_volumes,  # volumes that comprise this raid group
      :level,        # RAID level (http://en.wikipedia.org/wiki/RAID#Standard_levels)
      :chunk,        # Raid chunk size (https://raid.wiki.kernel.org/articles/r/a/i/RAID_setup_cbb2.html)
      :read_ahead,   # read-ahead buffer
    )

    def desc
      "#{name} on #{parent.fullname} (#{volume_id} @ #{device} from #{sub_volumes.join(',')})"
    end

    def defaults()
      super
      fstype           'xfs'
      mount_options    "defaults,nobootwait,noatime,nouuid,comment=ironfan"
      attachable       false
      create_at_launch false
      #
      mountable        true
      resizable        false
      formattable      true
      #
      in_raid          false
      #
      sub_volumes      []
    end
  end
end
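Note: a quick sketch of how block_device_mapping assembles the Fog-style hash for a create-at-launch volume. The attribute values are invented, echoing the @example in the class comment above:

    vol = Ironfan::Volume.new(:name => 'redis',
      :device => '/dev/sdp', :mount_point => '/data/redis',
      :size => 100, :create_at_launch => true, :keep => false)
    vol.snapshot_name(:blank_xfs)  # looks up VOLUME_IDS, sets snapshot_id to 'snap-d9c1edb1'

    vol.create_at_launch?          #=> true -- no volume_id, and create_at_launch is set
    vol.block_device_mapping       #=> { 'DeviceName'              => '/dev/sdp',
                                   #     'Ebs.SnapshotId'          => 'snap-d9c1edb1',
                                   #     'Ebs.VolumeSize'          => '100',
                                   #     'Ebs.DeleteOnTermination' => 'true' }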
data/spec/ironfan/cluster_spec.rb
@@ -0,0 +1,13 @@
require File.expand_path(File.dirname(__FILE__) + '/../spec_helper')

require IRONFAN_DIR("lib/ironfan")

describe Ironfan::Cluster do
  describe 'discover!' do
    let(:cluster){ get_example_cluster(:monkeyballs) }

    it 'enumerates chef nodes' do
      cluster.discover!
    end
  end
end
data/spec/ironfan/facet_spec.rb
@@ -0,0 +1,69 @@
require File.expand_path(File.dirname(__FILE__) + '/../spec_helper')

require IRONFAN_DIR("lib/ironfan")

describe Ironfan::Facet do
  let(:cluster){ Ironfan.cluster(:gibbon) }
  let(:facet){
    cluster.facet(:namenode) do
      instances 5
    end
  }

  describe 'slicing' do
    it 'has servers' do
      facet.indexes.should       == [0, 1, 2, 3, 4]
      facet.valid_indexes.should == [0, 1, 2, 3, 4]
      facet.server(3){ name(:bob) }
      svrs = facet.servers
      svrs.length.should == 5
      svrs.map{|svr| svr.name }.should == ["gibbon-namenode-0", "gibbon-namenode-1", "gibbon-namenode-2", :bob, "gibbon-namenode-4"]
    end

    it 'servers have bogosity if out of range' do
      facet.server(69).should be_bogus
      facet.servers.select(&:bogus?).map(&:facet_index).should == [69]
      facet.indexes.should       == [0, 1, 2, 3, 4, 69]
      facet.valid_indexes.should == [0, 1, 2, 3, 4]
    end

    it 'returns all on nil or "", but [] means none' do
      facet.server(69)
      facet.slice('' ).map(&:facet_index).should == [0, 1, 2, 3, 4, 69]
      facet.slice(nil).map(&:facet_index).should == [0, 1, 2, 3, 4, 69]
      facet.slice([] ).map(&:facet_index).should == []
    end

    it 'slice returns all by default' do
      facet.server(69)
      facet.slice().map(&:facet_index).should == [0, 1, 2, 3, 4, 69]
    end

    it 'with an array returns specified indexes (bogus or not) in sorted order' do
      facet.server(69)
      facet.slice( [3, 1, 0] ).map(&:facet_index).should     == [0, 1, 3]
      facet.slice( [3, 1, 69, 0] ).map(&:facet_index).should == [0, 1, 3, 69]
    end

    it 'with an array does not create new dummy servers' do
      facet.server(69)
      facet.slice( [3, 1, 69, 0, 75, 123] ).map(&:facet_index).should == [0, 1, 3, 69]
      facet.has_server?(75).should be_false
      facet.has_server?(69).should be_true
    end

    it 'with a string, converts to intervals' do
      facet.slice('1'      ).map(&:facet_index).should == [1]
      facet.slice('5'      ).map(&:facet_index).should == []
      facet.slice('1-1'    ).map(&:facet_index).should == [1]
      facet.slice('0-1'    ).map(&:facet_index).should == [0,1]
      facet.slice('0-1,3-4').map(&:facet_index).should == [0,1,3,4]
      facet.slice('0-1,69' ).map(&:facet_index).should == [0,1,69]
      facet.slice('0-2,1-3').map(&:facet_index).should == [0,1,2,3]
      facet.slice('3-1'    ).map(&:facet_index).should == []
      facet.slice('2-5'    ).map(&:facet_index).should == [2,3,4]
      facet.slice(1).map(&:facet_index).should == [1]
    end

  end
end