ironfan 3.1.5 → 3.1.6

@@ -170,6 +170,19 @@ module Ironfan
     @fog_keypairs = {}.tap{|hsh| Ironfan.fog_connection.key_pairs.each{|kp| hsh[kp.name] = kp } }
   end
 
+  def self.dry_run?
+    Ironfan.chef_config[:dry_run]
+  end
+
+  def self.placement_groups
+    return @placement_groups if @placement_groups
+    Chef::Log.debug("Using fog to catalog all placement_groups")
+    resp = self.fog_connection.describe_placement_groups unless dry_run?
+    return {} unless resp.respond_to?(:body) && resp.body.present?
+    arr = resp.body['placementGroupSet']
+    @placement_groups = arr.inject({}){|acc, pg| acc[pg['groupName']] = pg ; acc }
+  end
+
   def safely *args, &block
     Ironfan.safely(*args, &block)
   end
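A minimal sketch of how this memoized catalog is meant to be consumed (it mirrors `ensure_placement_group` further down this diff); the group name is illustrative and a configured fog connection is assumed:

```ruby
pg_name = 'gibbon_cluster'   # hypothetical placement group name
unless Ironfan.placement_groups.include?(pg_name)
  # create the group with the 'cluster' strategy, skipping the API call on a dry run
  Ironfan.fog_connection.create_placement_group(pg_name, 'cluster') unless Ironfan.dry_run?
  # record it so later servers in the same run see it without another describe call
  Ironfan.placement_groups[pg_name] = { 'groupName' => pg_name, 'strategy' => 'cluster' }
end
```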
@@ -24,9 +24,11 @@ module Ironfan
       if client_key.body then cloud.user_data.merge({ :client_key => client_key.body })
       else cloud.user_data.merge({ :validation_key => cloud.validation_key }) ; end
       #
-      {
+      description = {
         :image_id => cloud.image_id,
         :flavor_id => cloud.flavor,
+        :vpc_id => cloud.vpc,
+        :subnet_id => cloud.subnet,
         :groups => cloud.security_groups.keys,
         :key_name => cloud.keypair.to_s,
         # Fog does not actually create tags when it creates a server.
@@ -38,15 +40,20 @@ module Ironfan
         :block_device_mapping => block_device_mapping,
         :availability_zone => self.default_availability_zone,
         :monitoring => cloud.monitoring,
-        # :disable_api_termination => cloud.permanent,
-        # :instance_initiated_shutdown_behavior => instance_initiated_shutdown_behavior,
+        # permanence is applied during sync
       }
+      if needs_placement_group?
+        ui.warn "1.3.1 and earlier versions of Fog don't correctly support placement groups, so your nodes will land willy-nilly. We're working on a fix"
+        description[:placement] = { 'groupName' => cloud.placement_group.to_s }
+      end
+      description
     end
 
     #
     # Takes key-value pairs and idempotently sets those tags on the cloud machine
     #
     def fog_create_tags(fog_obj, desc, tags)
+      tags['Name'] ||= tags['name'] if tags.has_key?('name')
       tags_to_create = tags.reject{|key, val| fog_obj.tags[key] == val.to_s }
       return if tags_to_create.empty?
       step(" tagging #{desc} with #{tags_to_create.inspect}", :green)
@@ -107,6 +114,23 @@ module Ironfan
       end
     end
 
+    def ensure_placement_group
+      return unless needs_placement_group?
+      pg_name = cloud.placement_group.to_s
+      desc = "placement group #{pg_name} for #{self.fullname} (vs #{Ironfan.placement_groups.inspect})"
+      return if Ironfan.placement_groups.include?(pg_name)
+      safely do
+        step(" creating #{desc}", :blue)
+        unless_dry_run{ Ironfan.fog_connection.create_placement_group(pg_name, 'cluster') }
+        Ironfan.placement_groups[pg_name] = { 'groupName' => pg_name, 'strategy' => 'cluster' }
+      end
+      pg_name
+    end
+
+    def needs_placement_group?
+      cloud.flavor_info[:placement_groupable]
+    end
+
     def associate_public_ip
       address = self.cloud.public_ip
       return unless self.in_cloud? && address
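On the cluster side, a placement group would be requested from the cloud block. A sketch only: the cluster, flavor, and group names are examples, and the block form follows the DSL style used elsewhere in this release's notes. `needs_placement_group?` keys off the flavor's `:placement_groupable` flag, so only cluster-compute flavors trigger creation.

```ruby
Ironfan.cluster :gibbon do
  cloud(:ec2) do
    flavor          'cc1.4xlarge'     # cluster-compute flavors support placement groups
    placement_group 'gibbon_cluster'  # ensure_placement_group creates this with the 'cluster' strategy
  end
end
```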
@@ -130,6 +154,21 @@ module Ironfan
       end
     end
 
+    def set_instance_attributes
+      return unless self.in_cloud? && (not self.cloud.permanent.nil?)
+      desc = "termination flag #{permanent?} for #{self.fullname}"
+      # the EC2 API does not surface disable_api_termination as a value, so we
+      # have to set it every time.
+      safely do
+        step(" setting #{desc}", :blue)
+        unless_dry_run do
+          Ironfan.fog_connection.modify_instance_attribute(self.fog_server.id, {
+            'DisableApiTermination.Value' => permanent?, })
+        end
+        true
+      end
+    end
+
   end
 
   class ServerSlice
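For reference, the equivalent raw fog call (the instance id is made up); because EC2 never reports `disable_api_termination` back, `set_instance_attributes` re-issues it on every sync rather than diffing against current state:

```ruby
# same request the method above makes, shown standalone
Ironfan.fog_connection.modify_instance_attribute('i-0123abcd',
  'DisableApiTermination.Value' => true)
```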
@@ -28,7 +28,10 @@ module Ironfan
     def self.get_all
       groups_list = Ironfan.fog_connection.security_groups.all
       @@all = groups_list.inject(Mash.new) do |hsh, fog_group|
-        hsh[fog_group.name] = fog_group ; hsh
+        # AWS security_groups are strangely case sensitive, allowing upper-case but colliding regardless
+        # of the case. This forces all names to lowercase, and matches against that below.
+        # See https://github.com/infochimps-labs/ironfan/pull/86 for more details.
+        hsh[fog_group.name.downcase] = fog_group ; hsh
       end
     end
 
@@ -37,6 +40,7 @@ module Ironfan
     end
 
     def self.get_or_create(group_name, description)
+      group_name = group_name.to_s.downcase
       # FIXME: the '|| Ironfan.fog' part is probably unnecessary
       fog_group = all[group_name] || Ironfan.fog_connection.security_groups.get(group_name)
       unless fog_group
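A self-contained illustration of the lookup problem the downcasing fixes (group names are examples): AWS allows a group named `WebNode` but treats later references to `webnode` as a collision, so the catalog and all lookups now key on the lowercased name.

```ruby
groups  = [{ :name => 'WebNode' }, { :name => 'db' }]            # stand-ins for fog group objects
catalog = groups.inject({}){|hsh, g| hsh[g[:name].downcase] = g ; hsh }

catalog['webnode']           # => {:name=>"WebNode"} -- found despite the case difference
catalog['WebNode'.downcase]  # same entry; no accidental attempt to create a duplicate group
```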
@@ -89,7 +93,7 @@ module Ironfan
         step("authorizing access from all machines in #{other_name} to #{name}", :blue)
         self.class.get_or_create(other_name, "Authorized to access #{name}")
         begin fog_group.authorize_group_and_owner(other_name, authed_owner)
-        rescue StandardError => e ; ui.warn e ; end
+        rescue StandardError => err ; handle_security_group_error(err) ; end
       end
       @group_authorized_by.uniq.each do |other_name|
         authed_owner = self.owner_id
@@ -97,13 +101,21 @@ module Ironfan
         next if group_permission_already_set?(other_group, self.name, authed_owner)
         step("authorizing access to all machines in #{other_name} from #{name}", :blue)
         begin other_group.authorize_group_and_owner(self.name, authed_owner)
-        rescue StandardError => e ; ui.warn e ; end
+        rescue StandardError => err ; handle_security_group_error(err) ; end
       end
       @range_authorizations.uniq.each do |range, cidr_ip, ip_protocol|
         next if range_permission_already_set?(fog_group, range, cidr_ip, ip_protocol)
         step("opening #{ip_protocol} ports #{range} to #{cidr_ip}", :blue)
         begin fog_group.authorize_port_range(range, { :cidr_ip => cidr_ip, :ip_protocol => ip_protocol })
-        rescue StandardError => e ; ui.warn e ; end
+        rescue StandardError => err ; handle_security_group_error(err) ; end
+      end
+    end
+
+    def handle_security_group_error(err)
+      if (/has already been authorized/ =~ err.to_s)
+        Chef::Log.debug err
+      else
+        ui.warn(err)
       end
     end
 
@@ -84,7 +84,12 @@ module Ironfan
       in_chef?
     end
 
+    def permanent?
+      !! self.cloud.permanent
+    end
+
     def killable?
+      return false if permanent?
       in_chef? || created?
     end
 
@@ -101,6 +106,10 @@ module Ironfan
       @tags[key]
     end
 
+    def public_hostname
+      give_me_a_hostname_from_one_of_these_seven_ways_you_assholes
+    end
+
     def chef_server_url() Chef::Config.chef_server_url ; end
     def validation_client_name() Chef::Config.validation_client_name ; end
     def validation_key() Chef::Config.validation_key ; end
@@ -235,6 +244,8 @@ module Ironfan
       attach_volumes
       create_tags
       associate_public_ip
+      ensure_placement_group
+      set_instance_attributes
     end
 
     def sync_to_chef
  def sync_to_chef
@@ -280,5 +291,23 @@ module Ironfan
280
291
  chef_node.save
281
292
  end
282
293
 
294
+ protected
295
+
296
+ def give_me_a_hostname_from_one_of_these_seven_ways_you_assholes
297
+ # note: there are not actually seven ways. That is the least absurd part of this situation.
298
+ case
299
+ when cloud.public_ip
300
+ cloud.public_ip
301
+ when fog_server && fog_server.respond_to?(:public_ip_address) && fog_server.public_ip_address.present?
302
+ fog_server.public_ip_address
303
+ when fog_server && fog_server.respond_to?(:ipaddress)
304
+ fog_server.ipaddress
305
+ when fog_server && fog_server.respond_to?(:dns_name)
306
+ fog_server.dns_name
307
+ else
308
+ nil
309
+ end
310
+ end
311
+
283
312
  end
284
313
  end
data/lib/ironfan.rb CHANGED
@@ -29,7 +29,7 @@ module Ironfan
 
   # path to search for cluster definition files
   def self.cluster_path
-    return Chef::Config[:cluster_path] if Chef::Config[:cluster_path]
+    return Array(Chef::Config[:cluster_path]) if Chef::Config[:cluster_path]
     raise "Holy smokes, you have no cookbook_path or cluster_path set up. Follow chef's directions for creating a knife.rb." if Chef::Config[:cookbook_path].blank?
     cl_path = Chef::Config[:cookbook_path].map{|dir| File.expand_path('../clusters', dir) }.uniq
     ui.warn "No cluster path set. Taking a wild guess that #{cl_path.inspect} is \nreasonable based on your cookbook_path -- but please set cluster_path in your knife.rb"
@@ -4,20 +4,20 @@
   - copy the knife/example-credentials directory
   - best to not live on github: use a private server and run
 
- ```
- repo=ORGANIZATION-credentials ; repodir=/gitrepos/$repo.git ; mkdir -p $repodir ; ( GIT_DIR=$repodir git init --shared=group --bare && cd $repodir && git --bare update-server-info && chmod a+x hooks/post-update )
- ```
+ ```
+ repo=ORGANIZATION-credentials ; repodir=/gitrepos/$repo.git ; mkdir -p $repodir ; ( GIT_DIR=$repodir git init --shared=group --bare && cd $repodir && git --bare update-server-info && chmod a+x hooks/post-update )
+ ```
 
   - git submodule it into knife as `knife/yourorg-credentials`
   - or, if somebody has added it,
 
- ```
- git pull
- git submodule update --init
- find . -iname '*.pem' -exec chmod og-rw {} \;
- cp knife/${OLD_CHEF_ORGANIZATION}-credentials/knife-user-${CHEF_USER}.rb knife/${CHEF_ORGANIZATION}-credentials
- cp knife/${OLD_CHEF_ORGANIZATION}-credentials/${CHEF_USER}.pem knife/${CHEF_ORGANIZATION}-credentials/
- ```
+ ```
+ git pull
+ git submodule update --init
+ find . -iname '*.pem' -exec chmod og-rw {} \;
+ cp knife/${OLD_CHEF_ORGANIZATION}-credentials/knife-user-${CHEF_USER}.rb knife/${CHEF_ORGANIZATION}-credentials
+ cp knife/${OLD_CHEF_ORGANIZATION}-credentials/${CHEF_USER}.pem knife/${CHEF_ORGANIZATION}-credentials/
+ ```
 
 * create AWS account
   - [sign up for AWS + credit card + password]
@@ -30,14 +30,17 @@
 ## Populate Chef Server
 
 * create `prod` and `dev` environments by using
+
   ```
   knife environment create dev
   knife environment create prod
+  knife environment create stag
+  knife environment from file environments/stag.json
   knife environment from file environments/dev.json
   knife environment from file environments/prod.json
   ```
 
-  ```ruby
+  ```
   knife cookbook upload --all
   rake roles
   # if you have data bags, do that too
data/notes/INSTALL.md CHANGED
@@ -15,6 +15,10 @@ In all of the below,
 
 _Before you begin, you may wish to fork homebase repo, as you'll be making changes to personalize it for your platform that you may want to share with teammates. If you do so, replace all references to infochimps-labs/ironfan-homebase with your fork's path._
 
+1. Install system prerequisites (libXML and libXSLT). The following works under Debian/Ubuntu:
+
+        sudo apt-get install libxml2-dev libxslt1-dev
+
 1. Install the Ironfan gem (you may need to use `sudo`):
 
         gem install ironfan
@@ -23,6 +27,7 @@ _Before you begin, you may wish to fork homebase repo, as you'll be making chang
 
         git clone https://github.com/infochimps-labs/ironfan-homebase homebase
         cd homebase
+        bundle install
         git submodule update --init
         git submodule foreach git checkout master
 
@@ -126,4 +131,4 @@ The README file in each of the subdirectories for more information about what go
 
         knife cluster launch el_ridiculoso-gordo --bootstrap
 
-For more information about configuring Knife, see the [Knife documentation](http://wiki.opscode.com/display/chef/knife).
+For more information about configuring Knife, see the [Knife documentation](http://wiki.opscode.com/display/chef/knife).
@@ -22,7 +22,7 @@
 
 * [Systems *Bind* to provisioned resources](#binding)
 
-* [Binding declarations enable *Reasource Sharing*](#resource-sharing)
+* [Binding declarations enable *Resource Sharing*](#resource-sharing)
 
 <a name="overview"></a>
 ### Overview
@@ -1,29 +1,35 @@
 ## Compute Costs
 
- code $/mo $/day $/hr CPU/$ Mem/$ mem cpu cores cpcore storage bits IO type name
- t1.micro 14 0.48 $0.02 10.00 33.50 0.67 0.2 1 0.2 0 64 Low Micro Micro
- m1.small 61 2.04 $0.085 11.76 20.00 1.7 1 1 1 160 32 Moderate Standard Small
- c1.medium 123 4.08 $0.17 29.41 10.00 1.7 5 2 2.5 350 32 Moderate High-CPU Medium
- m1.large 246 8.16 $0.34 11.76 22.06 7.5 4 2 2 850 64 High Standard Large
- m2.xlarge 363 12.00 $0.50 13.00 35.40 17.7 6.5 2 3.25 420 64 Moderate High-Memory Extra Large
- c1.xlarge 493 16.32 $0.68 29.41 10.29 7 20 8 2.5 1690 64 High High-CPU Extra Large
- m1.xlarge 493 16.32 $0.68 11.76 22.06 15 8 4 2 1690 64 High Standard Extra Large
- m2.2xlarge 726 24.00 $1.00 13.00 34.20 34.2 13 4 3.25 850 64 High High-Memory Double Extra Large
- m2.4xlarge 1452 48.00 $2.00 13.00 34.20 68.4 26 8 3.25 1690 64 High High-Memory Quadruple Extra Large
- cc1.4xlarge 1161 38.40 $1.60 20.94 14.38 23 33.5 2 16.75 1690 64 Very High 10GB Compute Quadruple Extra Large
- cg1.4xlarge 1524 50.40 $2.10 15.95 10.48 22 33.5 2 16.75 1690 64 Very High 10GB Cluster GPU Quadruple Extra Large
+
+ code $/mo $/day $/hr CPU/$ Mem/$ mem cpu cores cpcore storage bits IO type name
+ t1.micro 15 0.48 .02 13 13 0.61 0.25 0.25 1 0 32 Low Micro Micro
+ m1.small 58 1.92 .08 13 21 1.7 1 1 1 160 32 Moderate Standard Small
+ m1.medium 116 3.84 .165 13 13 3.75 2 2 1 410 32 Moderate Standard Medium
+ c1.medium 120 3.96 .17 30 10 1.7 5 2 2.5 350 32 Moderate High-CPU Medium
+ m1.large 232 7.68 .32 13 23 7.5 4 2 2 850 64 High Standard Large
+ m2.xlarge 327 10.80 .45 14 38 17.1 6.5 2 3.25 420 64 Moderate High-Memory Extra Large
+ m1.xlarge 465 15.36 .64 13 23 15 8 4 2 1690 64 High Standard Extra Large
+ c1.xlarge 479 15.84 .66 30 11 7 20 8 2.5 1690 64 High High-CPU Extra Large
+ m2.2xlarge 653 21.60 .90 14 38 34.2 13 4 3.25 850 64 High High-Memory Double Extra Large
+ cc1.4xlarge 944 31.20 1.30 26 18 23 33.5 2 16.75 1690 64 10GB Compute Quadruple Extra Large
+ m2.4xlarge 1307 43.20 1.80 14 38 68.4 26 8 3.25 1690 64 High High-Memory Quadruple Extra Large
+ cg1.4xlarge 1525 50.40 2.10 16 10 22 33.5 2 16.75 1690 64 10GB Cluster GPU Quadruple Extra Large
+ cc2.8xlarge 1742 57.60 2.40 37 25 60.5 88 2 44 3370 64 10GB Compute Eight Extra Large
+
+ dummy header ln 15 0.48 0.02 12345 12345 0.61 0.25 0.25 1.00 6712345 32123 Low Micro Micro
 
 
 ## Storage Costs
 
- $/GB.hr $/GB.mo $/GB.mo $/Mio
+ $/GB..mo $/GB.mo $/Mio
 EBS Volume $0.10
- EBS Snapshot S3 $0.14
- EBS I/O $0.10
+ EBS I/O $0.10
+ EBS Snapshot S3 $0.083
 
- S3 1st tb $0.14 /gb/month
- S3 next 49tb $0.125 /gb/month
- S3 next 450tb $0.110 /gb/month
+ Std $/GB.mo Red.Red. $/GB.mo
+ S3 1st tb $0.125 $0.093
+ S3 next 49tb $0.110 $0.083
+ S3 next 450tb $0.095 $0.073
 
 ### Storing 1TB data
 
@@ -0,0 +1,273 @@
+
+ # v3.2.0 (future): Revamped undercarriage, spec coverage, standalone usage
+
+ This is a Snow Leopard-style version change. No new features to speak of, but a much more solid and predictable foundation.
+
+ * **significantly cleaner DSL mixin**: uses the new, awesome `Gorillib::Builder`, giving it a much cleaner handling of fields and collections
+
+ * **attributes are late-resolved**: in previous versions, the way you 'resolved' a server was to collapse the entire attribute set of cluster/facet/server hard onto the server model, a consistent source of bugs. Resolution is now done with the `Gorillib::Record::Overlay` mechanism, which means that you can set an attribute on the cluster and read it from the facet; change it later and all lower layers see the update.
+
+ * **standalone usable**: can use ironfan-knife as a standalone library.
+
+ # v3.3.x (future): Coherent universe of Servers, Components, Aspects
+
+ * **spec coverage**:
+
+ * **coherent data model**:
+
+    ComputeLayer -- common attributes of Provider, Cluster, Facet, Server
+      - overlay_stack of Cloud attributes
+
+    Universe -- across organizations
+    Organization -- one or many providers
+    Provider --
+      - has_many :clusters
+    Cluster --
+      - has_many :providers
+      - overlays :main_provider
+    Facet --
+      - has_one :cluster
+      - overlays :cluster
+    Server
+      - has_one :facet
+      - overlays :cluster
+      - has_one chef_node
+      - has_one machine
+
+
+    System Role Cookbook
+    Component Cookbook+Recipes
+
+
+
+ * **improved discovery**:
+
+ * **config isolation**:
+
+
+ ### Nitpicks
+
+
+ * make bootstrap_distro and image_name follow from os_version
+
+ * minidash just publishes announcements
+ * silverware is always included; it subsumes volumes
+
+ * if you add a `data_dir_for :hadoop` to
+
+ * volumes should name their `mount_point` after themselves by default
+
+ ### Components
+
+ * components replace roles (they are auto-generated by the component, and tie strictly to it)
+ *
+
+ ### Clusters
+
+ If clusters are more repeatable they won't be so bothersomely multi-provider:
+
+    Ironfan.cluster :gibbon do
+      cloud(:ec2) do
+        backing 'ebs'
+        permanent false
+      end
+      stack :systemwide
+      stack :devstack
+      stack :monitoring
+      stack :log_handling
+
+      component :hadoop_devstack
+      component :hadoop_dedicated
+
+      discovers :zookeeper, :realm => :zk
+      discovers :hbase, :realm => :hbase
+
+      facet :master do
+        component :hadoop_namenode
+        component :hadoop_secondarynn
+        component :hadoop_jobtracker
+      end
+      facet :worker do
+        component :hadoop_datanode
+        component :hadoop_tasktracker
+      end
+
+      volume :hadoop_data do
+        data_dir_for :hadoop_datanode, :hadoop_namenode, :hadoop_secondarynn
+        device '/dev/sdj1'
+        size 100
+        keep true
+      end
+    end
+
+
+ Here are ideas about how to get there
+
+    # silverware is always included; it subsumes volumes
+
+    organization :infochimps do
+      cloud(:ec2) do
+        availability_zones ['us-east-1d']
+        backing :ebs
+        image_name 'ironfan-natty'
+        bootstrap_distro 'ironfan-natty'
+        chef_client_script 'client.rb'
+        permanent true
+      end
+
+      volume(:default) do
+        keep true
+        snapshot_name :blank_xfs
+        resizable true
+        create_at_launch true
+      end
+
+      stack :systemwide do
+        system(:chef_client) do
+          run_state :on_restart
+        end
+        component :set_hostname
+        component :minidash
+        component :org_base
+        component :org_users
+        component :org_final
+      end
+
+      stack :devstack do
+        component :ssh
+        component :nfs_client
+        component :package_set
+      end
+
+      stack :monitoring do
+        component :zabbix_agent
+      end
+
+      stack :log_handling do
+        component :log_handling
+      end
+    end
+
+    stack :hadoop do
+    end
+
+    stack :hadoop_devstack do
+      component :pig
+      component :jruby
+      component :rstats
+    end
+
+    stack :hadoop_dedicated do
+      component :tuning
+    end
+
+    system :hadoop do
+      stack :hadoop_devstack
+      stack :zookeeper_client
+      stack :hbase_client
+    end
+
+    Ironfan.cluster :gibbon do
+      cloud(:ec2) do
+        backing 'ebs'
+        permanent false
+      end
+
+      system :systemwide do
+        exclude_stack :monitoring
+      end
+
+      # how are its components configured? distributed among machines?
+      system :hadoop do
+
+        # all servers will
+        # * have the `hadoop` role
+        # * have run_state => false for components with a daemon aspect by default
+
+        facet :master do
+          # component :hadoop_namenode means
+          # * this facet has the `hadoop_namenode` role
+          # * it has the component's security_groups
+          # * it sets node[:hadoop][:namenode][:run_state] = true
+          # * it will mount the volumes that adhere to this component
+          component :hadoop_namenode
+        end
+
+        # something gains eg zookeeper client if it discovers a zookeeper in another realm
+        # zookeeper must explicitly admit it discovers zookeeper, but can do that in the component
+
+        # what volumes should it use on those machines?
+        # create the volumes, pair it to components
+        # if a component is on a server, it adds its volumes.
+        # you can also add them explicitly.
+
+        # volume tags are applied automagically from their adherence to components
+
+        volume :hadoop_data do # will be assigned to servers with components it lists
+          data_dir_for :hadoop_datanode, :hadoop_namenode, :hadoop_secondarynn
+        end
+
+ ### Providers
+
+ I want to be able to:
+
+ * on a compute layer, modify its behavior depending on provider:
+ - example:
+
+    facet(:bob) do
+      cloud do
+        security_group :bob
+        authorize :from => :bobs_friends, :to => :bob
+      end
+      cloud(:ec2, :flavor => 'm1.small')
+      cloud(:rackspace, :flavor => '2GB')
+      cloud(:vagrant, :ram_mb => 256 )
+    end
+
+ - Any world that understands security groups will endeavor to make a `bob` security group, and authorize the `bobs_friends` group to use it.
+ - On EC2 and rackspace, the `flavor` attribute is set explicitly
+ - On vagrant (which got no `flavor`), we instead specify how much ram to supply
+ - On any other provider the flavor and machine ram will follow defaults.
+
+ * see all machines and clusters within an organization
+
+
+ ### Organizations
+
+ * see the entire universe; this might get hairy, but not ridiculous
+ - each org describes its providers; only those are used
+ - you don't have to do much to add a provider, just say `provider(:ec2)`
+ - you can configure the provider like this:
+
+    organization(:infochimps_test, :doc => 'Infochimps test cloud') do
+      provider(:vagrant)
+      provider(:ec2) do
+        access_key '...'
+        secret_access_key '...'
+      end
+      provider(:hp_cloud) do
+        access_key '...'
+        secret_access_key '...'
+      end
+    end
+
+    organization(:demo, :doc => 'Live client demo cloud') do
+      provider(:vagrant)
+      provider(:ec2) do #... end
+      provider(:hp_cloud) do #... end
+      provider(:rackspace) do #... end
+    end
+
+ - clusters can be declared directly or imported from other organizations:
+
+    organization :infochimps_test do
+      # developers' sandboxes
+      cluster :dev_sandboxes
+      # all the example clusters, for development
+      organization(:examples).clusters.each do |cl|
+        add_cluster cl
+      end
+    end
+
+ - if just starting, should see clusters;
+ - per-org cluster dirs