ironfan 5.0.11 → 6.0.0
Sign up to get free protection for your applications and to get access to all the features.
- data/.gitignore +4 -0
- data/.gitmodules +3 -0
- data/Gemfile +8 -26
- data/Gemfile.lock +38 -41
- data/NOTES-REALM.md +172 -0
- data/Rakefile +19 -77
- data/config/ubuntu12.04-ironfan.erb +7 -0
- data/ironfan.gemspec +28 -225
- data/lib/chef/cluster_knife.rb +26 -0
- data/lib/chef/knife/bootstrap/ubuntu12.04-ironfan.erb +7 -0
- data/lib/chef/knife/cluster_bootstrap.rb +1 -3
- data/lib/chef/knife/cluster_diff.rb +2 -8
- data/lib/chef/knife/cluster_kick.rb +1 -3
- data/lib/chef/knife/cluster_kill.rb +1 -2
- data/lib/chef/knife/cluster_launch.rb +17 -34
- data/lib/chef/knife/cluster_list.rb +6 -5
- data/lib/chef/knife/cluster_proxy.rb +1 -3
- data/lib/chef/knife/cluster_pry.rb +1 -2
- data/lib/chef/knife/cluster_show.rb +6 -7
- data/lib/chef/knife/cluster_ssh.rb +10 -8
- data/lib/chef/knife/cluster_start.rb +1 -2
- data/lib/chef/knife/cluster_stop.rb +1 -2
- data/lib/chef/knife/cluster_sync.rb +2 -3
- data/lib/chef/knife/ironfan_knife_common.rb +58 -18
- data/lib/chef/knife/ironfan_script.rb +0 -3
- data/lib/ironfan/broker/computer.rb +14 -11
- data/lib/ironfan/broker.rb +17 -12
- data/lib/ironfan/cookbook_requirements.rb +155 -0
- data/lib/ironfan/dsl/cloud.rb +2 -0
- data/lib/ironfan/dsl/cluster.rb +25 -15
- data/lib/ironfan/dsl/component.rb +12 -15
- data/lib/ironfan/dsl/compute.rb +10 -8
- data/lib/ironfan/dsl/ec2.rb +2 -26
- data/lib/ironfan/dsl/facet.rb +16 -14
- data/lib/ironfan/dsl/openstack.rb +147 -0
- data/lib/ironfan/dsl/realm.rb +23 -16
- data/lib/ironfan/dsl/security_group.rb +29 -0
- data/lib/ironfan/dsl/server.rb +14 -5
- data/lib/ironfan/dsl/static.rb +63 -0
- data/lib/ironfan/dsl/vsphere.rb +1 -0
- data/lib/ironfan/dsl.rb +1 -134
- data/lib/ironfan/headers.rb +19 -0
- data/lib/ironfan/provider/chef/node.rb +3 -2
- data/lib/ironfan/provider/ec2/machine.rb +10 -14
- data/lib/ironfan/provider/ec2/security_group.rb +58 -43
- data/lib/ironfan/provider/openstack/elastic_ip.rb +96 -0
- data/lib/ironfan/provider/openstack/keypair.rb +78 -0
- data/lib/ironfan/provider/openstack/machine.rb +371 -0
- data/lib/ironfan/provider/openstack/security_group.rb +224 -0
- data/lib/ironfan/provider/openstack.rb +69 -0
- data/lib/ironfan/provider/static/machine.rb +192 -0
- data/lib/ironfan/provider/static.rb +23 -0
- data/lib/ironfan/provider.rb +58 -1
- data/lib/ironfan/requirements.rb +17 -1
- data/lib/ironfan/version.rb +3 -0
- data/lib/ironfan.rb +107 -172
- data/spec/chef/cluster_bootstrap_spec.rb +2 -7
- data/spec/chef/cluster_launch_spec.rb +1 -2
- data/spec/fixtures/realms/samurai.rb +26 -0
- data/spec/integration/minimal-chef-repo/clusters/.gitkeep +0 -0
- data/spec/integration/minimal-chef-repo/config/.gitkeep +0 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/.gitignore +1 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/certificates/.gitkeep +0 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/client_keys/.gitkeep +0 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/data_bag_keys/.gitkeep +0 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/ec2_certs/.gitkeep +0 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/ec2_keys/.gitkeep +0 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/ironfantest-validator.pem +27 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/ironfantester.pem +27 -0
- data/spec/integration/minimal-chef-repo/tasks/.gitkeep +0 -0
- data/spec/ironfan/cluster_spec.rb +1 -2
- data/spec/ironfan/diff_spec.rb +0 -2
- data/spec/ironfan/dsl_spec.rb +6 -3
- data/spec/ironfan/ec2/cloud_provider_spec.rb +17 -18
- data/spec/ironfan/ec2/elb_spec.rb +44 -41
- data/spec/ironfan/ec2/security_group_spec.rb +45 -47
- data/spec/ironfan/manifest_spec.rb +0 -1
- data/spec/ironfan/plugin_spec.rb +55 -40
- data/spec/ironfan/realm_spec.rb +42 -30
- data/spec/spec_helper.rb +17 -31
- data/spec/{spec_helper → support}/dummy_chef.rb +0 -0
- data/spec/{spec_helper → support}/dummy_diff_drawer.rb +0 -0
- metadata +78 -155
- data/.rspec +0 -2
- data/.yardopts +0 -19
- data/VERSION +0 -2
- data/chefignore +0 -41
- data/notes/Future-development-proposals.md +0 -266
- data/notes/Home.md +0 -55
- data/notes/INSTALL-cloud_setup.md +0 -103
- data/notes/INSTALL.md +0 -134
- data/notes/Ironfan-Roadmap.md +0 -70
- data/notes/Upgrading-to-v4.md +0 -66
- data/notes/advanced-superpowers.md +0 -16
- data/notes/aws_servers.jpg +0 -0
- data/notes/aws_user_key.png +0 -0
- data/notes/cookbook-versioning.md +0 -11
- data/notes/core_concepts.md +0 -200
- data/notes/declaring_volumes.md +0 -3
- data/notes/design_notes-aspect_oriented_devops.md +0 -36
- data/notes/design_notes-ci_testing.md +0 -169
- data/notes/design_notes-cookbook_event_ordering.md +0 -249
- data/notes/design_notes-meta_discovery.md +0 -59
- data/notes/ec2-pricing_and_capacity.md +0 -75
- data/notes/ec2-pricing_and_capacity.numbers +0 -0
- data/notes/homebase-layout.txt +0 -102
- data/notes/knife-cluster-commands.md +0 -21
- data/notes/named-cloud-objects.md +0 -11
- data/notes/opscode_org_key.png +0 -0
- data/notes/opscode_user_key.png +0 -0
- data/notes/philosophy.md +0 -13
- data/notes/rake_tasks.md +0 -24
- data/notes/renamed-recipes.txt +0 -142
- data/notes/silverware.md +0 -85
- data/notes/style_guide.md +0 -300
- data/notes/tips_and_troubleshooting.md +0 -92
- data/notes/walkthrough-hadoop.md +0 -168
- data/notes/walkthrough-web.md +0 -166
- data/spec/fixtures/gunbai.rb +0 -24
- data/spec/test_config.rb +0 -20
- data/tasks/chef_config.rake +0 -38
data/notes/walkthrough-hadoop.md
DELETED
@@ -1,168 +0,0 @@
|
|
1
|
-
FIXME: Repurpose general structure to demonstrate a Hadoop cluster.
|
2
|
-
|
3
|
-
## Walkthrough: Hadoop Cluster
|
4
|
-
|
5
|
-
Here's a very simple cluster:
|
6
|
-
|
7
|
-
```ruby
|
8
|
-
Ironfan.cluster 'hadoop_demo' do
|
9
|
-
cloud(:ec2) do
|
10
|
-
flavor 't1.micro'
|
11
|
-
end
|
12
|
-
|
13
|
-
role :base_role
|
14
|
-
role :chef_client
|
15
|
-
role :ssh
|
16
|
-
|
17
|
-
# The database server
|
18
|
-
facet :dbnode do
|
19
|
-
instances 1
|
20
|
-
role :mysql_server
|
21
|
-
|
22
|
-
cloud do
|
23
|
-
flavor 'm1.large'
|
24
|
-
backing 'ebs'
|
25
|
-
end
|
26
|
-
end
|
27
|
-
|
28
|
-
# A throwaway facet for development.
|
29
|
-
facet :webnode do
|
30
|
-
instances 2
|
31
|
-
role :nginx_server
|
32
|
-
role :awesome_webapp
|
33
|
-
end
|
34
|
-
end
|
35
|
-
```
|
36
|
-
|
37
|
-
This code defines a cluster named hadoop_demo. A cluster is a group of servers united around a common purpose, in this case to serve a scalable web application.
|
38
|
-
|
39
|
-
The hadoop_demo cluster has two 'facets' -- dbnode and webnode. A facet is a subgroup of interchangeable servers that provide a logical set of systems: in this case, the systems that store the website's data and those that render it.
|
40
|
-
|
41
|
-
The dbnode facet has one server, which will be named `hadoop_demo-dbnode-0`; the webnode facet has two servers, `hadoop_demo-webnode-0` and `hadoop_demo-webnode-1`.
|
42
|
-
|
43
|
-
Each server inherits the appropriate behaviors from its facet and cluster. All the servers in this cluster have the `base_role`, `chef_client` and `ssh` roles. The dbnode machines additionally house a MySQL server, while the webnodes have an nginx reverse proxy for the custom `hadoop_demo_webapp`.
|
44
|
-
|
45
|
-
As you can see, the dbnode facet asks for a different flavor of machine (`m1.large`) than the cluster default (`t1.micro`). Settings in the facet override those in the cluster, and settings in the server override those of its facet. You economically describe only what's significant about each machine.
|
46
|
-
|
47
|
-
### Cluster-level tools
|
48
|
-
|
49
|
-
```
|
50
|
-
$ knife cluster show hadoop_demo
|
51
|
-
|
52
|
-
+---------------------+-------+------------+-------------+--------------+---------------+-----------------+----------+--------------+------------+------------+
|
53
|
-
| Name | Chef? | InstanceID | State | Public IP | Private IP | Created At | Flavor | Image | AZ | SSH Key |
|
54
|
-
+---------------------+-------+------------+-------------+--------------+---------------+-----------------+----------+--------------+------------+------------+
|
55
|
-
| hadoop_demo-dbnode-0 | yes | i-43c60e20 | running | 107.22.6.104 | 10.88.112.201 | 20111029-204156 | t1.micro | ami-cef405a7 | us-east-1a | hadoop_demo |
|
56
|
-
| hadoop_demo-webnode-0 | yes | i-1233aef1 | running | 102.99.3.123 | 10.88.112.123 | 20111029-204156 | t1.micro | ami-cef405a7 | us-east-1a | hadoop_demo |
|
57
|
-
| hadoop_demo-webnode-1 | yes | i-0986423b | not running | | | | | | | |
|
58
|
-
+---------------------+-------+------------+-------------+--------------+---------------+-----------------+----------+--------------+------------+------------+
|
59
|
-
|
60
|
-
```
|
61
|
-
|
62
|
-
The commands available are:
|
63
|
-
|
64
|
-
* list -- lists known clusters
|
65
|
-
* show -- show the named servers
|
66
|
-
* launch -- launch server
|
67
|
-
* bootstrap
|
68
|
-
* sync
|
69
|
-
* ssh
|
70
|
-
* start/stop
|
71
|
-
* kill
|
72
|
-
* kick -- trigger a chef-client run on each named machine, tailing the logs until the run completes
|
73
|
-
|
74
|
-
|
75
|
-
### Advanced clusters remain simple
|
76
|
-
|
77
|
-
Let's say that app is truly awesome, and the features and demand increases. This cluster adds an [ElasticSearch server](http://elasticsearch.org) for searching, a haproxy loadbalancer, and spreads the webnodes across two availability zones.
|
78
|
-
|
79
|
-
```ruby
|
80
|
-
Ironfan.cluster 'hadoop_demo' do
|
81
|
-
cloud(:ec2) do
|
82
|
-
image_name "maverick"
|
83
|
-
flavor "t1.micro"
|
84
|
-
availability_zones ['us-east-1a']
|
85
|
-
end
|
86
|
-
|
87
|
-
# The database server
|
88
|
-
facet :dbnode do
|
89
|
-
instances 1
|
90
|
-
role :mysql_server
|
91
|
-
cloud do
|
92
|
-
flavor 'm1.large'
|
93
|
-
backing 'ebs'
|
94
|
-
end
|
95
|
-
|
96
|
-
volume(:data) do
|
97
|
-
size 20
|
98
|
-
keep true
|
99
|
-
device '/dev/sdi'
|
100
|
-
mount_point '/data'
|
101
|
-
snapshot_id 'snap-a10234f'
|
102
|
-
attachable :ebs
|
103
|
-
end
|
104
|
-
end
|
105
|
-
|
106
|
-
facet :webnode do
|
107
|
-
instances 6
|
108
|
-
cloud.availability_zones ['us-east-1a', 'us-east-1b']
|
109
|
-
|
110
|
-
role :nginx_server
|
111
|
-
role :awesome_webapp
|
112
|
-
role :elasticsearch_client
|
113
|
-
|
114
|
-
volume(:server_logs) do
|
115
|
-
size 5
|
116
|
-
keep true
|
117
|
-
device '/dev/sdi'
|
118
|
-
mount_point '/server_logs'
|
119
|
-
snapshot_id 'snap-d9c1edb1'
|
120
|
-
end
|
121
|
-
end
|
122
|
-
|
123
|
-
facet :esnode do
|
124
|
-
instances 1
|
125
|
-
role "elasticsearch_data_esnode"
|
126
|
-
role "elasticsearch_http_esnode"
|
127
|
-
cloud.flavor "m1.large"
|
128
|
-
end
|
129
|
-
|
130
|
-
facet :loadbalancer do
|
131
|
-
instances 1
|
132
|
-
role "haproxy"
|
133
|
-
cloud.flavor "m1.xlarge"
|
134
|
-
elastic_ip "128.69.69.23"
|
135
|
-
end
|
136
|
-
|
137
|
-
cluster_role.override_attributes({
|
138
|
-
:elasticsearch => {
|
139
|
-
:version => '0.17.8',
|
140
|
-
},
|
141
|
-
})
|
142
|
-
end
|
143
|
-
```
|
144
|
-
|
145
|
-
The facets are described and scale independently. If you'd like to add more webnodes, just increase the instance count. If a machine misbehaves, just terminate it. Running `knife cluster launch hadoop_demo webnode` will note which machines are missing, and launch and configure them appropriately.
|
146
|
-
|
147
|
-
Ironfan speaks naturally to both Chef and your cloud provider. The esnode's `cluster_role.override_attributes` statement will be synchronized to the chef server, pinning the elasticsearch version across the server and clients. Your chef roles should focus on specific subsystems; the cluster file lets you see the architecture as a whole.
|
148
|
-
|
149
|
-
With these simple settings, if you have already [set up chef's knife to launch cloud servers](http://wiki.opscode.com/display/chef/Launch+Cloud+Instances+with+Knife), typing `knife cluster launch hadoop_demo --bootstrap` will (using Amazon EC2 as an example):
|
150
|
-
|
151
|
-
* Synchronize to the chef server:
|
152
|
-
- create chef roles on the server for the cluster and each facet.
|
153
|
-
- apply role directives (eg the homebase's `default_attributes` declaration).
|
154
|
-
- create a node for each machine
|
155
|
-
- apply the runlist to each node
|
156
|
-
* Set up security isolation:
|
157
|
-
- uses a keypair (login ssh key) isolated to that cluster
|
158
|
-
- Recognizes the `ssh` role, and adds a security group `ssh` that by default opens port 22.
|
159
|
-
- Recognizes the `nfs_server` role, and adds security groups `nfs_server` and `nfs_client`
|
160
|
-
- Authorizes the `nfs_server` to accept connections from all `nfs_client`s. Machines in other clusters that you mark as `nfs_client`s can connect to the NFS server, but are not automatically granted any other access to the machines in this cluster. Ironfan's opinionated behavior is about more than saving you effort -- tying this behavior to the chef role means you can't screw it up.
|
161
|
-
* Launches the machines in parallel:
|
162
|
-
- using the image name and the availability zone, it determines the appropriate region, image ID, and other implied behavior.
|
163
|
-
- passes a JSON-encoded user_data hash specifying the machine's chef `node_name` and client key. An appropriately-configured machine image will need no further bootstrapping -- it will connect to the chef server with the appropriate identity and proceed completely unattended.
|
164
|
-
* Synchronizes to the cloud provider:
|
165
|
-
- Applies EC2 tags to the machine, making your console intelligible: ![AWS Console screenshot](https://github.com/infochimps-labs/ironfan/raw/version_3/notes/aws_console_screenshot.jpg)
|
166
|
-
- Connects external (EBS) volumes, if any, to the correct mount point -- it uses (and applies) tags to the volumes, so they know which machine to adhere to. If you've manually added volumes, just make sure they're defined correctly in your cluster file and run `knife cluster sync {cluster_name}`; it will paint them with the correct tags.
|
167
|
-
- Associates an elastic IP, if any, to the machine
|
168
|
-
* Bootstraps the machine using knife bootstrap
|
data/notes/walkthrough-web.md
DELETED
@@ -1,166 +0,0 @@
|
|
1
|
-
## Walkthrough: Web Cluster
|
2
|
-
|
3
|
-
Here's a very simple cluster:
|
4
|
-
|
5
|
-
```ruby
|
6
|
-
Ironfan.cluster 'web_demo' do
|
7
|
-
cloud(:ec2) do
|
8
|
-
flavor 't1.micro'
|
9
|
-
end
|
10
|
-
|
11
|
-
role :base_role
|
12
|
-
role :chef_client
|
13
|
-
role :ssh
|
14
|
-
|
15
|
-
# The database server
|
16
|
-
facet :dbnode do
|
17
|
-
instances 1
|
18
|
-
role :mysql_server
|
19
|
-
|
20
|
-
cloud do
|
21
|
-
flavor 'm1.large'
|
22
|
-
backing 'ebs'
|
23
|
-
end
|
24
|
-
end
|
25
|
-
|
26
|
-
# A throwaway facet for development.
|
27
|
-
facet :webnode do
|
28
|
-
instances 2
|
29
|
-
role :nginx_server
|
30
|
-
role :awesome_webapp
|
31
|
-
end
|
32
|
-
end
|
33
|
-
```
|
34
|
-
|
35
|
-
This code defines a cluster named web_demo. A cluster is a group of servers united around a common purpose, in this case to serve a scalable web application.
|
36
|
-
|
37
|
-
The web_demo cluster has two 'facets' -- dbnode and webnode. A facet is a subgroup of interchangeable servers that provide a logical set of systems: in this case, the systems that store the website's data and those that render it.
|
38
|
-
|
39
|
-
The dbnode facet has one server, which will be named `web_demo-dbnode-0`; the webnode facet has two servers, `web_demo-webnode-0` and `web_demo-webnode-1`.
|
40
|
-
|
41
|
-
Each server inherits the appropriate behaviors from its facet and cluster. All the servers in this cluster have the `base_role`, `chef_client` and `ssh` roles. The dbnode machines additionally house a MySQL server, while the webnodes have an nginx reverse proxy for the custom `web_demo_webapp`.
|
42
|
-
|
43
|
-
As you can see, the dbnode facet asks for a different flavor of machine (`m1.large`) than the cluster default (`t1.micro`). Settings in the facet override those in the cluster, and settings in the server override those of its facet. You economically describe only what's significant about each machine.
|
44
|
-
|
45
|
-
### Cluster-level tools
|
46
|
-
|
47
|
-
```
|
48
|
-
$ knife cluster show web_demo
|
49
|
-
|
50
|
-
+---------------------+-------+------------+-------------+--------------+---------------+-----------------+----------+--------------+------------+------------+
|
51
|
-
| Name | Chef? | InstanceID | State | Public IP | Private IP | Created At | Flavor | Image | AZ | SSH Key |
|
52
|
-
+---------------------+-------+------------+-------------+--------------+---------------+-----------------+----------+--------------+------------+------------+
|
53
|
-
| web_demo-dbnode-0 | yes | i-43c60e20 | running | 107.22.6.104 | 10.88.112.201 | 20111029-204156 | t1.micro | ami-cef405a7 | us-east-1a | web_demo |
|
54
|
-
| web_demo-webnode-0 | yes | i-1233aef1 | running | 102.99.3.123 | 10.88.112.123 | 20111029-204156 | t1.micro | ami-cef405a7 | us-east-1a | web_demo |
|
55
|
-
| web_demo-webnode-1 | yes | i-0986423b | not running | | | | | | | |
|
56
|
-
+---------------------+-------+------------+-------------+--------------+---------------+-----------------+----------+--------------+------------+------------+
|
57
|
-
|
58
|
-
```
|
59
|
-
|
60
|
-
The commands available are:
|
61
|
-
|
62
|
-
* list -- lists known clusters
|
63
|
-
* show -- show the named servers
|
64
|
-
* launch -- launch server
|
65
|
-
* bootstrap
|
66
|
-
* sync
|
67
|
-
* ssh
|
68
|
-
* start/stop
|
69
|
-
* kill
|
70
|
-
* kick -- trigger a chef-client run on each named machine, tailing the logs until the run completes
|
71
|
-
|
72
|
-
|
73
|
-
### Advanced clusters remain simple
|
74
|
-
|
75
|
-
Let's say that app is truly awesome, and the features and demand increases. This cluster adds an [ElasticSearch server](http://elasticsearch.org) for searching, a haproxy loadbalancer, and spreads the webnodes across two availability zones.
|
76
|
-
|
77
|
-
```ruby
|
78
|
-
Ironfan.cluster 'web_demo' do
|
79
|
-
cloud(:ec2) do
|
80
|
-
image_name "maverick"
|
81
|
-
flavor "t1.micro"
|
82
|
-
availability_zones ['us-east-1a']
|
83
|
-
end
|
84
|
-
|
85
|
-
# The database server
|
86
|
-
facet :dbnode do
|
87
|
-
instances 1
|
88
|
-
role :mysql_server
|
89
|
-
cloud do
|
90
|
-
flavor 'm1.large'
|
91
|
-
backing 'ebs'
|
92
|
-
end
|
93
|
-
|
94
|
-
volume(:data) do
|
95
|
-
size 20
|
96
|
-
keep true
|
97
|
-
device '/dev/sdi'
|
98
|
-
mount_point '/data'
|
99
|
-
snapshot_id 'snap-a10234f'
|
100
|
-
attachable :ebs
|
101
|
-
end
|
102
|
-
end
|
103
|
-
|
104
|
-
facet :webnode do
|
105
|
-
instances 6
|
106
|
-
cloud.availability_zones ['us-east-1a', 'us-east-1b']
|
107
|
-
|
108
|
-
role :nginx_server
|
109
|
-
role :awesome_webapp
|
110
|
-
role :elasticsearch_client
|
111
|
-
|
112
|
-
volume(:server_logs) do
|
113
|
-
size 5
|
114
|
-
keep true
|
115
|
-
device '/dev/sdi'
|
116
|
-
mount_point '/server_logs'
|
117
|
-
snapshot_id 'snap-d9c1edb1'
|
118
|
-
end
|
119
|
-
end
|
120
|
-
|
121
|
-
facet :esnode do
|
122
|
-
instances 1
|
123
|
-
role "elasticsearch_data_esnode"
|
124
|
-
role "elasticsearch_http_esnode"
|
125
|
-
cloud.flavor "m1.large"
|
126
|
-
end
|
127
|
-
|
128
|
-
facet :loadbalancer do
|
129
|
-
instances 1
|
130
|
-
role "haproxy"
|
131
|
-
cloud.flavor "m1.xlarge"
|
132
|
-
elastic_ip "128.69.69.23"
|
133
|
-
end
|
134
|
-
|
135
|
-
cluster_role.override_attributes({
|
136
|
-
:elasticsearch => {
|
137
|
-
:version => '0.17.8',
|
138
|
-
},
|
139
|
-
})
|
140
|
-
end
|
141
|
-
```
|
142
|
-
|
143
|
-
The facets are described and scale independently. If you'd like to add more webnodes, just increase the instance count. If a machine misbehaves, just terminate it. Running `knife cluster launch web_demo webnode` will note which machines are missing, and launch and configure them appropriately.
|
144
|
-
|
145
|
-
Ironfan speaks naturally to both Chef and your cloud provider. The esnode's `cluster_role.override_attributes` statement will be synchronized to the chef server, pinning the elasticsearch version across the server and clients. Your chef roles should focus on specific subsystems; the cluster file lets you see the architecture as a whole.
|
146
|
-
|
147
|
-
With these simple settings, if you have already [set up chef's knife to launch cloud servers](http://wiki.opscode.com/display/chef/Launch+Cloud+Instances+with+Knife), typing `knife cluster launch web_demo --bootstrap` will (using Amazon EC2 as an example):
|
148
|
-
|
149
|
-
* Synchronize to the chef server:
|
150
|
-
- create chef roles on the server for the cluster and each facet.
|
151
|
-
- apply role directives (eg the homebase's `default_attributes` declaration).
|
152
|
-
- create a node for each machine
|
153
|
-
- apply the runlist to each node
|
154
|
-
* Set up security isolation:
|
155
|
-
- uses a keypair (login ssh key) isolated to that cluster
|
156
|
-
- Recognizes the `ssh` role, and adds a security group `ssh` that by default opens port 22.
|
157
|
-
- Recognizes the `nfs_server` role, and adds security groups `nfs_server` and `nfs_client`
|
158
|
-
- Authorizes the `nfs_server` to accept connections from all `nfs_client`s. Machines in other clusters that you mark as `nfs_client`s can connect to the NFS server, but are not automatically granted any other access to the machines in this cluster. Ironfan's opinionated behavior is about more than saving you effort -- tying this behavior to the chef role means you can't screw it up.
|
159
|
-
* Launches the machines in parallel:
|
160
|
-
- using the image name and the availability zone, it determines the appropriate region, image ID, and other implied behavior.
|
161
|
-
- passes a JSON-encoded user_data hash specifying the machine's chef `node_name` and client key. An appropriately-configured machine image will need no further bootstrapping -- it will connect to the chef server with the appropriate identity and proceed completely unattended.
|
162
|
-
* Synchronizes to the cloud provider:
|
163
|
-
- Applies EC2 tags to the machine, making your console intelligible: ![AWS Console screenshot](https://github.com/infochimps-labs/ironfan/wiki/aws_servers.jpg)
|
164
|
-
- Connects external (EBS) volumes, if any, to the correct mount point -- it uses (and applies) tags to the volumes, so they know which machine to adhere to. If you've manually added volumes, just make sure they're defined correctly in your cluster file and run `knife cluster sync {cluster_name}`; it will paint them with the correct tags.
|
165
|
-
- Associates an elastic IP, if any, to the machine
|
166
|
-
* Bootstraps the machine using knife bootstrap
|
data/spec/fixtures/gunbai.rb
DELETED
@@ -1,24 +0,0 @@
|
|
1
|
-
Ironfan.cluster 'gunbai' do
|
2
|
-
cloud(:ec2) do
|
3
|
-
permanent false
|
4
|
-
availability_zones ['us-east-1d']
|
5
|
-
flavor 't1.micro'
|
6
|
-
backing 'ebs'
|
7
|
-
image_name 'natty'
|
8
|
-
bootstrap_distro 'ubuntu10.04-ironfan'
|
9
|
-
chef_client_script 'client.rb'
|
10
|
-
mount_ephemerals
|
11
|
-
end
|
12
|
-
|
13
|
-
environment :dev
|
14
|
-
|
15
|
-
role :ssh
|
16
|
-
cloud(:ec2).security_group(:ssh).authorize_port_range(22..22)
|
17
|
-
|
18
|
-
facet :hub do
|
19
|
-
end
|
20
|
-
|
21
|
-
facet :spoke do
|
22
|
-
environment :other
|
23
|
-
end
|
24
|
-
end
|
data/spec/test_config.rb
DELETED
@@ -1,20 +0,0 @@
|
|
1
|
-
current_dir = File.expand_path('~/.chef')
|
2
|
-
organization = 'infochimps'
|
3
|
-
username = 'mrflip'
|
4
|
-
|
5
|
-
cookbook_root = ENV['PATH_TO_COOKBOOK_REPOS'] || File.expand_path('~/ics/sysadmin')
|
6
|
-
|
7
|
-
ironfan_path File.expand_path(cookbook_root+'/ironfan')
|
8
|
-
keypair_path File.expand_path(current_dir+"/keypairs")
|
9
|
-
cookbook_path [
|
10
|
-
"ironfan/cookbooks", "ironfan/site-cookbooks",
|
11
|
-
].map{|path| File.join(cookbook_root, path) }
|
12
|
-
cluster_path [
|
13
|
-
'ironfan/clusters',
|
14
|
-
].map{|path| File.join(cookbook_root, path) }
|
15
|
-
|
16
|
-
node_name username
|
17
|
-
validation_client_name "chef-validator"
|
18
|
-
validation_key "#{keypair_path}/#{organization}-validator.pem"
|
19
|
-
client_key "#{keypair_path}/#{username}-client_key.pem"
|
20
|
-
chef_server_url "https://api.opscode.com/organizations/#{organization}"
|
data/tasks/chef_config.rake
DELETED
@@ -1,38 +0,0 @@
|
|
1
|
-
# Configure the Rakefile's tasks.
|
2
|
-
|
3
|
-
###
|
4
|
-
# Company and SSL Details
|
5
|
-
# Used with the ssl_cert task.
|
6
|
-
###
|
7
|
-
|
8
|
-
# The company name - used for SSL certificates, and in various other places
|
9
|
-
COMPANY_NAME = "Infochimps, Inc"
|
10
|
-
|
11
|
-
# The Country Name to use for SSL Certificates
|
12
|
-
SSL_COUNTRY_NAME = "US"
|
13
|
-
|
14
|
-
# The State Name to use for SSL Certificates
|
15
|
-
SSL_STATE_NAME = "Several"
|
16
|
-
|
17
|
-
# The Locality Name for SSL - typically, the city
|
18
|
-
SSL_LOCALITY_NAME = "Locality"
|
19
|
-
|
20
|
-
# What department?
|
21
|
-
SSL_ORGANIZATIONAL_UNIT_NAME = "Operations"
|
22
|
-
|
23
|
-
# The SSL contact email address
|
24
|
-
SSL_EMAIL_ADDRESS = "coders@infochimps.com"
|
25
|
-
|
26
|
-
# License for new Cookbooks
|
27
|
-
# Can be :apachev2 or :none
|
28
|
-
NEW_COOKBOOK_LICENSE = :apachev2
|
29
|
-
|
30
|
-
###
|
31
|
-
# Useful Extras (which you probably don't need to change)
|
32
|
-
###
|
33
|
-
|
34
|
-
# The top of the repository checkout
|
35
|
-
TOPDIR = File.expand_path(File.join(File.dirname(__FILE__), ".."))
|
36
|
-
|
37
|
-
# Where to store certificates generated with ssl_cert
|
38
|
-
CADIR = File.expand_path(File.join(TOPDIR, "certificates"))
|