kite 0.0.2 → 0.0.3
- checksums.yaml +4 -4
- data/README.md +2 -0
- data/bin/kite +7 -0
- data/bin/{setup → kite-config} +0 -0
- data/bin/{console → kite-console} +0 -0
- data/kite.gemspec +2 -2
- data/lib/kite.rb +6 -4
- data/lib/kite/commands.rb +46 -0
- data/lib/kite/helpers.rb +2 -0
- data/lib/kite/version.rb +1 -1
- data/tpl/aws/README.md +103 -0
- data/tpl/aws/bin/make_cloud_config.sh +104 -0
- data/tpl/aws/bin/make_manifest_bosh-init.sh +164 -0
- data/tpl/aws/bin/make_manifest_concourse-cluster.sh +96 -0
- data/tpl/aws/bootstrap.sh +24 -0
- data/tpl/aws/env.example.erb +12 -0
- data/tpl/aws/terraform/aws-concourse.tf +127 -0
- data/tpl/aws/terraform/aws-vault.tf +26 -0
- data/tpl/aws/terraform/bosh-aws-base.tf +118 -0
- data/tpl/aws/terraform/outputs.tf +15 -0
- data/tpl/aws/terraform/terraform.tfvars.erb +7 -0
- data/tpl/aws/terraform/variables.tf +26 -0
- data/tpl/gcp/INSTALL.md +25 -0
- data/tpl/gcp/README.md +377 -0
- data/tpl/gcp/cloud-config.yml.erb +66 -0
- data/tpl/gcp/concourse.tf +62 -0
- data/tpl/gcp/concourse.yml.erb +101 -0
- data/tpl/gcp/env.example.erb +7 -0
- data/tpl/gcp/main.tf +107 -0
- data/tpl/gcp/manifest.yml.erb +173 -0
- data/tpl/gcp/scripts/01_create_infrastructure.sh +24 -0
- data/tpl/gcp/scripts/02_deploy_director.sh +35 -0
- data/tpl/gcp/scripts/03_deploy_concourse.sh +29 -0
- data/tpl/gcp/scripts/04_delete_director.sh +6 -0
- data/tpl/gcp/scripts/05_delete_infrastructure.sh +23 -0
- data/tpl/gcp/scripts/bootstrap.sh +22 -0
- data/tpl/gcp/scripts/delete.sh +16 -0
- metadata +38 -5
data/tpl/gcp/INSTALL.md
ADDED
@@ -0,0 +1,25 @@
# Cloudfactory Bootstrap

## Installation

Copy the environment example file:

```
cp env.example .env
```

Define all of the variables in the `.env` file:

```
vim .env
```
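
A quick sanity check (a sketch, assuming the file uses `KEY=value` style entries) to catch variables that were left empty before running the installation:

```
# print any assignments that still have no value
grep -E '=[[:space:]]*$' .env && echo "some variables are still empty"
```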

Run the installation:

```
source bootstrap.sh
```

## Teardown

Run the teardown script:

```
source delete.sh
```
data/tpl/gcp/README.md
ADDED
@@ -0,0 +1,377 @@
# Deploying Concourse on Google Compute Engine

This guide describes how to deploy [Concourse](http://concourse.ci/) on [Google Compute Engine](https://cloud.google.com/) using BOSH. You will deploy a BOSH director as part of these instructions.

## Prerequisites

* You must have the `terraform` CLI installed on your workstation. See [Download Terraform](https://www.terraform.io/downloads.html) for more details.
* You must have the `gcloud` CLI installed on your workstation. See [cloud.google.com/sdk](https://cloud.google.com/sdk/).
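
A minimal check (not part of the original guide) to confirm both CLIs are on your PATH before continuing:

```
terraform version
gcloud version
```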

### Setup your workstation

1. Set your project ID:

    ```
    export projectid=REPLACE_WITH_YOUR_PROJECT_ID
    ```

1. Export your preferred compute region and zone:

    ```
    export region=us-east1
    export zone=us-east1-c
    export zone2=us-east1-d
    ```

1. Configure `gcloud` with a user who is an owner of the project:

    ```
    gcloud auth login
    gcloud config set project ${projectid}
    gcloud config set compute/zone ${zone}
    gcloud config set compute/region ${region}
    ```

1. Create a service account and key:

    ```
    gcloud iam service-accounts create terraform-bosh
    gcloud iam service-accounts keys create /tmp/terraform-bosh.key.json \
        --iam-account terraform-bosh@${projectid}.iam.gserviceaccount.com
    ```

1. Grant the new service account editor access to your project:

    ```
    gcloud projects add-iam-policy-binding ${projectid} \
        --member serviceAccount:terraform-bosh@${projectid}.iam.gserviceaccount.com \
        --role roles/editor
    ```
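
    A hypothetical spot-check (not in the original guide): list the project's IAM bindings and confirm `terraform-bosh` now carries `roles/editor`.

    ```
    # look for the terraform-bosh service account under roles/editor in the output
    gcloud projects get-iam-policy ${projectid}
    ```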

1. Make your service account's key available in an environment variable to be used by `terraform`:

    ```
    export GOOGLE_CREDENTIALS=$(cat /tmp/terraform-bosh.key.json)
    ```

### Create required infrastructure with Terraform

1. Download [main.tf](main.tf) and [concourse.tf](concourse.tf) from this repository.

1. From the directory where the two `.tf` files are located, view the Terraform execution plan to see the resources that will be created:

    ```
    terraform plan -var projectid=${projectid} -var region=${region} -var zone-1=${zone} -var zone-2=${zone2}
    ```

1. Create the resources:

    ```
    terraform apply -var projectid=${projectid} -var region=${region} -var zone-1=${zone} -var zone-2=${zone2}
    ```
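
    An optional variation (not part of the original instructions): save the reviewed plan to a file and apply exactly that plan, so the apply cannot differ from what `terraform plan` showed.

    ```
    terraform plan -out=concourse.tfplan -var projectid=${projectid} -var region=${region} -var zone-1=${zone} -var zone-2=${zone2}
    terraform apply concourse.tfplan
    ```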

### Deploy a BOSH Director

1. SSH to the bastion VM you created in the previous step. All commands after this point should be run from that VM:

    ```
    gcloud compute ssh bosh-bastion-concourse
    ```

1. Configure `gcloud` to use the correct zone, region, and project:

    ```
    zone=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/zone)
    export zone=${zone##*/}
    export region=${zone%-*}
    gcloud config set compute/zone ${zone}
    gcloud config set compute/region ${region}
    export project_id=`curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id`
    ```

1. Explicitly set your secondary zone:

    ```
    export zone2=us-east1-d
    ```

1. Create a **password-less** SSH key:

    ```
    ssh-keygen -t rsa -f ~/.ssh/bosh -C bosh
    ```

1. Run this `export` command to set the full path of the SSH private key you created earlier:

    ```
    export ssh_key_path=$HOME/.ssh/bosh
    ```

1. Navigate to your [project's web console](https://console.cloud.google.com/compute/metadata/sshKeys) and add the new SSH public key by pasting the contents of `~/.ssh/bosh.pub`:

    ![](../img/add-ssh.png)

    > **Important:** The username field should auto-populate the value `bosh` after you paste the public key. If it does not, make sure no newlines or carriage returns are being pasted; the value you paste should be a single line.
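
    An alternative sketch (not part of the original guide): add the key from your workstation with `gcloud` instead of the web console. Be aware that `add-metadata` replaces any existing project-wide `ssh-keys` metadata, so only use this if the project has none, or merge the entries first.

    ```
    # GCE expects project-wide SSH keys in "USERNAME:KEY" form; the user here is `bosh`
    echo "bosh:$(cat ~/.ssh/bosh.pub)" > /tmp/bosh-ssh-keys
    gcloud compute project-info add-metadata --metadata-from-file ssh-keys=/tmp/bosh-ssh-keys
    ```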

1. Confirm that `bosh-init` is installed by querying its version:

    ```
    bosh-init -v
    ```

1. Create and `cd` to a directory:

    ```
    mkdir google-bosh-director
    cd google-bosh-director
    ```

1. Use `vim` or `nano` to create a BOSH Director deployment manifest named `manifest.yml.erb`:

    ```
    ---
    <%
    ['region', 'project_id', 'zone', 'ssh_key_path'].each do |val|
      if ENV[val].nil? || ENV[val].empty?
        raise "Missing environment variable: #{val}"
      end
    end

    region = ENV['region']
    project_id = ENV['project_id']
    zone = ENV['zone']
    ssh_key_path = ENV['ssh_key_path']
    %>
    name: bosh

    releases:
      - name: bosh
        url: https://bosh.io/d/github.com/cloudfoundry/bosh?v=260.1
        sha1: 7fb8e99e28b67df6604e97ef061c5425460518d3
      - name: bosh-google-cpi
        url: https://bosh.io/d/github.com/cloudfoundry-incubator/bosh-google-cpi-release?v=25.6.2
        sha1: b4865397d867655fdcc112bc5a7f9a5025cdf311

    resource_pools:
      - name: vms
        network: private
        stemcell:
          url: https://bosh.io/d/stemcells/bosh-google-kvm-ubuntu-trusty-go_agent?v=3312.12
          sha1: 3a2c407be6c1b3d04bb292ceb5007159100c85d7
        cloud_properties:
          zone: <%=zone %>
          machine_type: n1-standard-4
          root_disk_size_gb: 40
          root_disk_type: pd-standard
          service_scopes:
            - compute
            - devstorage.full_control

    disk_pools:
      - name: disks
        disk_size: 32_768
        cloud_properties:
          type: pd-standard

    networks:
      - name: vip
        type: vip
      - name: private
        type: manual
        subnets:
          - range: 10.0.0.0/29
            gateway: 10.0.0.1
            static: [10.0.0.3-10.0.0.7]
            cloud_properties:
              network_name: concourse
              subnetwork_name: bosh-concourse-<%=region %>
              ephemeral_external_ip: true
              tags:
                - bosh-internal

    jobs:
      - name: bosh
        instances: 1

        templates:
          - name: nats
            release: bosh
          - name: postgres
            release: bosh
          - name: powerdns
            release: bosh
          - name: blobstore
            release: bosh
          - name: director
            release: bosh
          - name: health_monitor
            release: bosh
          - name: google_cpi
            release: bosh-google-cpi

        resource_pool: vms
        persistent_disk_pool: disks

        networks:
          - name: private
            static_ips: [10.0.0.6]
            default:
              - dns
              - gateway

        properties:
          nats:
            address: 127.0.0.1
            user: nats
            password: nats-password

          postgres: &db
            listen_address: 127.0.0.1
            host: 127.0.0.1
            user: postgres
            password: postgres-password
            database: bosh
            adapter: postgres

          dns:
            address: 10.0.0.6
            domain_name: microbosh
            db: *db
            recursor: 169.254.169.254

          blobstore:
            address: 10.0.0.6
            port: 25250
            provider: dav
            director:
              user: director
              password: director-password
            agent:
              user: agent
              password: agent-password

          director:
            address: 127.0.0.1
            name: micro-google
            db: *db
            cpi_job: google_cpi
            user_management:
              provider: local
              local:
                users:
                  - name: admin
                    password: admin
                  - name: hm
                    password: hm-password
          hm:
            director_account:
              user: hm
              password: hm-password
            resurrector_enabled: true

          google: &google_properties
            project: <%=project_id %>

          agent:
            mbus: nats://nats:nats-password@10.0.0.6:4222
            ntp: *ntp
            blobstore:
              options:
                endpoint: http://10.0.0.6:25250
                user: agent
                password: agent-password

          ntp: &ntp
            - 169.254.169.254

    cloud_provider:
      template:
        name: google_cpi
        release: bosh-google-cpi

      ssh_tunnel:
        host: 10.0.0.6
        port: 22
        user: bosh
        private_key: <%=ssh_key_path %>

      mbus: https://mbus:mbus-password@10.0.0.6:6868

      properties:
        google: *google_properties
        agent: {mbus: "https://mbus:mbus-password@0.0.0.0:6868"}
        blobstore: {provider: local, path: /var/vcap/micro_bosh/data/cache}
        ntp: *ntp
    ```

1. Fill in the template values of the manifest with your environment variables:

    ```
    erb manifest.yml.erb > manifest.yml
    ```

1. Deploy the new manifest to create a BOSH Director:

    ```
    bosh-init deploy manifest.yml
    ```

1. Target your BOSH environment:

    ```
    bosh target 10.0.0.6
    ```

Your username is `admin` and password is `admin`.
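
A minimal login sketch (assuming the default credentials above and the BOSH CLI v1 used throughout this guide):

```
bosh login admin admin
```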

### Deploy Concourse

Complete the following steps from your bastion instance.

1. Upload the required [Google BOSH Stemcell](http://bosh.io/docs/stemcell.html):

    ```
    bosh upload stemcell https://bosh.io/d/stemcells/bosh-google-kvm-ubuntu-trusty-go_agent?v=3263.8
    ```

1. Upload the required [BOSH Releases](http://bosh.io/docs/release.html):

    ```
    bosh upload release https://bosh.io/d/github.com/concourse/concourse?v=2.5.0
    bosh upload release https://bosh.io/d/github.com/cloudfoundry/garden-runc-release?v=1.0.3
    ```

1. Download the [cloud-config.yml](cloud-config.yml) manifest file.

1. Download the [concourse.yml](concourse.yml) manifest file and set a few environment variables:

    ```
    export external_ip=`gcloud compute addresses describe concourse | grep ^address: | cut -f2 -d' '`
    export director_uuid=`bosh status --uuid 2>/dev/null`
    ```

1. Choose unique passwords for the internal services and the ATC, and export them:

    ```
    export common_password=
    export atc_password=
    ```
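
    One way (not prescribed by the guide) to generate throwaway values:

    ```
    # random 32-character hex strings
    export common_password=$(openssl rand -hex 16)
    export atc_password=$(openssl rand -hex 16)
    ```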

1. (Optional) Enable HTTPS support for the Concourse ATC.

    In `concourse.yml`, under the `atc` properties block, fill in the following fields:

    ```
    tls_bind_port: 443
    tls_cert: << SSL Cert for HTTPS >>
    tls_key: << SSL Private Key >>
    ```

1. Upload the cloud config:

    ```
    bosh update cloud-config cloud-config.yml
    ```

1. Target the deployment file and deploy:

    ```
    bosh deployment concourse.yml
    bosh deploy
    ```
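
Once the deploy finishes, a quick sanity check (a sketch, again using the BOSH CLI v1): list the VMs and confirm they report `running`, then browse to the `external_ip` you exported earlier.

```
bosh vms
```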
data/tpl/gcp/cloud-config.yml.erb
ADDED
@@ -0,0 +1,66 @@
<%
['zone', 'region'].each do |val|
  if @values['gcp'][val].to_s.empty?
    raise "Missing environment variable: #{val}"
  end
end

zone_1 = @values['gcp']['zone']
region = @values['gcp']['region']
%>

azs:
- name: z1
  cloud_properties:
    zone: <%=zone_1 %>

vm_types:
- name: common
  cloud_properties:
    machine_type: n1-standard-2
    root_disk_size_gb: 20
    root_disk_type: pd-ssd

- name: worker
  cloud_properties:
    machine_type: n1-standard-4
    root_disk_size_gb: 100
    root_disk_type: pd-ssd

vm_extensions:
- name: concourse-lb
  cloud_properties:
    target_pool: concourse-target-pool

compilation:
  workers: 2
  network: public
  reuse_compilation_vms: true
  az: z1
  cloud_properties:
    machine_type: n1-standard-4
    root_disk_size_gb: 100
    root_disk_type: pd-ssd
    preemptible: true

networks:
- name: public
  type: manual
  subnets:
  - az: z1
    range: 10.150.0.0/24
    gateway: 10.150.0.1
    cloud_properties:
      network_name: bosh
      subnetwork_name: concourse-public-<%=region %>-1
      ephemeral_external_ip: true
      tags:
        - concourse-public
        - concourse-internal

- name: vip
  type: vip

disk_types:
- name: database
  disk_size: 10240