bosh-gen 0.20.1 → 0.21.0

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published in their respective public registries.
Files changed (23)
  1. checksums.yaml +4 -4
  2. data/ChangeLog.md +7 -1
  3. data/README.md +76 -19
  4. data/lib/bosh/gen/generators/new_release_generator.rb +2 -1
  5. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/{%project_name_hyphenated%/templates/config/.gitkeep → just_install_packages/monit.tt} +0 -0
  6. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/just_install_packages/spec.tt +6 -0
  7. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/just_install_packages/templates/ignoreme +0 -0
  8. data/lib/bosh/gen/generators/new_release_generator/templates/templates/deployment.yml.tt +9 -14
  9. data/lib/bosh/gen/generators/new_release_generator/templates/templates/infrastructure-aws-ec2.yml.tt +6 -15
  10. data/lib/bosh/gen/generators/new_release_generator/templates/templates/infrastructure-warden.yml.tt +6 -76
  11. data/lib/bosh/gen/generators/new_release_generator/templates/templates/jobs.yml.tt +5 -19
  12. data/lib/bosh/gen/generators/new_release_generator/templates/templates/make_manifest.tt +5 -1
  13. data/lib/bosh/gen/version.rb +1 -1
  14. data/spec/generators/new_release_generator_spec.rb +2 -0
  15. metadata +6 -12
  16. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/monit.tt +0 -5
  17. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/spec.tt +0 -13
  18. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/bin/%job_name%_ctl.tt +0 -36
  19. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/bin/monit_debugger +0 -13
  20. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/config/%job_name%.conf.erb.tt +0 -5
  21. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/data/properties.sh.erb +0 -16
  22. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/helpers/ctl_setup.sh +0 -81
  23. data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/helpers/ctl_utils.sh +0 -156
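To try this release of the generator locally, install it from RubyGems (a standard `gem install`; assumes a working Ruby and that you already use the `bosh` CLI alongside `bosh-gen`):

```
$ gem install bosh-gen --version 0.21.0
$ gem list bosh-gen
```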
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 84853d3716260cd3a1740c5c633148f7c7f642f7
4
- data.tar.gz: 621964bd09838822cc1de26aa4d79fa5b048380e
3
+ metadata.gz: 5e6e0c6cb1f7f7e80dee18ae57909755becabdca
4
+ data.tar.gz: a0ca46233c2e00e297698ce92bacd1ca69e1199c
5
5
  SHA512:
6
- metadata.gz: 9f90eb0c40643cffcdc1c34efd6b517488a2f7390e28aeffe5c5ec6a835332cd4ce1a73582ce427659268287ce24d19e40fbc9f5dbe51efb84a9aa5a9a2165eb
7
- data.tar.gz: 93b53fee3e0dc2d40be857adbee4e239bea705844ba3012a77d3903da680dbef48efefdde36eb1745af4b809c9a2ba843e00f7a12f7902348af35b66d57d6a4c
6
+ metadata.gz: 5a987ad0f2fad0ee08ff94d84ac5b23c228d3d6299162eb09e95939c313ae14ebbe53f346f8253569117f6ff06646cc63ce286a94349b2478fa97e8e769b04e2
7
+ data.tar.gz: 4d130b7944ac886292ec1bdccf337d3bce55c97ca66ac1d2e9c1d3fe0e4b8ace22597bf76610316f024cfd545a9b0e6a437f6ec13a0cc902f59635dda2acc8e7
data/ChangeLog.md CHANGED
@@ -1,6 +1,12 @@
1
1
  Change Log
2
2
  ==========
3
3
 
4
+ v0.21.0
5
+
6
+ - `new` assumes using [spruce](https://github.com/geofffranks/spruce) instead of spiff. Yay!
7
+ - `new` no longer creates a master-slave job template
8
+ - `new` now creates a wrapper job `just_install_packages` that is useful for initial development of packages prior to creating new job templates with `bosh-gen job`.
9
+
4
10
  v0.20.0
5
11
  -------
6
12
 
@@ -13,7 +19,7 @@ Improved generators:
13
19
  - `new` - spiff templates include modern `templates:` array of `{name: job, release: release}`
14
20
  - `new` - added an Apache `LICENSE.md` file which will be included in final releases
15
21
  - `new --apt` - Vagrant image upgraded to trusty to match trusty stemcells [thx @cyrille-leclerc]
16
- - `new` - describes how to use release via https://bosh.io; how developers share with https://bosh.io [v0.21.1]
22
+ - `new` - describes how to use release via https://bosh.io; how developers share with https://bosh.io [v0.20.1]
17
23
 
18
24
  Removed generators:
19
25
 
data/README.md CHANGED
@@ -3,6 +3,8 @@ BOSH Generators
3
3
 
4
4
  Generators for creating and sharing BOSH releases.
5
5
 
6
+ New in 0.20: Create packages from embedded Docker images
7
+
6
8
  New in 0.17: Creates blobstore/bucket when creating new release. AWS bucket is publicly readable.
7
9
 
8
10
  If you would like to share your BOSH release with the world, you can use the [BOSH Community AWS S3 account](#share-bosh-releases).
@@ -22,12 +24,13 @@ Usage
22
24
  -----
23
25
 
24
26
  ```
25
- $ bosh-gen new my-project
27
+ $ bosh-gen new $(whoami)-project
26
28
  create
27
29
  Auto-detected infrastructure API credentials at ~/.fog (override with $FOG)
28
30
  1. AWS (community)
29
31
  2. Alternate credentials
30
32
  Choose an auto-detected infrastructure: 2
33
+ Choose AWS region: 1
31
34
 
32
35
  create README.md
33
36
  create Rakefile
@@ -57,33 +60,26 @@ $ cd ./my-project-boshrelease
57
60
  ```
58
61
 
59
62
  ```
60
- $ wget -P /tmp http://ftp.ruby-lang.org/pub/ruby/1.9/ruby-1.9.3-p194.tar.gz
61
- $ bosh-gen package ruby -f /tmp/ruby-1.9.3-p194.tar.gz
63
+ $ wget -P /tmp http://ftp.ruby-lang.org/pub/ruby/2.2/ruby-2.2.2.tar.gz
64
+ $ bosh-gen package ruby -f /tmp/ruby-2.2.2.tar.gz
62
65
 
63
66
  $ bosh-gen job some-ruby-job -d ruby
64
67
 
65
- $ git add .
66
- $ git commit -m "added a job + 3 packages"
67
-
68
- $ bosh create release
68
+ $ bosh create release --force
69
69
  ```
70
70
 
71
- It is not ideal to include large source files, such as the 10Mb ruby tarball, in your git repository. Rather, use the blobstore for those:
71
+ To test out each iteration of your release, you can create a manifest, upload your release, and deploy it:
72
72
 
73
73
  ```
74
- $ rm -rf src/ruby/ruby-1.9.3-p194.tar.gz
75
- $ bosh add blob /tmp/ruby-1.9.3-p194.tar.gz ruby
76
- $ bosh upload blobs
77
-
78
- $ bosh create release
74
+ ./templates/make_manifest warden
75
+ bosh upload release
76
+ bosh -n deploy
79
77
  ```
80
78
 
81
- Your job may need additional configuration files or executables installed.
79
+ The large ruby tarball is automatically placed in the `blobs/` folder. Before you share your boshrelease with other developers, you will want to sync it to your blobstore (the S3 bucket created via `bosh-gen new`):
82
80
 
83
81
  ```
84
- $ bosh-gen template some-ruby-job config/some-config.ini
85
- create jobs/some-ruby-job/templates/some-config.ini.erb
86
- force jobs/some-ruby-job/spec
82
+ $ bosh upload blobs
87
83
  ```
88
84
 
89
85
  Quickly creating packages
@@ -95,19 +91,25 @@ There is a slow way to create a package, and there are three faster ways. Slow v
95
91
 
96
92
  ```
97
93
  $ bosh-gen package apache2
94
+ create packages/apache2/packaging
95
+ create packages/apache2/spec
98
96
  ```
99
97
 
100
98
  The slowest way to create a package is to run the command above, then get the source, read the "install from source" instructions, and create a package.
101
99
 
102
100
  ### Slightly faster way
103
101
 
102
+ As above, when we created the `ruby` package we included a pre-downloaded asset:
103
+
104
104
  ```
105
- $ bosh-gen package redis -f ~/Downloads/redis-2.8.3.tar.gz
105
+ $ bosh-gen package ruby -f /tmp/ruby-2.2.2.tar.gz
106
106
  ```
107
107
 
108
108
  If you download the source files first, and reference them with the `bosh-gen package` generator, then it will attempt to guess how to install the package. The generated `packaging` script will include some starting commands that might work.
109
109
 
110
- The command above will also copy the target file into the `blobs/redis/` folder. One less thing for you to do.
110
+ The command above will also copy the target file into the `blobs/ruby/` folder. One less thing for you to do.
111
+
112
+ You still need to look up the "how to install from source" instructions and put them in the `packages/ruby/packaging` script.
111
113
 
112
114
  ### Fastest way - reuse existing packages
113
115
 
@@ -123,6 +125,61 @@ This is a great command to use. There are a growing number of BOSH releases on G
123
125
 
124
126
  Remember, first run `bosh sync blobs` in the target BOSH release project. Otherwise it will not be able to copy over the blobs.
125
127
 
128
+ ### Fast way - embedded Docker images
129
+
130
+ This use case assumes you have the `docker` CLI installed and access to a Docker daemon.
131
+
132
+ It will also make your BOSH release dependent upon the [cf-platform-eng/docker-boshrelease](https://bosh.io/d/github.com/cf-platform-eng/docker-boshrelease) release, which installs the Docker daemon on VMs and offers a simple way to run Docker containers if required.
133
+
134
+ ```
135
+ $ bosh-gen package tmate --docker-image nicopace/tmate-docker
136
+ exist jobs
137
+ create jobs/nicopace_tmate_docker_image/monit
138
+ create jobs/nicopace_tmate_docker_image/spec
139
+ create jobs/nicopace_tmate_docker_image/templates/bin/install_ctl
140
+ create jobs/nicopace_tmate_docker_image/templates/bin/monit_debugger
141
+ create jobs/nicopace_tmate_docker_image/templates/helpers/ctl_setup.sh
142
+ create jobs/nicopace_tmate_docker_image/templates/helpers/ctl_utils.sh
143
+ exist packages
144
+ create packages/tmate/packaging
145
+ create packages/tmate/spec
146
+ docker pull nicopace/tmate-docker
147
+ Pulling repository nicopace/tmate-docker
148
+ 7b9df453c66b: Download complete
149
+ ...
150
+ 6df853718c80: Download complete
151
+ Status: Image is up to date for nicopace/tmate-docker:latest
152
+ docker save nicopace/tmate-docker > blobs/docker-images/nicopace_tmate_docker.tgz
153
+
154
+ $ bosh create release --force
155
+ ...
156
+ Release name: tmate-server
157
+ Release version: 0+dev.1
158
+ ```
159
+
160
+ The `package --docker-image` flag also displays help on the next steps:
161
+
162
+ ```
163
+ Next steps:
164
+ 1. To use this BOSH release, first upload it and the docker release to your BOSH:
165
+ bosh upload release https://bosh.io/releases/cloudfoundry-community/consul-docker
166
+ bosh upload release https://bosh.io/d/github.com/cf-platform-eng/docker-boshrelease
167
+
168
+ 2. To use the docker image, your deployment job needs to start with the following:
169
+
170
+ jobs:
171
+ - name: some_job
172
+ templates:
173
+ # run docker daemon
174
+ - {name: docker, release: docker}
175
+ # warm docker image cache from bosh package
176
+ - {name: nicopace_tmate_docker_image, release: tmate-server}
177
+
178
+ 3. To simply run a single container, try the 'containers' job from 'docker' release
179
+
180
+ https://github.com/cloudfoundry-community/consul-docker-boshrelease/blob/master/templates/jobs.yml#L18-L40
181
+ ```
182
+
126
183
  ### Fast way - reuse Aptitude/Debian packages
127
184
 
128
185
  ```
data/lib/bosh/gen/generators/new_release_generator.rb CHANGED
@@ -120,6 +120,7 @@ module Bosh::Gen
120
120
  }
121
121
  }
122
122
  end
123
+ config_final["final_name"] = project_name
123
124
 
124
125
  create_file "config/final.yml", YAML.dump(config_final)
125
126
  end
@@ -129,7 +130,7 @@ module Bosh::Gen
129
130
  config/dev.yml
130
131
  config/private.yml
131
132
  config/settings.yml
132
- releases/*.tgz
133
+ releases/**/*.tgz
133
134
  dev_releases
134
135
  blobs/*
135
136
  .blobs
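The first hunk above writes a `final_name` key into the generated `config/final.yml`, so the project name is used when cutting final releases. A sketch of the resulting file for a release generated as `redis` with the S3 blobstore option (the bucket name is illustrative; the real values come from the `bosh-gen new` prompts and are checked by the updated specs further down):

```
$ cat config/final.yml
---
final_name: redis
blobstore:
  provider: s3
  options:
    bucket_name: redis-boshrelease
```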
data/lib/bosh/gen/generators/new_release_generator/templates/jobs/just_install_packages/spec.tt ADDED
@@ -0,0 +1,6 @@
1
+ ---
2
+ name: just_install_packages
3
+ packages: []
4
+ templates:
5
+ ignoreme: ignoreme
6
+ properties: {}
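As the changelog notes, this new wrapper job exists so that freshly generated packages can be compiled and installed on a VM before any real job template is written: list them in its `packages:` array and deploy. A sketch of that loop, assuming a manifest has already been generated with `./templates/make_manifest warden` (the `ruby` package is just an example):

```
$ bosh-gen package ruby -f /tmp/ruby-2.2.2.tar.gz
$ $EDITOR jobs/just_install_packages/spec    # change `packages: []` to `packages: [ruby]`
$ bosh create release --force
$ bosh upload release
$ bosh -n deploy
```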
data/lib/bosh/gen/generators/new_release_generator/templates/templates/deployment.yml.tt CHANGED
@@ -1,24 +1,20 @@
1
1
  meta:
2
- environment: (( merge ))
3
- stemcell: (( merge ))
2
+ environment: (( params "please set environment" ))
3
+ stemcell: (( params "please set stemcell" ))
4
4
 
5
- name: (( meta.environment ))
5
+ name: (( grab meta.environment ))
6
6
 
7
- director_uuid: (( merge ))
7
+ director_uuid: (( params "please set director_uuid" ))
8
8
 
9
- releases: (( merge ))
9
+ releases: (( params "please set release" ))
10
10
 
11
- networks: (( merge ))
12
-
13
- jobs: (( merge ))
14
-
15
- properties: (( merge ))
11
+ jobs: (( params "please set jobs" ))
16
12
 
17
13
  compilation:
18
14
  workers: 6
19
15
  network: <%= project_name_underscored %>1
20
16
  reuse_compilation_vms: true
21
- cloud_properties: (( merge ))
17
+ cloud_properties: (( params "please set compilation cloud properties" ))
22
18
 
23
19
  update:
24
20
  canaries: 1
@@ -30,6 +26,5 @@ update:
30
26
  resource_pools:
31
27
  - name: small_z1
32
28
  network: <%= project_name_underscored %>1
33
- size: (( auto ))
34
- stemcell: (( meta.stemcell ))
35
- cloud_properties: (( merge ))
29
+ stemcell: (( grab meta.stemcell ))
30
+ cloud_properties: (( params "please set resource_pool cloud properties" ))
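Each `(( params "please set ..." ))` placeholder above expects a later file in the spruce merge to supply the value. As a hedged illustration only (this is not the stub the generator ships), a hand-written stub covering the `meta` values might look like:

```
$ cat my-stub.yml
---
meta:
  environment: my-project-warden
  stemcell:
    name: bosh-warden-boshlite-ubuntu-trusty-go_agent
    version: latest
director_uuid: PLACEHOLDER-DIRECTOR-UUID   # make_manifest replaces this with your director's UUID
```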
data/lib/bosh/gen/generators/new_release_generator/templates/templates/infrastructure-aws-ec2.yml.tt CHANGED
@@ -1,7 +1,7 @@
1
1
  meta:
2
- environment: (( merge ))
3
- dns_root: (( merge ))
4
- security_groups: (( merge ))
2
+ environment: (( params "please set meta environment" ))
3
+ dns_root: (( params "please set meta dns_root" ))
4
+ security_groups: (( params "please set meta security_groups" ))
5
5
  persistent_disk: 4096
6
6
 
7
7
  stemcell:
@@ -9,19 +9,10 @@ meta:
9
9
  version: latest
10
10
 
11
11
  jobs:
12
- - name: <%= project_name_underscored %>_leader_z1
13
- instances: 1
12
+ - name: just_install_packages
14
13
  networks:
15
14
  - name: <%= project_name_underscored %>1
16
- persistent_disk: (( meta.persistent_disk ))
17
- - name: <%= project_name_underscored %>_z1
18
- instances: 2
19
- networks:
20
- - name: <%= project_name_underscored %>1
21
- persistent_disk: (( meta.persistent_disk ))
22
- properties:
23
- <%= project_name_underscored %>:
24
- leader_address: (( "0.<%= project_name_hyphenated %>-leader-z1.<%= project_name_hyphenated %>1." meta.environment "." meta.dns_root ))
15
+ persistent_disk: (( grab meta.persistent_disk ))
25
16
 
26
17
  compilation:
27
18
  cloud_properties:
@@ -39,4 +30,4 @@ networks:
39
30
  - name: <%= project_name_underscored %>1
40
31
  type: dynamic
41
32
  cloud_properties:
42
- security_groups: (( meta.security_groups ))
33
+ security_groups: (( grab meta.security_groups ))
data/lib/bosh/gen/generators/new_release_generator/templates/templates/infrastructure-warden.yml.tt CHANGED
@@ -12,19 +12,9 @@ update:
12
12
  update_watch_time: 1000-30000
13
13
 
14
14
  jobs:
15
- - name: <%= project_name_underscored %>_leader_z1
16
- instances: 1
15
+ - name: just_install_packages
17
16
  networks:
18
17
  - name: <%= project_name_underscored %>1
19
- static_ips: (( static_ips(0) ))
20
- - name: <%= project_name_underscored %>_z1
21
- instances: 2
22
- networks:
23
- - name: <%= project_name_underscored %>1
24
- static_ips: ~
25
- properties:
26
- <%= project_name_underscored %>:
27
- leader_address: (( jobs.<%= project_name_underscored %>_leader_z1.networks.<%= project_name_underscored %>1.static_ips.[0] ))
28
18
 
29
19
  compilation:
30
20
  cloud_properties:
@@ -37,70 +27,10 @@ resource_pools:
37
27
 
38
28
  networks:
39
29
  - name: <%= project_name_underscored %>1
40
- # Assumes up to 5 VMs, including 1 static and 4 dynamic.
41
- # Plus 5 (double the size) unused IPs, due to BOSH bug/quirk.
30
+ type: manual
42
31
  subnets:
43
- - cloud_properties:
44
- name: random
45
- range: 10.244.2.0/30
46
- reserved:
47
- - 10.244.2.1
32
+ - range: 10.244.2.0/24
33
+ name: <%= project_name_underscored %>1
34
+ gateway: 10.244.2.1
48
35
  static:
49
- - 10.244.2.2
50
-
51
- - cloud_properties:
52
- name: random
53
- range: 10.244.2.4/30
54
- reserved:
55
- - 10.244.2.5
56
- static: []
57
- - cloud_properties:
58
- name: random
59
- range: 10.244.2.8/30
60
- reserved:
61
- - 10.244.2.9
62
- static: []
63
- - cloud_properties:
64
- name: random
65
- range: 10.244.2.12/30
66
- reserved:
67
- - 10.244.2.13
68
- static: []
69
- - cloud_properties:
70
- name: random
71
- range: 10.244.2.16/30
72
- reserved:
73
- - 10.244.2.17
74
- static: []
75
-
76
- # Bonus double-sized network required due to BOSH oddity
77
- - cloud_properties:
78
- name: random
79
- range: 10.244.2.20/30
80
- reserved:
81
- - 10.244.2.21
82
- static: []
83
- - cloud_properties:
84
- name: random
85
- range: 10.244.2.24/30
86
- reserved:
87
- - 10.244.2.25
88
- static: []
89
- - cloud_properties:
90
- name: random
91
- range: 10.244.2.28/30
92
- reserved:
93
- - 10.244.2.29
94
- static: []
95
- - cloud_properties:
96
- name: random
97
- range: 10.244.2.32/30
98
- reserved:
99
- - 10.244.2.33
100
- static: []
101
- - cloud_properties:
102
- name: random
103
- range: 10.244.2.36/30
104
- reserved:
105
- - 10.244.2.37
106
- static: []
36
+ - 10.244.2.2-10.244.2.60
data/lib/bosh/gen/generators/new_release_generator/templates/templates/jobs.yml.tt CHANGED
@@ -9,29 +9,15 @@ update:
9
9
  serial: false
10
10
 
11
11
  jobs:
12
- - name: <%= project_name_underscored %>_leader_z1
12
+ - name: just_install_packages
13
13
  templates:
14
- - name: <%= project_name_hyphenated %>
14
+ - name: just_install_packages
15
15
  release: <%= project_name_hyphenated %>
16
- instances: 0
17
- resource_pool: small_z1
18
- networks: (( merge ))
16
+ instances: 1
19
17
  persistent_disk: 0
20
- properties: {}
21
- - name: <%= project_name_underscored %>_z1
22
- templates:
23
- - name: <%= project_name_hyphenated %>
24
- release: <%= project_name_hyphenated %>
25
- instances: 0
26
18
  resource_pool: small_z1
27
- networks: (( merge ))
28
- persistent_disk: 0
29
- update:
30
- canaries: 10
31
- properties:
32
- <%= project_name_underscored %>:
33
- leader_address: (( merge ))
19
+ networks: (( params "please set just_install_package networks" ))
34
20
 
35
- networks: (( merge ))
21
+ networks: (( params "please set networks" ))
36
22
 
37
23
  properties: {}
data/lib/bosh/gen/generators/new_release_generator/templates/templates/make_manifest.tt CHANGED
@@ -59,7 +59,11 @@ perl -pi -e "s/PLACEHOLDER-DIRECTOR-UUID/$DIRECTOR_UUID/g" $tmpdir/stub-with-uui
59
59
  perl -pi -e "s/NAME/$NAME/g" $tmpdir/stub-with-uuid.yml
60
60
  perl -pi -e "s/STEMCELL/$STEMCELL/g" $tmpdir/stub-with-uuid.yml
61
61
 
62
- spiff merge \
62
+ if ! [ -x "$(command -v spruce)" ]; then
63
+ echo 'spruce is not installed. Please download at https://github.com/geofffranks/spruce/releases' >&2
64
+ fi
65
+
66
+ spruce merge --prune meta \
63
67
  $templates/deployment.yml \
64
68
  $templates/jobs.yml \
65
69
  $templates/infrastructure-${infrastructure}.yml \
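For readers who have not used spruce: `(( grab path ))` copies a value from elsewhere in the merged document, `(( params "..." ))` marks a value that a later file must supply, and `--prune meta` drops the `meta:` scaffolding from the final manifest. A minimal sketch of the `grab`/`--prune` behaviour, assuming the `spruce` binary is on your `PATH` (the file name is illustrative, not part of the generator output):

```
$ cat > base.yml <<'YAML'
meta:
  environment: my-project-warden
name: (( grab meta.environment ))
YAML
$ spruce merge --prune meta base.yml
```

The merged output drops the `meta:` block and resolves `name:` to `my-project-warden`; the `(( params "please set ..." ))` entries in the templates above are intended to abort the merge with that message until a stub supplies the value.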
data/lib/bosh/gen/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  module Bosh
2
2
  module Gen
3
- VERSION = "0.20.1"
3
+ VERSION = "0.21.0"
4
4
  end
5
5
  end
data/spec/generators/new_release_generator_spec.rb CHANGED
@@ -39,6 +39,7 @@ describe Bosh::Gen::Generators::NewReleaseGenerator do
39
39
 
40
40
  config = YAML.load_file("config/final.yml")
41
41
  expect(config).to_not be_nil
42
+ expect(config["final_name"]).to eq("redis")
42
43
  expect(config["blobstore"]).to_not be_nil
43
44
  expect(config["blobstore"]["options"]).to_not be_nil
44
45
  expect(config["blobstore"]["provider"]).to eq("s3")
@@ -71,6 +72,7 @@ describe Bosh::Gen::Generators::NewReleaseGenerator do
71
72
 
72
73
  config = YAML.load_file("config/final.yml")
73
74
  expect(config).to_not be_nil
75
+ expect(config["final_name"]).to eq("redis")
74
76
  expect(config["blobstore"]).to_not be_nil
75
77
  expect(config["blobstore"]["options"]).to_not be_nil
76
78
  expect(config["blobstore"]["provider"]).to eq("swift")
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: bosh-gen
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.20.1
4
+ version: 0.21.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Dr Nic Williams
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2015-04-28 00:00:00.000000000 Z
11
+ date: 2015-10-19 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: thor
@@ -234,16 +234,10 @@ files:
234
234
  - lib/bosh/gen/generators/new_release_generator/templates/README.md.tt
235
235
  - lib/bosh/gen/generators/new_release_generator/templates/Rakefile
236
236
  - lib/bosh/gen/generators/new_release_generator/templates/blobs/.gitkeep
237
- - lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/monit.tt
238
- - lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/spec.tt
239
- - lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/bin/%job_name%_ctl.tt
240
- - lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/bin/monit_debugger
241
- - lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/config/%job_name%.conf.erb.tt
242
- - lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/config/.gitkeep
243
- - lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/data/properties.sh.erb
244
- - lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/helpers/ctl_setup.sh
245
- - lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/helpers/ctl_utils.sh
246
237
  - lib/bosh/gen/generators/new_release_generator/templates/jobs/.gitkeep
238
+ - lib/bosh/gen/generators/new_release_generator/templates/jobs/just_install_packages/monit.tt
239
+ - lib/bosh/gen/generators/new_release_generator/templates/jobs/just_install_packages/spec.tt
240
+ - lib/bosh/gen/generators/new_release_generator/templates/jobs/just_install_packages/templates/ignoreme
247
241
  - lib/bosh/gen/generators/new_release_generator/templates/packages/.gitkeep
248
242
  - lib/bosh/gen/generators/new_release_generator/templates/src/.gitkeep
249
243
  - lib/bosh/gen/generators/new_release_generator/templates/templates/deployment.yml.tt
@@ -370,7 +364,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
370
364
  version: '0'
371
365
  requirements: []
372
366
  rubyforge_project:
373
- rubygems_version: 2.4.3
367
+ rubygems_version: 2.4.6
374
368
  signing_key:
375
369
  specification_version: 4
376
370
  summary: ''
data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/monit.tt DELETED
@@ -1,5 +0,0 @@
1
- check process <%= job_name %>
2
- with pidfile /var/vcap/sys/run/<%= job_name %>/<%= job_name %>.pid
3
- start program "/var/vcap/jobs/<%= job_name %>/bin/monit_debugger <%= job_name %>_ctl '/var/vcap/jobs/<%= job_name %>/bin/<%= job_name %>_ctl start'"
4
- stop program "/var/vcap/jobs/<%= job_name %>/bin/monit_debugger <%= job_name %>_ctl '/var/vcap/jobs/<%= job_name %>/bin/<%= job_name %>_ctl stop'"
5
- group vcap
data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/spec.tt DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- name: <%= project_name_hyphenated %>
3
- packages: []
4
- templates:
5
- bin/monit_debugger: bin/monit_debugger
6
- bin/<%= job_name %>_ctl: bin/<%= job_name %>_ctl
7
- config/<%= job_name %>.conf.erb: config/<%= job_name %>.conf
8
- data/properties.sh.erb: data/properties.sh
9
- helpers/ctl_setup.sh: helpers/ctl_setup.sh
10
- helpers/ctl_utils.sh: helpers/ctl_utils.sh
11
- properties:
12
- <%= project_name_underscored %>.leader_address:
13
- description: Hostname/IP to the leader/master of the cluster
data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/bin/%job_name%_ctl.tt DELETED
@@ -1,36 +0,0 @@
1
- #!/bin/bash
2
-
3
- set -e # exit immediately if a simple command exits with a non-zero status
4
- set -u # report the usage of uninitialized variables
5
-
6
- # Setup env vars and folders for the webapp_ctl script
7
- source /var/vcap/jobs/<%= job_name %>/helpers/ctl_setup.sh '<%= job_name %>'
8
-
9
- export PORT=${PORT:-5000}
10
- export LANG=en_US.UTF-8
11
-
12
- case $1 in
13
-
14
- start)
15
- pid_guard $PIDFILE $JOB_NAME
16
-
17
- # store pid in $PIDFILE
18
- echo $$ > $PIDFILE
19
-
20
- exec chpst -u vcap:vcap <%= job_name %> \
21
- >>$LOG_DIR/$JOB_NAME.stdout.log \
22
- 2>>$LOG_DIR/$JOB_NAME.stderr.log
23
-
24
- ;;
25
-
26
- stop)
27
- kill_and_wait $PIDFILE
28
-
29
- ;;
30
- *)
31
- echo "Usage: <%= job_name %>_ctl {start|stop}"
32
-
33
- ;;
34
-
35
- esac
36
- exit 0
data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/bin/monit_debugger DELETED
@@ -1,13 +0,0 @@
1
- #!/bin/sh
2
- # USAGE monit_debugger <label> command to run
3
- mkdir -p /var/vcap/sys/log/monit
4
- {
5
- echo "MONIT-DEBUG date"
6
- date
7
- echo "MONIT-DEBUG env"
8
- env
9
- echo "MONIT-DEBUG $@"
10
- $2 $3 $4 $5 $6 $7
11
- R=$?
12
- echo "MONIT-DEBUG exit code $R"
13
- } >/var/vcap/sys/log/monit/monit_debugger.$1.log 2>&1
data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/config/%job_name%.conf.erb.tt DELETED
@@ -1,5 +0,0 @@
1
- # Example of how to do one thing when job is master/leader, and another thing if its follower/slave
2
- # See templates/infrastructure-warden.yml & templates/jobs.yml for where this value originates
3
- <%% if_p("<%= project_name_underscored %>.leader_address") do |leader_address| %>
4
- leader: <%%= leader_address %>
5
- <%% end %>
data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/data/properties.sh.erb DELETED
@@ -1,16 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- # job template binding variables
4
-
5
- # job name & index of this VM within cluster
6
- # e.g. JOB_NAME=redis, JOB_INDEX=0
7
- export NAME='<%= name %>'
8
- export JOB_INDEX=<%= index %>
9
- # full job name, like redis/0 or webapp/3
10
- export JOB_FULL="$NAME/$JOB_INDEX"
11
-
12
- export DEPLOYMENT_NAME=<%= spec.deployment %>
13
- export DNS_ROOT=<%= spec.dns_domain_name %>
14
-
15
- # $BIND_ADDR is the IP of the first network
16
- export BIND_ADDR=<%= spec.networks.send(spec.networks.methods(false).first).ip %>
data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/helpers/ctl_setup.sh DELETED
@@ -1,81 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- # Setup env vars and folders for the ctl script
4
- # This helps keep the ctl script as readable
5
- # as possible
6
-
7
- # Usage options:
8
- # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh JOB_NAME OUTPUT_LABEL
9
- # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar
10
- # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar foobar
11
- # source /var/vcap/jobs/foobar/helpers/ctl_setup.sh foobar nginx
12
-
13
- set -e # exit immediately if a simple command exits with a non-zero status
14
- set -u # report the usage of uninitialized variables
15
-
16
- JOB_NAME=$1
17
- output_label=${1:-JOB_NAME}
18
-
19
- export JOB_DIR=/var/vcap/jobs/$JOB_NAME
20
- chmod 755 $JOB_DIR # to access file via symlink
21
-
22
- # Load some bosh deployment properties into env vars
23
- # Try to put all ERb into data/properties.sh.erb
24
- # incl $NAME, $JOB_INDEX, $WEBAPP_DIR
25
- source $JOB_DIR/data/properties.sh
26
-
27
- source $JOB_DIR/helpers/ctl_utils.sh
28
- redirect_output ${output_label}
29
-
30
- export HOME=${HOME:-/home/vcap}
31
-
32
- # Add all packages' /bin & /sbin into $PATH
33
- for package_bin_dir in $(ls -d /var/vcap/packages/*/*bin)
34
- do
35
- export PATH=${package_bin_dir}:$PATH
36
- done
37
-
38
- export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-''} # default to empty
39
- for package_bin_dir in $(ls -d /var/vcap/packages/*/lib)
40
- do
41
- export LD_LIBRARY_PATH=${package_bin_dir}:$LD_LIBRARY_PATH
42
- done
43
-
44
- # Setup log, run and tmp folders
45
-
46
- export RUN_DIR=/var/vcap/sys/run/$JOB_NAME
47
- export LOG_DIR=/var/vcap/sys/log/$JOB_NAME
48
- export TMP_DIR=/var/vcap/sys/tmp/$JOB_NAME
49
- export STORE_DIR=/var/vcap/store/$JOB_NAME
50
- for dir in $RUN_DIR $LOG_DIR $TMP_DIR $STORE_DIR
51
- do
52
- mkdir -p ${dir}
53
- chown vcap:vcap ${dir}
54
- chmod 775 ${dir}
55
- done
56
- export TMPDIR=$TMP_DIR
57
-
58
- export C_INCLUDE_PATH=/var/vcap/packages/mysqlclient/include/mysql:/var/vcap/packages/sqlite/include:/var/vcap/packages/libpq/include
59
- export LIBRARY_PATH=/var/vcap/packages/mysqlclient/lib/mysql:/var/vcap/packages/sqlite/lib:/var/vcap/packages/libpq/lib
60
-
61
- # consistent place for vendoring python libraries within package
62
- if [[ -d ${WEBAPP_DIR:-/xxxx} ]]
63
- then
64
- export PYTHONPATH=$WEBAPP_DIR/vendor/lib/python
65
- fi
66
-
67
- if [[ -d /var/vcap/packages/java7 ]]
68
- then
69
- export JAVA_HOME="/var/vcap/packages/java7"
70
- fi
71
-
72
- # setup CLASSPATH for all jars/ folders within packages
73
- export CLASSPATH=${CLASSPATH:-''} # default to empty
74
- for java_jar in $(ls -d /var/vcap/packages/*/*/*.jar)
75
- do
76
- export CLASSPATH=${java_jar}:$CLASSPATH
77
- done
78
-
79
- PIDFILE=$RUN_DIR/$JOB_NAME.pid
80
-
81
- echo '$PATH' $PATH
data/lib/bosh/gen/generators/new_release_generator/templates/jobs/%project_name_hyphenated%/templates/helpers/ctl_utils.sh DELETED
@@ -1,156 +0,0 @@
1
- # Helper functions used by ctl scripts
2
-
3
- # links a job file (probably a config file) into a package
4
- # Example usage:
5
- # link_job_file_to_package config/redis.yml [config/redis.yml]
6
- # link_job_file_to_package config/wp-config.php wp-config.php
7
- link_job_file_to_package() {
8
- source_job_file=$1
9
- target_package_file=${2:-$source_job_file}
10
- full_package_file=$WEBAPP_DIR/${target_package_file}
11
-
12
- link_job_file ${source_job_file} ${full_package_file}
13
- }
14
-
15
- # links a job file (probably a config file) somewhere
16
- # Example usage:
17
- # link_job_file config/bashrc /home/vcap/.bashrc
18
- link_job_file() {
19
- source_job_file=$1
20
- target_file=$2
21
- full_job_file=$JOB_DIR/${source_job_file}
22
-
23
- echo link_job_file ${full_job_file} ${target_file}
24
- if [[ ! -f ${full_job_file} ]]
25
- then
26
- echo "file to link ${full_job_file} does not exist"
27
- else
28
- # Create/recreate the symlink to current job file
29
- # If another process is using the file, it won't be
30
- # deleted, so don't attempt to create the symlink
31
- mkdir -p $(dirname ${target_file})
32
- ln -nfs ${full_job_file} ${target_file}
33
- fi
34
- }
35
-
36
- # If loaded within monit ctl scripts then pipe output
37
- # If loaded from 'source ../utils.sh' then normal STDOUT
38
- redirect_output() {
39
- SCRIPT=$1
40
- mkdir -p /var/vcap/sys/log/monit
41
- exec 1>> /var/vcap/sys/log/monit/$SCRIPT.log
42
- exec 2>> /var/vcap/sys/log/monit/$SCRIPT.err.log
43
- }
44
-
45
- pid_guard() {
46
- pidfile=$1
47
- name=$2
48
-
49
- if [ -f "$pidfile" ]; then
50
- pid=$(head -1 "$pidfile")
51
-
52
- if [ -n "$pid" ] && [ -e /proc/$pid ]; then
53
- echo "$name is already running, please stop it first"
54
- exit 1
55
- fi
56
-
57
- echo "Removing stale pidfile..."
58
- rm $pidfile
59
- fi
60
- }
61
-
62
- wait_pid() {
63
- pid=$1
64
- try_kill=$2
65
- timeout=${3:-0}
66
- force=${4:-0}
67
- countdown=$(( $timeout * 10 ))
68
-
69
- echo wait_pid $pid $try_kill $timeout $force $countdown
70
- if [ -e /proc/$pid ]; then
71
- if [ "$try_kill" = "1" ]; then
72
- echo "Killing $pidfile: $pid "
73
- kill $pid
74
- fi
75
- while [ -e /proc/$pid ]; do
76
- sleep 0.1
77
- [ "$countdown" != '0' -a $(( $countdown % 10 )) = '0' ] && echo -n .
78
- if [ $timeout -gt 0 ]; then
79
- if [ $countdown -eq 0 ]; then
80
- if [ "$force" = "1" ]; then
81
- echo -ne "\nKill timed out, using kill -9 on $pid... "
82
- kill -9 $pid
83
- sleep 0.5
84
- fi
85
- break
86
- else
87
- countdown=$(( $countdown - 1 ))
88
- fi
89
- fi
90
- done
91
- if [ -e /proc/$pid ]; then
92
- echo "Timed Out"
93
- else
94
- echo "Stopped"
95
- fi
96
- else
97
- echo "Process $pid is not running"
98
- echo "Attempting to kill pid anyway..."
99
- kill $pid
100
- fi
101
- }
102
-
103
- wait_pidfile() {
104
- pidfile=$1
105
- try_kill=$2
106
- timeout=${3:-0}
107
- force=${4:-0}
108
- countdown=$(( $timeout * 10 ))
109
-
110
- if [ -f "$pidfile" ]; then
111
- pid=$(head -1 "$pidfile")
112
- if [ -z "$pid" ]; then
113
- echo "Unable to get pid from $pidfile"
114
- exit 1
115
- fi
116
-
117
- wait_pid $pid $try_kill $timeout $force
118
-
119
- rm -f $pidfile
120
- else
121
- echo "Pidfile $pidfile doesn't exist"
122
- fi
123
- }
124
-
125
- kill_and_wait() {
126
- pidfile=$1
127
- # Monit default timeout for start/stop is 30s
128
- # Append 'with timeout {n} seconds' to monit start/stop program configs
129
- timeout=${2:-25}
130
- force=${3:-1}
131
- if [[ -f ${pidfile} ]]
132
- then
133
- wait_pidfile $pidfile 1 $timeout $force
134
- else
135
- # TODO assume $1 is something to grep from 'ps ax'
136
- pid="$(ps auwwx | grep "$1" | awk '{print $2}')"
137
- wait_pid $pid 1 $timeout $force
138
- fi
139
- }
140
-
141
- check_nfs_mount() {
142
- opts=$1
143
- exports=$2
144
- mount_point=$3
145
-
146
- if grep -qs $mount_point /proc/mounts; then
147
- echo "Found NFS mount $mount_point"
148
- else
149
- echo "Mounting NFS..."
150
- mount $opts $exports $mount_point
151
- if [ $? != 0 ]; then
152
- echo "Cannot mount NFS from $exports to $mount_point, exiting..."
153
- exit 1
154
- fi
155
- fi
156
- }