subspace 2.5.10 → 3.0.0.rc1

Files changed (95)
  1. checksums.yaml +4 -4
  2. data/.ruby-version +1 -1
  3. data/CHANGELOG.md +12 -5
  4. data/README.md +57 -24
  5. data/UPGRADING.md +10 -0
  6. data/ansible/roles/common/defaults/main.yml +0 -1
  7. data/ansible/roles/common/files/sudoers-service +1 -1
  8. data/ansible/roles/common/tasks/main.yml +18 -7
  9. data/ansible/roles/common/tasks/no_swap.yml +26 -0
  10. data/ansible/roles/common/templates/motd +1 -1
  11. data/ansible/roles/common/templates/motd2 +1 -1
  12. data/ansible/roles/delayed_job/tasks/main.yml +1 -1
  13. data/ansible/roles/memcache/defaults/main.yml +2 -0
  14. data/ansible/roles/memcache/tasks/main.yml +16 -1
  15. data/ansible/roles/newrelic-infra/tasks/main.yml +3 -3
  16. data/ansible/roles/nginx/tasks/main.yml +12 -3
  17. data/ansible/roles/puma/tasks/main.yml +32 -20
  18. data/ansible/roles/puma/templates/puma-systemd.service +36 -0
  19. data/ansible/roles/puma/templates/puma-systemd.socket +14 -0
  20. data/ansible/roles/puma/templates/puma.rb +4 -2
  21. data/ansible/roles/rails/defaults/main.yml +0 -7
  22. data/ansible/roles/redis/tasks/main.yml +7 -0
  23. data/ansible/roles/resque/tasks/main.yml +11 -12
  24. data/ansible/roles/resque/templates/resque-systemd.service +10 -3
  25. data/ansible/roles/ruby-common/README.md +1 -1
  26. data/ansible/roles/ruby-common/tasks/main.yml +2 -17
  27. data/ansible/roles/sidekiq/defaults/main.yml +1 -1
  28. data/ansible/roles/sidekiq/tasks/main.yml +11 -15
  29. data/ansible/roles/sidekiq/templates/sidekiq-monit-rc +1 -1
  30. data/ansible/roles/sidekiq/templates/sidekiq-systemd.service +62 -0
  31. data/ansible/roles/tailscale/defaults/main.yml +2 -0
  32. data/ansible/roles/tailscale/tasks/main.yml +22 -0
  33. data/exe/subspace +1 -2
  34. data/lib/subspace/cli.rb +50 -14
  35. data/lib/subspace/commands/ansible.rb +11 -2
  36. data/lib/subspace/commands/base.rb +20 -5
  37. data/lib/subspace/commands/bootstrap.rb +16 -21
  38. data/lib/subspace/commands/configure.rb +2 -2
  39. data/lib/subspace/commands/exec.rb +20 -0
  40. data/lib/subspace/commands/init.rb +94 -45
  41. data/lib/subspace/commands/inventory.rb +45 -0
  42. data/lib/subspace/commands/maintain.rb +1 -1
  43. data/lib/subspace/commands/provision.rb +1 -3
  44. data/lib/subspace/commands/{vars.rb → secrets.rb} +6 -5
  45. data/lib/subspace/commands/ssh.rb +10 -8
  46. data/lib/subspace/commands/terraform.rb +83 -0
  47. data/lib/subspace/inventory.rb +144 -0
  48. data/lib/subspace/version.rb +1 -1
  49. data/subspace.gemspec +8 -2
  50. data/template/{provision → subspace}/.gitignore +3 -0
  51. data/template/{provision → subspace}/ansible.cfg.erb +2 -2
  52. data/template/subspace/group_vars/all.erb +28 -0
  53. data/template/subspace/group_vars/template.erb +26 -0
  54. data/template/subspace/inventory.yml.erb +11 -0
  55. data/template/{provision → subspace}/playbook.yml.erb +2 -5
  56. data/template/subspace/templates/authorized_keys.erb +1 -0
  57. data/template/subspace/terraform/.gitignore +2 -0
  58. data/template/subspace/terraform/template/main-oxenwagen.tf.erb +116 -0
  59. data/template/subspace/terraform/template/main-workhorse.tf.erb +41 -0
  60. data/template/subspace/terraformrc.erb +9 -0
  61. data/terraform/modules/s3_backend/README +2 -0
  62. data/terraform/modules/s3_backend/dynamodb.tf +1 -0
  63. data/terraform/modules/s3_backend/iam_user.tf +38 -0
  64. data/terraform/modules/s3_backend/main.tf +39 -0
  65. data/terraform/modules/s3_backend/state_bucket.tf +14 -0
  66. metadata +42 -53
  67. data/ansible/roles/monit/files/monit-http.conf +0 -3
  68. data/ansible/roles/monit/files/sudoers-monit +0 -1
  69. data/ansible/roles/monit/handlers/main.yml +0 -14
  70. data/ansible/roles/monit/tasks/main.yml +0 -34
  71. data/ansible/roles/mtpereira.passenger/.bumpversion.cfg +0 -7
  72. data/ansible/roles/mtpereira.passenger/.gitignore +0 -2
  73. data/ansible/roles/mtpereira.passenger/LICENSE +0 -20
  74. data/ansible/roles/mtpereira.passenger/README.md +0 -31
  75. data/ansible/roles/mtpereira.passenger/defaults/main.yml +0 -5
  76. data/ansible/roles/mtpereira.passenger/handlers/main.yml +0 -8
  77. data/ansible/roles/mtpereira.passenger/meta/.galaxy_install_info +0 -1
  78. data/ansible/roles/mtpereira.passenger/meta/main.yml +0 -21
  79. data/ansible/roles/mtpereira.passenger/tasks/apt.yml +0 -13
  80. data/ansible/roles/mtpereira.passenger/tasks/main.yml +0 -8
  81. data/ansible/roles/mtpereira.passenger/tasks/pkg.yml +0 -35
  82. data/ansible/roles/mtpereira.passenger/tasks/service.yml +0 -8
  83. data/ansible/roles/passenger/files/sudoers-passenger +0 -1
  84. data/ansible/roles/passenger/meta/main.yml +0 -6
  85. data/ansible/roles/passenger/tasks/main.yml +0 -5
  86. data/ansible/roles/postgis/defaults/main.yml +0 -2
  87. data/ansible/roles/puma/defaults/main.yml +0 -5
  88. data/ansible/roles/puma/meta/main.yml +0 -5
  89. data/ansible/roles/sidekiq/meta/main.yml +0 -5
  90. data/template/provision/group_vars/all.erb +0 -17
  91. data/template/provision/group_vars/template.erb +0 -11
  92. data/template/provision/host_vars/template.erb +0 -4
  93. /data/template/{provision → subspace}/hosts.erb +0 -0
  94. /data/template/{provision/vars → subspace/secrets}/template.erb +0 -0
  95. /data/template/{provision → subspace}/templates/application.yml.template +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 19de34c265cd2948a3dc40c63d8974f76e5a4ce63a014eb2fe19db81181cf2ea
- data.tar.gz: efeab17b834a3e09270c9ae2a94976f635e92eb890ae218324029d11dc139a92
+ metadata.gz: df006855b51c155540821c2e5fd82968ebf79580e3e85f1149eeef576bb63672
+ data.tar.gz: 3548c10a76fa9117c130d73e198c42097373a1f3942f47fa47722741e2368f92
  SHA512:
- metadata.gz: b1137de151178b6960cf83438b05b0ba674b7918b56387fd1dfc2932647231d2fdb5225dddbdbef47849c84aca0dd6b340124c917396cce4a4022fea31bf0417
- data.tar.gz: a9a78b1cae0a192319455041009f0bbe0ba1f928262b96c8fe357dd21dc6b4d5557ce375988ce44735cdefedae7d5f83a98573e7d87394b27a1918976786a1fb
+ metadata.gz: c5ed6d9c43e07d68482e1481378e9295bda85c2b133cd50ac4fb18787c6d755945ca4f58c83031ade513e0d01233ee088032506d427f080a07730aaf9deb22df
+ data.tar.gz: 7d7779fe933a7297a4a682cc3a76fce740c74d2bd84306760ab7248e50c34251e3401039082900918a588660b1b82afd76b42072ae13bb23dafac37af2da4c9b
data/.ruby-version CHANGED
@@ -1 +1 @@
- 2.7.4
+ 3.1.0
data/CHANGELOG.md CHANGED
@@ -11,16 +11,23 @@ This project attempts to follow [semantic versioning](https://semver.org/).
  * Stops showing color if you `sudo su`
 
  ## Unreleased
- ## 2.5.10
- * Backport the fix for ansible's change of get_url checksum arguments
 
- ## 2.5.9
- * backport disabling mitogen
+ ## 3.0.0rc1
+ * Added infrastructure management via Terraform!
+ * Added new `subspace exec` command for manual remote management
+ * BREAKING: Consolidated inventory file into config/provision/inventory.env.yml
+ * No more hosts file
+ * No more host_vars directory
+ * No more group_vars directory
+ * All of the host/group configuration is in that one file now!
+ * BREAKING: `subspace vars` is now `subspace secrets`
+ * BREAKING: sidekiq_concurrency renamed to sidekiq_workers, default changed from 10 -> 1
+ * BREAKING: swap_space variable must be defined for the `common` ansible role (previously defaulted to 512MB)
+ * BREAKING: removed defaults from rails, postgis, puma roles
 
  ## 2.5.8
  * Add a new role for configuring a monit-based resque server
  * Auto-detect mitogen for speed
-
  ## 2.5.7
  * Add ability to set the timezone for servers instead of forcing to Central Time
  * Update puma configuration to support puma 5 with puma-daemon
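Since the consolidated inventory is the biggest breaking change in this release, here is a rough idea of the shape: a standard ansible YAML inventory looks like the sketch below. This is only an illustration with placeholder hosts and values; the exact keys subspace writes come from its new inventory.yml.erb template and may differ.

    # Generic ansible YAML inventory (placeholder values, not subspace's exact schema)
    all:
      hosts:
        web1:
          ansible_host: 203.0.113.10
          ansible_user: deploy
      children:
        web:
          hosts:
            web1:
      vars:
        rails_env: production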
data/README.md CHANGED
@@ -32,13 +32,23 @@ Or install it yourself as:
 
  $ gem install subspace
 
+ ### Mitogen
+ Optionally, you can install a python/pip package called "Mitogen" which dramatically speeds up running ansible over ssh. See [here](https://github.com/mitogen-hq/mitogen/blob/master/docs/ansible_detailed.rst) for details.
+
+ pip install mitogen
+
+ Subspace will try to detect whether mitogen is present and use it if it can. If mitogen causes problems (it sometimes does, depending on system versions, and particularly when a brand-new version of ansible comes out before mitogen has been updated), you can disable it:
+
+ DISABLE_MITOGEN=1 subspace provision env
  ## Usage
 
  ### `subspace init`
 
- Initialize the project for subspace. Creates `config/provision` with all
+ Initialize the project for subspace. Creates `config/subspace` with all
  necessary files.
 
+ Subspace 3 supports terraform. You will need to manually create an IAM user with administrative access to the target AWS environment for terraform to use.
+
  ### `subspace bootstrap <environment>`
 
  Ensures the $HOME/.ssh directory is present and ensures python is installed.
@@ -56,7 +66,7 @@ At the time of this writing, we pass through the `ansible-playbook` "tags" and
  "start-at-task" options. The tags option is probably the most useful.
 
  e.g. To run only the alienvault tasks (all of which have been tagged with the
- 'alienvault' tag): `subspace provision dev --tags=alienvault`
+ 'alienvault' tag): `subspace provision staging --tags=alienvault`
 
  ### `subspace maintain <environment>`
 
@@ -109,7 +119,7 @@ ENABLE_SOME_FEATURE: false
  development:
  INSECURE_VARIABLE: "this isn't secret"
 
- dev:
+ staging:
  INSECURE_VARIABLE: "but it changes"
 
  production:
@@ -119,7 +129,7 @@ production:
 
  Further, you can use this extremely useful command to create a local copy of `config/application.yml`:
 
- # Create a local copy of config/application.yml with the secrets encrypted in vars/development.yml
+ # Create a local copy of config/application.yml with the secrets encrypted in secrets/development.yml
  $ subspace vars development --create
 
  This can get you up and running in development securely; the only thing you need to distribute to new team members is the vault password. Grab it from a teammate and put it into `config/provision/.vault_pass`
@@ -138,7 +148,7 @@ Then,
 
  If you get an error saying you need a vault password file, you should be able to find it in 1Password. You might also need to update `ansible`.
 
- You'll want to do this for each environment (ie: `subspace provision qa`, etc.). Best to start with dev and work your way up.
+ You'll want to do this for each environment (ie: `subspace provision qa`, etc.). Best to start with staging and work your way up.
 
  # Host configuration
 
@@ -202,8 +212,7 @@ Aside from basic statistics like free memory, disk, load averages, etc, we have
  3. If nginx is installed, it will collect stats from the "status port"
  4. (TODO) add something for pumas
  5. (TODO) add something for sidekiq
- 6. (TODO) add something for memcache
- 7. If you're using our standard lograge format, you can enable lograge collection which will provide stats on request count and timers (db/view/total)
+ 6. If you're using our standard lograge format, you can enable lograge collection which will provide stats on request count and timers (db/view/total)
 
  rails_lograge: true
 
@@ -274,6 +283,14 @@ Installs logrotate and lets you configure logs for automatic rotation. Example
 
  ## memcache
 
+ Installs memcache on the server. By default, memcache will only listen on localhost, which needs to be changed if other servers need to connect.
+
+ # Default Value
+ memcache_bind: "127.0.0.1"
+
+ # bind to all interfaces
+ memcache_bind: "0.0.0.0"
+
  ## monit
 
  ## mysql
@@ -286,6 +303,7 @@ Installs logrotate and lets you configure logs for automatic rotation. Example
  This role will install the next-gen "Newrelic One" infrastructure agent which can perform a few different functions for newrelic. The previous "newrelic" role is deprecated.
 
  Variables:
+
  # Required, the newrelic license key you get after signing up.
  newrelic_license: "longhashthingyougetfromnewrelichere"
  # Optional - send logs to newrelic one's log aggregator.
@@ -328,7 +346,14 @@ Optional variables:
 
  ## nodejs
 
- Used to install recent version of NodeJS. Must set `nodejs_version`. e.g. `nodejs_version: "8.x"`
+ Used to install different versions of NodeJS. This uses NodeSource's apt repositories. You must define a variable called `nodejs_version` and choose a major version supported by NodeSource:
+
+ nodejs_version: 14.x
+ nodejs_version: 17.x
+ nodejs_version: lts
+ nodejs_version: current
+
+ The full list of distributions is here: https://github.com/nodesource/distributions#installation-instructions
 
  ## papertrail
 
@@ -344,33 +369,42 @@ Used to install recent version of NodeJS. Must set `nodejs_version`. e.g. `nodej
  database_user: "{{project_name}}"
 
  ## puma
+ Use the puma app server for your rails app. Usually combined with nginx to serve as a static file server and reverse proxy.
 
- add puma gem to gemfile
- add config/puma to symlinks in deploy.rb
+ **Prerequisites:**
+ - add `gem puma` to your gemfile
+ - add `config/puma/` to the `linked_dirs` config in capistrano's `deploy.rb`
 
+ This role will generate a reasonable `puma.rb` and configure it to be controlled by systemd.
+
+ **Variables:**
+
+ puma_workers: 1 # Puma process count (usually == vCPU count)
+ puma_min_threads: 4 # Min threads/process
+ puma_max_threads: 16 # Max threads/process
 
  ## rails
 
  Provisions for a rails app. This one is probably pretty important.
 
- Default values (these are usually fine)
+ We no longer provide default values, so make sure to define all of the following variables:
 
+ rails_env: production
  database_pool: 5
  database_name: "{{project_name}}_{{rails_env}}"
  database_user: "{{project_name}}"
+ database_host: localhost
+ database_adapter: postgresql
+ database_password: # usually defined in the encrypted vault
  job_queues:
  - default
  - mailers
 
- Customize:
-
- rails_env: [whatever]
-
  ## redis
  Installs redis on the server.
 
- # Change to * if you want tthis available everywhere.
+ # Change to * if you want this available everywhere instead of localhost
  redis_bind: 127.0.0.1
 
  ## resque
@@ -381,12 +415,15 @@ Install monitoring and automatic startup for resque workers via monit. You MUST
  - default
  - mailers
  - exports
+
+ redis_bind: "*"
+
  ## ruby-common
 
  Installs ruby on the machine. You can set a version by picking off the download url and sha hash from ruby-lang.org
 
  ruby_version: ruby-2.4.1
- ruby_checksum: sha256:a330e10d5cb5e53b3a0078326c5731888bb55e32c4abfeb27d9e7f8e5d000250
+ ruby_checksum: a330e10d5cb5e53b3a0078326c5731888bb55e32c4abfeb27d9e7f8e5d000250
  ruby_download_location: 'https://cache.ruby-lang.org/pub/ruby/2.4/ruby-2.4.1.tar.gz'
  bundler_version: 2.0.1
 
@@ -395,13 +432,13 @@ Installs ruby on the machine. You can set a version by picking off the download
 
  This will install a monit script that keeps sidekiq running. We spawn one sidekiq instance that manages as many queues as you need. Variables of note:
 
- # Process these background job queues
+ # Process these queues on this server
  job_queues:
  - default
  - mailers
 
  # Number of sidekiq *processes* to run
- sidekiq_concurrency: 1
+ sidekiq_workers: 1
 
  * Note that as of v0.4.13, we now also add a unique job queue for each host with its hostname. This is handy if you need to assign a job to a specific host. In general you should use named queues, but occasionally this is useful and there's no harm in having it there unused.
 
@@ -425,11 +462,7 @@ In order to dramatically speed up ansible, you can install Mitogen: https://gith
 
  pip install -g mitogen
 
- Subspace will automatically detect this and update your ansible.cfg file so it is blazing fast. Sometimes this can cause issues with older servers that have weird pythons, so if you have mitogen installed locally but dont wan't to use it, you can set an environment variable:
-
- DISABLE_MITOGEN=1 subspace provision staging
-
-
+ Subspace will automatically detect this and update your ansible.cfg file so it is blazing fast.
 
 
  ## Directory Structure
data/UPGRADING.md ADDED
@@ -0,0 +1,10 @@
+ # Subspace Upgrade Guide
+
+
+ # 2.x -> 3.0
+
+ - Run subspace init
+ - Someone write subspace upgrade3 please
+
+ # 2.x -> 2.y
+ We strive to follow semver so upgrading from 2.x to 2.y should be safe. However, since use of this tool can affect your production infrastructure, we highly recommend reviewing the [CHANGELOG](CHANGELOG.md) when upgrading.
data/ansible/roles/common/defaults/main.yml CHANGED
@@ -1,5 +1,4 @@
  ---
- swap_space: 512M
  deploy_user: deploy
  send_stats: false
  timezone: America/Chicago
data/ansible/roles/common/files/sudoers-service CHANGED
@@ -1 +1 @@
- deploy ALL=(root) NOPASSWD: /usr/bin/systemctl, /usr/sbin/service
+ deploy ALL=(root) NOPASSWD: /usr/bin/systemctl, /usr/sbin/service, /bin/systemctl
data/ansible/roles/common/tasks/main.yml CHANGED
@@ -64,6 +64,14 @@
  tags:
  - maintenance
 
+ - name: apt-get update
+ apt: update_cache=yes cache_valid_time=86400
+ become: true
+ tags:
+ - upgrade
+ - maintenance
+ ignore_errors: yes
+
  - name: install aptitude
  apt:
  pkg: aptitude
@@ -72,12 +80,11 @@
  tags:
  - maintenance
 
- - name: apt-get update
- apt: update_cache=yes cache_valid_time=86400
- become: true
- tags:
- - upgrade
- - maintenance
+ - name: "Ensure systemd is installed"
+ apt:
+ name: systemd
+ state: latest
+ update_cache: yes
 
  - name: Add ppa:ondrej/nginx apt repository for TLS 1.3
  apt_repository:
@@ -276,7 +283,7 @@
 
  - name: Update authorized_keys for deploy user
  copy:
- src: authorized_keys
+ src: templates/authorized_keys
  dest: "/home/{{deploy_user}}/.ssh/authorized_keys"
  owner: "{{deploy_user}}"
  become: true
@@ -320,3 +327,7 @@
  - stats
 
  - import_tasks: swap.yml
+ when: swap_space is defined
+
+ - import_tasks: no_swap.yml
+ when: swap_space is not defined
data/ansible/roles/common/tasks/no_swap.yml ADDED
@@ -0,0 +1,26 @@
+ ---
+ - name: turn off swap
+ become: true
+ command: swapoff -a
+
+ - name: set swapiness
+ become: true
+ sysctl:
+ name: vm.swappiness
+ value: "0"
+
+ - name: delete swap file
+ become: true
+ file:
+ path: /swapfile
+ state: absent
+
+ - name: remove from fstab
+ become: true
+ lineinfile:
+ dest: /etc/fstab
+ regexp: /swapfile
+ line: "/swapfile none swap sw 0 0"
+ state: absent
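Taken together with the change to common/tasks/main.yml above, swap is now opt-in: define swap_space and swap.yml runs as before; leave it undefined and these no_swap.yml tasks disable swap and remove /swapfile. A minimal sketch of the opt-in, assuming a group_vars file from the new config/subspace layout (path and size are placeholders):

    # config/subspace/group_vars/all   (placeholder path)
    swap_space: 1G    # pick any size; omit the variable entirely to run no_swap.yml instead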
data/ansible/roles/common/templates/motd CHANGED
@@ -8,6 +8,6 @@ This server brought to you by:
  ~~~ https://github.com/tenforwardconsulting/subspace ~~~
 
  If you need to make configuration changes to the server, please modify the
- config/provision directory in the app or risk the changes disappearing.
+ config/subspace directory in the app or risk the changes disappearing.
 
  Last subspace run: {{ansible_date_time.iso8601}}
data/ansible/roles/common/templates/motd2 CHANGED
@@ -21,4 +21,4 @@
  https://github.com/tenforwardconsulting/subspace
 
  If you need to make configuration changes to the server, please modify the
- config/provision directory in the app or risk the changes dissapearing.
+ config/subspace directory in the app or risk the changes disappearing.
data/ansible/roles/delayed_job/tasks/main.yml CHANGED
@@ -40,6 +40,6 @@
  pause:
  seconds: 3
 
- - name: restart_monit
+ - name: restart monit services
  shell: monit restart all
  become: true
data/ansible/roles/memcache/defaults/main.yml ADDED
@@ -0,0 +1,2 @@
+ ---
+ memcache_bind: 127.0.0.1
data/ansible/roles/memcache/tasks/main.yml CHANGED
@@ -3,4 +3,19 @@
  apt: update_cache=yes cache_valid_time=86400
 
  - name: Install Memcached.
- apt: name=memcached state=present
+ apt:
+ name: memcached
+ state: present
+
+ - name: Configure memcache bind address
+ lineinfile:
+ path: /etc/memcached.conf
+ regex: "^(#\\s*)?-l"
+ state: present
+ line: "-l {{memcache_bind}}"
+
+ - name: restart memcached
+ systemd:
+ name: memcached
+ state: restarted
+ enabled: yes
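With the new default above, memcached only listens on 127.0.0.1. The README section earlier in this diff shows the override, which would normally live in your group_vars, e.g. (file path is a placeholder):

    # config/subspace/group_vars/all   (placeholder path)
    memcache_bind: "0.0.0.0"    # listen on all interfaces so other servers can reach memcached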
data/ansible/roles/newrelic-infra/tasks/main.yml CHANGED
@@ -1,12 +1,12 @@
  ---
  - name: Add New Relic apt key
  apt_key:
- url: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg
+ url: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg
  state: present
  become: true
 
  - name: create license key
- copy:
+ copy:
  dest: "/etc/newrelic-infra.yml"
  content: |
  license_key: {{newrelic_license}}
@@ -27,7 +27,7 @@
  - name: Configure application log forwarding if enabled
  when: "{{ newrelic_logs|length }}"
  become: true
- template:
+ template:
  dest: "/etc/newrelic-infra/logging.d/subspace.yml"
  src: logs.yml.j2
  notify: Restart newrelic-infra
data/ansible/roles/nginx/tasks/main.yml CHANGED
@@ -25,10 +25,19 @@
  state: link
  become: true
 
- - name: Restart nginx
- action: service name=nginx state=restarted
+ - name: Restart nginx (systemd)
  become: true
+ systemd:
+ state: restarted
+ daemon_reload: yes
+ name: nginx
+
+ - name: Enable nginx to start on reboot
+ become: true
+ systemd:
+ name: nginx
+ enabled: true
 
  - name: Nginx is installed
  set_fact:
- nginx_installed: true
+ nginx_installed: true
data/ansible/roles/puma/tasks/main.yml CHANGED
@@ -6,31 +6,43 @@
  tags: puma
 
  - name: Add puma shared/config
- template: src=puma.rb dest=/u/apps/{{project_name}}/shared/config/puma/{{rails_env}}.rb group=deploy owner=deploy force=yes mode=755
+ template:
+ src: puma.rb
+ dest: /u/apps/{{project_name}}/shared/config/puma/{{rails_env}}.rb
+ group: deploy
+ owner: deploy
+ force: yes
+ mode: 0755
  tags: puma
 
  - name: Make shared/tmp/sockets
  file: path=/u/apps/{{project_name}}/shared/tmp/sockets group=deploy owner=deploy state=directory
  tags: tmp
 
- - name: Install puma monit script
+ - name: Install systemd script
+ become: true
+ template:
+ src: puma-systemd.service
+ dest: /etc/systemd/system/puma.service
+
+ - name: Install systemd socket
+ become: true
  template:
- src: puma-monit-rc
- dest: /etc/monit/conf-available/puma_{{project_name}}_{{rails_env}}
-
- - name: Clean up old puma monit scripts
- shell: rm -f /etc/monit/conf.d/puma_*
-
- - name: mkdir /etc/monit/conf-enabled
- file:
- path: /etc/monit/conf-enabled
- state: directory
-
- - name: Enable puma monit script
- file:
- src: /etc/monit/conf-available/puma_{{project_name}}_{{rails_env}}
- dest: /etc/monit/conf-enabled/puma_{{project_name}}_{{rails_env}}
- state: link
- notify:
- - restart_monit
+ src: puma-systemd.socket
+ dest: /etc/systemd/system/puma.socket
+
+ - name: enable systemd service
+ become: true
+ systemd:
+ name: puma.service
+ enabled: yes
+ daemon_reload: yes
+
+ - name: disable systemd socket
+ become: true
+ systemd:
+ name: puma.socket
+ enabled: no
+ daemon_reload: yes
+
 
data/ansible/roles/puma/templates/puma-systemd.service ADDED
@@ -0,0 +1,36 @@
+ [Unit]
+ Description=Puma HTTP Server
+ After=network.target
+
+ # Uncomment for socket activation (see below)
+ # Requires=puma.socket
+
+ [Service]
+ # Puma supports systemd's `Type=notify` and watchdog service
+ # monitoring, if the [sd_notify](https://github.com/agis/ruby-sdnotify) gem is installed,
+ # as of Puma 5.1 or later.
+ # On earlier versions of Puma or JRuby, change this to `Type=simple` and remove
+ # the `WatchdogSec` line.
+ Type=simple
+
+ # If your Puma process locks up, systemd's watchdog will restart it within seconds.
+ # WatchdogSec=10
+
+ # Preferably configure a non-privileged user
+ User=deploy
+
+ WorkingDirectory=/u/apps/{{project_name}}/current
+
+ # Helpful for debugging socket activation, etc.
+ # Environment=PUMA_DEBUG=1
+
+ # SystemD will not run puma even if it is in your path. You must specify
+ # an absolute path to puma. For example /usr/local/bin/puma
+
+ # Variant: Use `bundle exec --keep-file-descriptors puma` instead of binstub
+ ExecStart=/usr/local/bin/bundle exec --keep-file-descriptors puma -C /u/apps/{{project_name}}/current/config/puma/{{rails_env}}.rb
+
+ Restart=always
+
+ [Install]
+ WantedBy=multi-user.target
data/ansible/roles/puma/templates/puma-systemd.socket ADDED
@@ -0,0 +1,14 @@
+ # /etc/systemd/system/puma.socket
+
+ [Unit]
+ Description=Puma HTTP Server Accept Sockets
+
+ [Socket]
+ ListenStream=0.0.0.0:9292
+
+ NoDelay=true
+ ReusePort=true
+ Backlog=1024
+
+ [Install]
+ WantedBy=sockets.target
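Note that the role above enables puma.service but deliberately leaves puma.socket disabled, and the service template ships with Requires=puma.socket commented out. If you wanted to experiment with systemd socket activation, a hedged sketch of the extra task you would add yourself (this is not something the role does; the task name and placement are assumptions, and you would also uncomment Requires=puma.socket in the service template):

    # Hypothetical additional task, not part of the puma role:
    - name: enable puma socket activation
      become: true
      systemd:
        name: puma.socket
        enabled: yes
        daemon_reload: yes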
data/ansible/roles/puma/templates/puma.rb CHANGED
@@ -1,10 +1,14 @@
+ {% if monit_installed is defined %}
  begin
  # Needed for Puma 5 + puma-daemon, but built in to Puma 4
  # https://github.com/kigster/puma-daemon
+ # however not needed if we're using systemd which is the future
  require 'puma/daemon'
  rescue LoadError => e
+ daemonize
  # Puma 4 has `daemonize` built in
  end
+ {% endif %}
 
  # Change to match your CPU core count
  workers {{puma_workers}}
@@ -23,8 +27,6 @@ bind "tcp://127.0.0.1:9292"
  # Logging
  stdout_redirect "#{app_dir}/log/puma.stdout.log", "#{app_dir}/log/puma.stderr.log", true
 
- # Set master PID and state locations
- daemonize
  pidfile "/u/apps/{{project_name}}/shared/tmp/pids/puma.pid"
  state_path "/u/apps/{{project_name}}/shared/tmp/pids/puma.state"
  activate_control_app
data/ansible/roles/rails/defaults/main.yml CHANGED
@@ -1,9 +1,2 @@
  ---
- database_pool: 5
- database_name: "{{project_name}}_{{rails_env}}"
- database_user: "{{project_name}}"
- database_adapter: postgresql
- job_queues:
- - default
- - mailers
  send_stats: false
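With these defaults removed, each project must now define the full database/queue configuration itself. A minimal sketch mirroring the variable list from the README section above (file path and values are placeholders; database_password normally comes from the encrypted secrets/vault file):

    # config/subspace/group_vars/production   (placeholder path and values)
    rails_env: production
    database_adapter: postgresql
    database_host: localhost
    database_name: "{{project_name}}_{{rails_env}}"
    database_user: "{{project_name}}"
    database_pool: 5
    database_password: "{{vault_database_password}}"   # assumed to be defined in the encrypted vault
    job_queues:
      - default
      - mailers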
data/ansible/roles/redis/tasks/main.yml CHANGED
@@ -5,6 +5,7 @@
  pkg: redis-server
  state: present
  update_cache: true
+
  - name: Set bind IP
  become: true
  lineinfile:
@@ -12,3 +13,9 @@
  regexp: '^bind '
  line: 'bind {{redis_bind}}'
  state: present
+
+ - name: restart redis
+ become: true
+ systemd:
+ name: redis
+ state: restarted
data/ansible/roles/resque/tasks/main.yml CHANGED
@@ -1,15 +1,14 @@
  ---
- - name: Install resque monit script
- template:
- src: resque-monit-rc
- dest: /etc/monit/conf-available/resque_{{project_name}}_{{rails_env}}
+ - name: Install systemd resque script
  become: true
+ template:
+ src: resque-systemd.service
+ dest: /etc/systemd/system/resque.service
 
- - name: Enable resque monit script
- file:
- src: /etc/monit/conf-available/resque_{{project_name}}_{{rails_env}}
- dest: /etc/monit/conf-enabled/resque_{{project_name}}_{{rails_env}}
- state: link
- notify:
- - reload_monit
- - restart_monit
+ - name: Enable systemd resque service
+ become: true
+ systemd:
+ name: resque
+ daemon_reload: true
+ enabled: yes
+ state: started