subspace 2.5.10 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. checksums.yaml +4 -4
  2. data/.ruby-version +1 -1
  3. data/CHANGELOG.md +22 -5
  4. data/README.md +105 -51
  5. data/UPGRADING.md +10 -0
  6. data/ansible/roles/common/defaults/main.yml +0 -1
  7. data/ansible/roles/common/files/sudoers-service +1 -1
  8. data/ansible/roles/common/tasks/main.yml +18 -7
  9. data/ansible/roles/common/tasks/no_swap.yml +26 -0
  10. data/ansible/roles/common/templates/motd +1 -1
  11. data/ansible/roles/common/templates/motd2 +1 -1
  12. data/ansible/roles/delayed_job/tasks/main.yml +21 -38
  13. data/ansible/roles/delayed_job/templates/delayed-job-systemd.service +33 -0
  14. data/ansible/roles/letsencrypt/defaults/main.yml +7 -7
  15. data/ansible/roles/letsencrypt/tasks/main.yml +18 -24
  16. data/ansible/roles/memcache/defaults/main.yml +2 -0
  17. data/ansible/roles/memcache/tasks/main.yml +16 -1
  18. data/ansible/roles/newrelic-infra/tasks/main.yml +3 -3
  19. data/ansible/roles/nginx/tasks/main.yml +12 -3
  20. data/ansible/roles/puma/tasks/main.yml +32 -20
  21. data/ansible/roles/puma/templates/puma-systemd.service +37 -0
  22. data/ansible/roles/puma/templates/puma-systemd.socket +14 -0
  23. data/ansible/roles/puma/templates/puma.rb +4 -2
  24. data/ansible/roles/rails/defaults/main.yml +0 -7
  25. data/ansible/roles/redis/tasks/main.yml +28 -3
  26. data/ansible/roles/resque/tasks/main.yml +11 -12
  27. data/ansible/roles/resque/templates/resque-systemd.service +10 -3
  28. data/ansible/roles/ruby-common/tasks/main.yml +1 -16
  29. data/ansible/roles/sidekiq/defaults/main.yml +1 -1
  30. data/ansible/roles/sidekiq/tasks/main.yml +11 -15
  31. data/ansible/roles/sidekiq/templates/sidekiq-monit-rc +1 -1
  32. data/ansible/roles/sidekiq/templates/sidekiq-systemd.service +63 -0
  33. data/ansible/roles/tailscale/defaults/main.yml +2 -0
  34. data/ansible/roles/tailscale/tasks/main.yml +22 -0
  35. data/bin/console +0 -4
  36. data/exe/subspace +1 -2
  37. data/lib/subspace/cli.rb +51 -14
  38. data/lib/subspace/commands/ansible.rb +12 -3
  39. data/lib/subspace/commands/base.rb +20 -5
  40. data/lib/subspace/commands/bootstrap.rb +16 -21
  41. data/lib/subspace/commands/configure.rb +2 -2
  42. data/lib/subspace/commands/exec.rb +20 -0
  43. data/lib/subspace/commands/init.rb +94 -45
  44. data/lib/subspace/commands/inventory.rb +54 -0
  45. data/lib/subspace/commands/maintain.rb +1 -1
  46. data/lib/subspace/commands/provision.rb +1 -3
  47. data/lib/subspace/commands/secrets.rb +69 -0
  48. data/lib/subspace/commands/ssh.rb +14 -8
  49. data/lib/subspace/commands/terraform.rb +83 -0
  50. data/lib/subspace/inventory.rb +144 -0
  51. data/lib/subspace/version.rb +1 -1
  52. data/subspace.gemspec +8 -2
  53. data/template/{provision → subspace}/.gitignore +3 -0
  54. data/template/{provision → subspace}/ansible.cfg.erb +2 -2
  55. data/template/subspace/group_vars/all.erb +28 -0
  56. data/template/subspace/group_vars/template.erb +26 -0
  57. data/template/{provision → subspace}/hosts.erb +0 -0
  58. data/template/subspace/inventory.yml.erb +11 -0
  59. data/template/{provision → subspace}/playbook.yml.erb +2 -5
  60. data/template/{provision/vars → subspace/secrets}/template.erb +0 -0
  61. data/template/{provision → subspace}/templates/application.yml.template +0 -0
  62. data/template/subspace/templates/authorized_keys.erb +1 -0
  63. data/template/subspace/terraform/.gitignore +2 -0
  64. data/template/subspace/terraform/template/main-oxenwagen.tf.erb +116 -0
  65. data/template/subspace/terraform/template/main-workhorse.tf.erb +41 -0
  66. data/template/subspace/terraformrc.erb +9 -0
  67. data/terraform/modules/s3_backend/README +2 -0
  68. data/terraform/modules/s3_backend/dynamodb.tf +1 -0
  69. data/terraform/modules/s3_backend/iam_user.tf +38 -0
  70. data/terraform/modules/s3_backend/main.tf +39 -0
  71. data/terraform/modules/s3_backend/state_bucket.tf +14 -0
  72. metadata +41 -55
  73. data/ansible/roles/awscli/tasks/main.yml +0 -10
  74. data/ansible/roles/delayed_job/meta/main.yml +0 -5
  75. data/ansible/roles/letsencrypt_dns/defaults/main.yml +0 -4
  76. data/ansible/roles/letsencrypt_dns/tasks/main.yml +0 -133
  77. data/ansible/roles/monit/files/monit-http.conf +0 -3
  78. data/ansible/roles/monit/files/sudoers-monit +0 -1
  79. data/ansible/roles/monit/handlers/main.yml +0 -14
  80. data/ansible/roles/monit/tasks/main.yml +0 -34
  81. data/ansible/roles/mtpereira.passenger/.bumpversion.cfg +0 -7
  82. data/ansible/roles/mtpereira.passenger/.gitignore +0 -2
  83. data/ansible/roles/mtpereira.passenger/LICENSE +0 -20
  84. data/ansible/roles/mtpereira.passenger/README.md +0 -31
  85. data/ansible/roles/mtpereira.passenger/defaults/main.yml +0 -5
  86. data/ansible/roles/mtpereira.passenger/handlers/main.yml +0 -8
  87. data/ansible/roles/mtpereira.passenger/meta/.galaxy_install_info +0 -1
  88. data/ansible/roles/mtpereira.passenger/meta/main.yml +0 -21
  89. data/ansible/roles/mtpereira.passenger/tasks/apt.yml +0 -13
  90. data/ansible/roles/mtpereira.passenger/tasks/main.yml +0 -8
  91. data/ansible/roles/mtpereira.passenger/tasks/pkg.yml +0 -35
  92. data/ansible/roles/mtpereira.passenger/tasks/service.yml +0 -8
  93. data/ansible/roles/passenger/files/sudoers-passenger +0 -1
  94. data/ansible/roles/passenger/meta/main.yml +0 -6
  95. data/ansible/roles/passenger/tasks/main.yml +0 -5
  96. data/ansible/roles/postgis/defaults/main.yml +0 -2
  97. data/ansible/roles/puma/defaults/main.yml +0 -5
  98. data/ansible/roles/puma/meta/main.yml +0 -5
  99. data/ansible/roles/sidekiq/meta/main.yml +0 -5
  100. data/lib/subspace/commands/vars.rb +0 -48
  101. data/template/provision/group_vars/all.erb +0 -17
  102. data/template/provision/group_vars/template.erb +0 -11
  103. data/template/provision/host_vars/template.erb +0 -4
@@ -0,0 +1,2 @@
1
+ ---
2
+ memcache_bind: 127.0.0.1
@@ -3,4 +3,19 @@
3
3
  apt: update_cache=yes cache_valid_time=86400
4
4
 
5
5
  - name: Install Memcached.
6
- apt: name=memcached state=present
6
+ apt:
7
+ name: memcached
8
+ state: present
9
+
10
+ - name: Configure memcache bind address
11
+ lineinfile:
12
+ path: /etc/memcached.conf
13
+ regex: "^(#\\s*)?-l"
14
+ state: present
15
+ line: "-l {{memcache_bind}}"
16
+
17
+ - name: restart memcached
18
+ systemd:
19
+ name: memcached
20
+ state: restarted
21
+ enabled: yes
@@ -1,12 +1,12 @@
1
1
  ---
2
2
  - name: Add New Relic apt key
3
3
  apt_key:
4
- url: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg
4
+ url: https://download.newrelic.com/infrastructure_agent/gpg/newrelic-infra.gpg
5
5
  state: present
6
6
  become: true
7
7
 
8
8
  - name: create license key
9
- copy:
9
+ copy:
10
10
  dest: "/etc/newrelic-infra.yml"
11
11
  content: |
12
12
  license_key: {{newrelic_license}}
@@ -27,7 +27,7 @@
27
27
  - name: Configure application log forwarding if enabled
28
28
  when: "{{ newrelic_logs|length }}"
29
29
  become: true
30
- template:
30
+ template:
31
31
  dest: "/etc/newrelic-infra/logging.d/subspace.yml"
32
32
  src: logs.yml.j2
33
33
  notify: Restart newrelic-infra
@@ -25,10 +25,19 @@
25
25
  state: link
26
26
  become: true
27
27
 
28
- - name: Restart nginx
29
- action: service name=nginx state=restarted
28
+ - name: Restart nginx (systemd)
30
29
  become: true
30
+ systemd:
31
+ state: restarted
32
+ daemon_reload: yes
33
+ name: nginx
34
+
35
+ - name: Enable nginx to start on reboot
36
+ become: true
37
+ systemd:
38
+ name: nginx
39
+ enabled: true
31
40
 
32
41
  - name: Nginx is installed
33
42
  set_fact:
34
- nginx_installed: true
43
+ nginx_installed: true
@@ -6,31 +6,43 @@
6
6
  tags: puma
7
7
 
8
8
  - name: Add puma shared/config
9
- template: src=puma.rb dest=/u/apps/{{project_name}}/shared/config/puma/{{rails_env}}.rb group=deploy owner=deploy force=yes mode=755
9
+ template:
10
+ src: puma.rb
11
+ dest: /u/apps/{{project_name}}/shared/config/puma/{{rails_env}}.rb
12
+ group: deploy
13
+ owner: deploy
14
+ force: yes
15
+ mode: 0755
10
16
  tags: puma
11
17
 
12
18
  - name: Make shared/tmp/sockets
13
19
  file: path=/u/apps/{{project_name}}/shared/tmp/sockets group=deploy owner=deploy state=directory
14
20
  tags: tmp
15
21
 
16
- - name: Install puma monit script
22
+ - name: Install systemd script
23
+ become: true
24
+ template:
25
+ src: puma-systemd.service
26
+ dest: /etc/systemd/system/puma.service
27
+
28
+ - name: Install systemd socket
29
+ become: true
17
30
  template:
18
- src: puma-monit-rc
19
- dest: /etc/monit/conf-available/puma_{{project_name}}_{{rails_env}}
20
-
21
- - name: Clean up old puma monit scripts
22
- shell: rm -f /etc/monit/conf.d/puma_*
23
-
24
- - name: mkdir /etc/monit/conf-enabled
25
- file:
26
- path: /etc/monit/conf-enabled
27
- state: directory
28
-
29
- - name: Enable puma monit script
30
- file:
31
- src: /etc/monit/conf-available/puma_{{project_name}}_{{rails_env}}
32
- dest: /etc/monit/conf-enabled/puma_{{project_name}}_{{rails_env}}
33
- state: link
34
- notify:
35
- - restart_monit
31
+ src: puma-systemd.socket
32
+ dest: /etc/systemd/system/puma.socket
33
+
34
+ - name: enable systemd service
35
+ become: true
36
+ systemd:
37
+ name: puma.service
38
+ enabled: yes
39
+ daemon_reload: yes
40
+
41
+ - name: disable systemd socket
42
+ become: true
43
+ systemd:
44
+ name: puma.socket
45
+ enabled: no
46
+ daemon_reload: yes
47
+
36
48
 
@@ -0,0 +1,37 @@
1
+ [Unit]
2
+ Description=Puma HTTP Server
3
+ After=network.target
4
+
5
+ # Uncomment for socket activation (see below)
6
+ # Requires=puma.socket
7
+
8
+ [Service]
9
+ # Puma supports systemd's `Type=notify` and watchdog service
10
+ # monitoring, if the [sd_notify](https://github.com/agis/ruby-sdnotify) gem is installed,
11
+ # as of Puma 5.1 or later.
12
+ # On earlier versions of Puma or JRuby, change this to `Type=simple` and remove
13
+ # the `WatchdogSec` line.
14
+ Type=simple
15
+
16
+ # If your Puma process locks up, systemd's watchdog will restart it within seconds.
17
+ # WatchdogSec=10
18
+
19
+ # Preferably configure a non-privileged user
20
+ User=deploy
21
+
22
+ WorkingDirectory=/u/apps/{{project_name}}/current
23
+
24
+ # Helpful for debugging socket activation, etc.
25
+ # Environment=PUMA_DEBUG=1
26
+ Environment=RAILS_ENV={{rails_env}}
27
+
28
+ # SystemD will not run puma even if it is in your path. You must specify
29
+ # an absolute URL to puma. For example /usr/local/bin/puma
30
+
31
+ # Variant: Use `bundle exec --keep-file-descriptors puma` instead of binstub
32
+ ExecStart=/usr/local/bin/bundle exec --keep-file-descriptors puma -C /u/apps/{{project_name}}/current/config/puma/{{rails_env}}.rb
33
+
34
+ Restart=always
35
+
36
+ [Install]
37
+ WantedBy=multi-user.target
@@ -0,0 +1,14 @@
1
+ # /etc/systemd/system/puma.socket
2
+
3
+ [Unit]
4
+ Description=Puma HTTP Server Accept Sockets
5
+
6
+ [Socket]
7
+ ListenStream=0.0.0.0:9292
8
+
9
+ NoDelay=true
10
+ ReusePort=true
11
+ Backlog=1024
12
+
13
+ [Install]
14
+ WantedBy=sockets.target
@@ -1,10 +1,14 @@
1
+ {% if monit_installed is defined %}
1
2
  begin
2
3
  # Needed for Puma 5 + puma-damon, but built in to Puma 4
3
4
  # https://github.com/kigster/puma-daemon
5
+ # however not needed if we're using systemd which is the future
4
6
  require 'puma/daemon'
5
7
  rescue LoadError => e
8
+ daemonize
6
9
  # Puma 4 has `daemonize` built in
7
10
  end
11
+ {% endif %}
8
12
 
9
13
  # Change to match your CPU core count
10
14
  workers {{puma_workers}}
@@ -23,8 +27,6 @@ bind "tcp://127.0.0.1:9292"
23
27
  # Logging
24
28
  stdout_redirect "#{app_dir}/log/puma.stdout.log", "#{app_dir}/log/puma.stderr.log", true
25
29
 
26
- # Set master PID and state locations
27
- daemonize
28
30
  pidfile "/u/apps/{{project_name}}/shared/tmp/pids/puma.pid"
29
31
  state_path "/u/apps/{{project_name}}/shared/tmp/pids/puma.state"
30
32
  activate_control_app
@@ -1,9 +1,2 @@
1
1
  ---
2
- database_pool: 5
3
- database_name: "{{project_name}}_{{rails_env}}"
4
- database_user: "{{project_name}}"
5
- database_adapter: postgresql
6
- job_queues:
7
- - default
8
- - mailers
9
2
  send_stats: false
@@ -1,10 +1,29 @@
1
1
  ---
2
- - name: Install Redis
2
+ - name: Add an Apt signing key for redis repo
3
+ ansible.builtin.apt_key:
4
+ url: https://packages.redis.io/gpg
5
+ state: present
6
+
7
+ - name: Add redis repository into sources list
8
+ ansible.builtin.apt_repository:
9
+ repo: deb https://packages.redis.io/deb {{ ansible_distribution_release }} main
10
+ state: present
11
+ register: redis_apt_repo
12
+
13
+ - name: Purge distro redis package
14
+ apt:
15
+ name: redis-server
16
+ state: absent
17
+ purge: true
18
+ when: redis_apt_repo.changed
19
+
20
+ - name: Install Redis from official repo
3
21
  become: true
4
22
  apt:
5
- pkg: redis-server
6
- state: present
23
+ name: redis-server
24
+ state: latest
7
25
  update_cache: true
26
+
8
27
  - name: Set bind IP
9
28
  become: true
10
29
  lineinfile:
@@ -12,3 +31,9 @@
12
31
  regexp: '^bind '
13
32
  line: 'bind {{redis_bind}}'
14
33
  state: present
34
+
35
+ - name: restart redis
36
+ become: true
37
+ systemd:
38
+ name: redis
39
+ state: restarted
@@ -1,15 +1,14 @@
1
1
  ---
2
- - name: Install resque monit script
3
- template:
4
- src: resque-monit-rc
5
- dest: /etc/monit/conf-available/resque_{{project_name}}_{{rails_env}}
2
+ - name: Install systemd resque script
6
3
  become: true
4
+ template:
5
+ src: resque-systemd.service
6
+ dest: /etc/systemd/system/resque.service
7
7
 
8
- - name: Enable resque monit script
9
- file:
10
- src: /etc/monit/conf-available/resque_{{project_name}}_{{rails_env}}
11
- dest: /etc/monit/conf-enabled/resque_{{project_name}}_{{rails_env}}
12
- state: link
13
- notify:
14
- - reload_monit
15
- - restart_monit
8
+ - name: Enable systemd resque service
9
+ become: true
10
+ systemd:
11
+ name: resque
12
+ daemon_reload: true
13
+ enabled: yes
14
+ state: started
@@ -15,11 +15,13 @@ After=syslog.target network.target
15
15
  #
16
16
  # !!!! !!!! !!!!
17
17
  #
18
- Type=simple
18
+ Type=forking
19
19
 
20
20
  WorkingDirectory=/u/apps/{{project_name}}/current
21
21
 
22
- ExecStart="RAILS_ENV={{rails_env}} COUNT={{resque_concurrency}} QUEUES={{hostname}},{{ job_queues | join(',') }} BACKGROUND=yes PIDFILE=/u/apps/{{project_name}}/shared/tmp/pids/resque.pid bundle exec rake resque:work"
22
+ ExecStart=/usr/local/bin/bundle exec rake resque:work
23
+ ExecStop=/bin/kill -s QUIT $MAINPID
24
+ PIDFile=/u/apps/{{project_name}}/shared/tmp/pids/resque.pid
23
25
 
24
26
  # Uncomment this if you are going to use this as a system service
25
27
  # if using as a user service then leave commented out, or you will get an error trying to start the service
@@ -31,6 +33,11 @@ UMask=0002
31
33
  # Greatly reduce Ruby memory fragmentation and heap usage
32
34
  # https://www.mikeperham.com/2018/04/25/taming-rails-memory-bloat/
33
35
  Environment=MALLOC_ARENA_MAX=2
36
+ Environment=RAILS_ENV={{rails_env}}
37
+ Environment=COUNT=1
38
+ Environment=QUEUES={{ job_queues | join(',') }}
39
+ Environment=BACKGROUND=yes
40
+ Environment=PIDFILE=/u/apps/{{project_name}}/shared/tmp/pids/resque.pid
34
41
 
35
42
  # if we crash, restart
36
43
  RestartSec=1
@@ -44,4 +51,4 @@ StandardError=syslog
44
51
  SyslogIdentifier=resque
45
52
 
46
53
  [Install]
47
- WantedBy=multi-user.target
54
+ WantedBy=multi-user.target
@@ -71,22 +71,7 @@
71
71
  command: "{{ ruby_location }}/bin/gem update --system"
72
72
  become: true
73
73
 
74
- - name: Remove old bundler bin
75
- file:
76
- path: "{{ ruby_location }}/bin/bundle"
77
- state: absent
78
- become: true
79
-
80
- - name: Uninstall Bundler
81
- gem:
82
- name: bundler
83
- state: absent
84
- user_install: no
85
- executable: "{{ ruby_location }}/bin/gem"
86
- become: true
87
- ignore_errors: yes
88
-
89
- - name: Install Bundler
74
+ - name: Install/update Bundler
90
75
  shell: "{{ ruby_location }}/bin/gem install bundler -v {{ bundler_version }}"
91
76
  become: true
92
77
 
@@ -1,2 +1,2 @@
1
1
  ---
2
- sidekiq_concurrency: 1
2
+ sidekiq_workers: 1
@@ -1,19 +1,15 @@
1
1
  ---
2
- - name: Install sidekiq monit script
3
- template:
4
- src: sidekiq-monit-rc
5
- dest: /etc/monit/conf-available/sidekiq_{{project_name}}_{{rails_env}}
2
+ - name: Install systemd sidekiq script
6
3
  become: true
4
+ template:
5
+ src: sidekiq-systemd.service
6
+ dest: /etc/systemd/system/sidekiq.service
7
7
 
8
- - name: Clean up old sidekiq monit scripts
9
- shell: rm -f /etc/monit/conf.d/sidekiq_*
10
-
11
- - name: Enable sidekiq monit script
12
- file:
13
- src: /etc/monit/conf-available/sidekiq_{{project_name}}_{{rails_env}}
14
- dest: /etc/monit/conf-enabled/sidekiq_{{project_name}}_{{rails_env}}
15
- state: link
16
- notify:
17
- - reload_monit
18
- - restart_monit
8
+ - name: Enable systemd sidekiq service
9
+ become: true
10
+ systemd:
11
+ name: sidekiq
12
+ enabled: yes
13
+ daemon_reload: true
19
14
 
15
+ # TODO Read the gemfile and make sure they have sidekiq 6????
@@ -1,4 +1,4 @@
1
1
  check process sidekiq
2
2
  with pidfile /u/apps/{{project_name}}/shared/tmp/pids/sidekiq.pid
3
- start program = "/bin/su - deploy -c 'cd /u/apps/{{project_name}}/current && bundle exec sidekiq --queue {{hostname}} {{ job_queues | map('regex_replace', '^(.*)$', '--queue \\1') | join(' ') }} -c {{sidekiq_concurrency}} --pidfile /u/apps/{{project_name}}/shared/tmp/pids/sidekiq.pid --environment {{rails_env}} --logfile /u/apps/{{project_name}}/shared/log/sidekiq.log --daemon'" with timeout 30 seconds
3
+ start program = "/bin/su - deploy -c 'cd /u/apps/{{project_name}}/current && bundle exec sidekiq --queue {{hostname}} {{ job_queues | map('regex_replace', '^(.*)$', '--queue \\1') | join(' ') }} -c {{sidekiq_workers}} --pidfile /u/apps/{{project_name}}/shared/tmp/pids/sidekiq.pid --environment {{rails_env}} --logfile /u/apps/{{project_name}}/shared/log/sidekiq.log --daemon'" with timeout 30 seconds
4
4
  stop program = "/bin/su - deploy -c 'kill -s TERM `cat /u/apps/{{project_name}}/shared/tmp/pids/sidekiq.pid`'" with timeout 30 seconds
@@ -0,0 +1,63 @@
1
+ #
2
+ # This file tells systemd how to run Sidekiq as a 24/7 long-running daemon.
3
+ #
4
+ # Use `journalctl -u sidekiq -rn 100` to view the last 100 lines of log output.
5
+ #
6
+ [Unit]
7
+ Description=sidekiq
8
+ # start us only once the network and logging subsystems are available,
9
+ # consider adding redis-server.service if Redis is local and systemd-managed.
10
+ After=syslog.target network.target
11
+
12
+ # See these pages for lots of options:
13
+ #
14
+ # https://www.freedesktop.org/software/systemd/man/systemd.service.html
15
+ # https://www.freedesktop.org/software/systemd/man/systemd.exec.html
16
+ #
17
+ # THOSE PAGES ARE CRITICAL FOR ANY LINUX DEVOPS WORK; read them multiple
18
+ # times! systemd is a critical tool for all developers to know and understand.
19
+ #
20
+ [Service]
21
+ #
22
+ # !!!! !!!! !!!!
23
+ #
24
+ # As of v6.0.6, Sidekiq automatically supports systemd's `Type=notify` and watchdog service
25
+ # monitoring. If you are using an earlier version of Sidekiq, change this to `Type=simple`
26
+ # and remove the `WatchdogSec` line.
27
+ #
28
+ # !!!! !!!! !!!!
29
+ #
30
+ Type=notify
31
+ # If your Sidekiq process locks up, systemd's watchdog will restart it within seconds.
32
+ WatchdogSec=10
33
+
34
+ WorkingDirectory=/u/apps/{{project_name}}/current
35
+ ExecStart=/usr/local/bin/bundle exec sidekiq -e {{rails_env}} --queue {{hostname}} {{ job_queues | map('regex_replace', '^(.*)$', '--queue \\1') | join(' ') }} -c {{sidekiq_workers}}
36
+
37
+ # Use `systemctl kill -s TSTP sidekiq` to quiet the Sidekiq process
38
+
39
+ # Uncomment this if you are going to use this as a system service
40
+ # if using as a user service then leave commented out, or you will get an error trying to start the service
41
+ # !!! Change this to your deploy user account if you are using this as a system service !!!
42
+ User=deploy
43
+ Group=deploy
44
+ UMask=0002
45
+
46
+ # Greatly reduce Ruby memory fragmentation and heap usage
47
+ # https://www.mikeperham.com/2018/04/25/taming-rails-memory-bloat/
48
+ Environment=MALLOC_ARENA_MAX=2
49
+ Environment=RAILS_ENV={{rails_env}}
50
+
51
+ # if we crash, restart
52
+ RestartSec=1
53
+ Restart=on-failure
54
+
55
+ # output goes to /var/log/syslog (Ubuntu) or /var/log/messages (CentOS)
56
+ StandardOutput=syslog
57
+ StandardError=syslog
58
+
59
+ # This will default to "bundler" if we don't specify it
60
+ SyslogIdentifier=sidekiq
61
+
62
+ [Install]
63
+ WantedBy=multi-user.target
@@ -0,0 +1,2 @@
1
+ ---
2
+ tailscale_options: ""
@@ -0,0 +1,22 @@
1
+ ---
2
+ - name: "Add Tailscale apt key"
3
+ become: true
4
+ apt_key:
5
+ url: https://pkgs.tailscale.com/stable/ubuntu/{{ansible_distribution_release}}.gpg
6
+ state: present
7
+
8
+ - name: "Add Tailscale apt repos"
9
+ become: true
10
+ apt_repository:
11
+ repo: "deb https://pkgs.tailscale.com/stable/ubuntu {{ansible_distribution_release}} main"
12
+ state: present
13
+
14
+ - name: "Install tailscale from api"
15
+ apt:
16
+ name: tailscale
17
+ state: latest
18
+ update_cache: yes
19
+
20
+ - name: "Join the tailnet"
21
+ become: true
22
+ command: tailscale up --ssh --auth-key={{tailscale_auth_key}} --hostname={{project_name}}-{{hostname}} --accept-risk=lose-ssh {{tailscale_options}}
data/bin/console CHANGED
@@ -6,9 +6,5 @@ require "subspace"
6
6
  # You can add fixtures and/or initialization code here to make experimenting
7
7
  # with your gem easier. You can also use a different console, if you like.
8
8
 
9
- # (If you use this, don't forget to add pry to your Gemfile!)
10
- # require "pry"
11
- # Pry.start
12
-
13
9
  require "irb"
14
10
  IRB.start
data/exe/subspace CHANGED
@@ -1,5 +1,4 @@
1
- #!/usr/bin/env ruby
2
-
1
+ #!/usr/bin/env ruby_executable_hooks
3
2
 
4
3
  require 'subspace/cli'
5
4
  Subspace::Cli.new.run
data/lib/subspace/cli.rb CHANGED
@@ -7,11 +7,14 @@ require 'subspace'
7
7
  require 'subspace/commands/base'
8
8
  require 'subspace/commands/bootstrap'
9
9
  require 'subspace/commands/configure'
10
+ require 'subspace/commands/exec'
10
11
  require 'subspace/commands/init'
12
+ require 'subspace/commands/inventory'
11
13
  require 'subspace/commands/override'
12
14
  require 'subspace/commands/provision'
13
15
  require 'subspace/commands/ssh'
14
- require 'subspace/commands/vars'
16
+ require 'subspace/commands/secrets'
17
+ require 'subspace/commands/terraform'
15
18
  require 'subspace/commands/maintain'
16
19
  require 'subspace/commands/maintenance_mode.rb'
17
20
 
@@ -30,11 +33,15 @@ class Subspace::Cli
30
33
  end
31
34
 
32
35
  command :init do |c|
33
- c.syntax = 'subspace init [vars]'
36
+ c.syntax = 'subspace init'
34
37
  c.summary = 'Run without options to initialize subspace.'
35
38
  c.description = 'Some initialization routines can be run indiviaully, useful for upgrading'
36
- c.example 'init a new project', 'subspace init'
37
- c.example 'create the new style application.yml vars template', 'subspace init vars'
39
+ c.example 'init a new project with one default environment (default staging)', 'subspace init'
40
+ c.example 'create a new fully automated production environment configuration', 'subspace init --terraform --env production'
41
+ c.option '--ansible', 'initialize ansible for managing individual servers'
42
+ c.option '--terraform', 'Initialize terraform for managing infrastructure'
43
+ c.option '--env STRING', 'Initialize configuration for a new environment'
44
+ c.option '--template [staging|production]', 'Use non-default template for this environment (default=staging)'
38
45
  c.when_called Subspace::Commands::Init
39
46
  end
40
47
 
@@ -47,9 +54,6 @@ class Subspace::Cli
47
54
  c.option '--password', "Ask for a password instead of using ssh keys"
48
55
  c.option '--yum', "Use yum instead of apt to install python"
49
56
  c.option "-i", "--private-key PRIVATE-KEY", "Alias for private-key"
50
- Subspace::Commands::Bootstrap::PASS_THROUGH_PARAMS.each do |param_name|
51
- c.option "--#{param_name} #{param_name.upcase}", "Passed directly through to ansible-playbook command"
52
- end
53
57
  c.when_called Subspace::Commands::Bootstrap
54
58
  end
55
59
 
@@ -64,17 +68,34 @@ class Subspace::Cli
64
68
  c.when_called Subspace::Commands::Provision
65
69
  end
66
70
 
71
+ command :tf do |c|
72
+ c.syntax = 'subspace tf [environment]'
73
+ c.summary = "Execute a terraform plan with the option to apply the plan after review"
74
+ c.when_called Subspace::Commands::Terraform
75
+ end
76
+
67
77
  command :ssh do |c|
68
78
  c.syntax = 'subspace ssh [options]'
69
79
  c.summary = 'ssh to the remote server as the administrative user'
70
80
  c.description = ''
71
- c.option '--user USER', "Use a different user (eg deploy). Default is the ansible_ssh_user"
81
+ c.option '--user USER', "Use a different user (eg deploy). Default is the ansible_user"
72
82
  Subspace::Commands::Ssh::PASS_THROUGH_PARAMS.each do |param_name|
73
83
  c.option "-#{param_name} #{param_name.upcase}", "Passed directly through to ssh command"
74
84
  end
75
85
  c.when_called Subspace::Commands::Ssh
76
86
  end
77
87
 
88
+ command :exec do |c|
89
+ c.syntax = 'subspace exec <host-spec> "<statement>" [options]'
90
+ c.summary = 'execute <statement> on all hosts matching <host-spec>'
91
+ c.description = ''
92
+ c.option '--user USER', "Use a different user (eg deploy). Default is the ansible_user"
93
+ Subspace::Commands::Exec::PASS_THROUGH_PARAMS.each do |param_name|
94
+ c.option "-#{param_name} #{param_name.upcase}", "Passed directly through to ssh command"
95
+ end
96
+ c.when_called Subspace::Commands::Exec
97
+ end
98
+
78
99
  command :configure do |c, args|
79
100
  c.syntax = 'subspace configure'
80
101
  c.summary = "Regenerate all of the ansible configuration files. You don't normally need to run this."
@@ -89,15 +110,17 @@ class Subspace::Cli
89
110
  c.when_called Subspace::Commands::Override
90
111
  end
91
112
 
92
- command :vars do |c, args|
93
- c.syntax = 'subspace vars [environment]'
113
+ command :secrets do |c, args|
114
+ c.syntax = 'subspace secrets [environment]'
94
115
  c.summary = 'View or edit the encrypted variables for an environment'
95
- c.description = """By default, this will simply show the variables for a specific environemnt.
96
- You can also edit variables, and we expect the functionality here to grow in the future.
97
- Running `subspace vars development --create` is usually a great way to bootstrap a new development environment."""
116
+ c.description = <<~EOS
117
+ By default, this will simply show the variables for a specific environemnt.
118
+ You can also edit variables, and we expect the functionality here to grow in the future.
119
+ Running `subspace secrets development --create` is usually a great way to bootstrap a new development environment.
120
+ EOS
98
121
  c.option '--edit', "Edit the variables instead of view"
99
122
  c.option '--create', "Create config/application.yml with the variables from the specified environment"
100
- c.when_called Subspace::Commands::Vars
123
+ c.when_called Subspace::Commands::Secrets
101
124
  end
102
125
 
103
126
  command :maintain do |c, args|
@@ -124,6 +147,20 @@ class Subspace::Cli
124
147
  c.when_called Subspace::Commands::MaintenanceMode
125
148
  end
126
149
 
150
+ command :inventory do |c, args|
151
+ c.syntax = 'subspace inventory <command>'
152
+ c.summary = 'Manage, manipulate, and other useful inventory-related functions'
153
+ c.description = <<~EOS
154
+ Available inventory commands:
155
+
156
+ capistrano - generate config/deploy/[env].rb. Requires the --env option.
157
+ list - list the current inventory as understood by subspace.
158
+ keyscan - Update ~/.known_hosts with new host key fingerprints
159
+ EOS
160
+ c.option "--env ENVIRONMENT", "Optional: Limit function to a specific environment (aka group)"
161
+ c.when_called Subspace::Commands::Inventory
162
+ end
163
+
127
164
  run!
128
165
  end
129
166
  end