subspace 2.5.10 → 3.0.0.rc1

Files changed (95)
  1. checksums.yaml +4 -4
  2. data/.ruby-version +1 -1
  3. data/CHANGELOG.md +12 -5
  4. data/README.md +57 -24
  5. data/UPGRADING.md +10 -0
  6. data/ansible/roles/common/defaults/main.yml +0 -1
  7. data/ansible/roles/common/files/sudoers-service +1 -1
  8. data/ansible/roles/common/tasks/main.yml +18 -7
  9. data/ansible/roles/common/tasks/no_swap.yml +26 -0
  10. data/ansible/roles/common/templates/motd +1 -1
  11. data/ansible/roles/common/templates/motd2 +1 -1
  12. data/ansible/roles/delayed_job/tasks/main.yml +1 -1
  13. data/ansible/roles/memcache/defaults/main.yml +2 -0
  14. data/ansible/roles/memcache/tasks/main.yml +16 -1
  15. data/ansible/roles/newrelic-infra/tasks/main.yml +3 -3
  16. data/ansible/roles/nginx/tasks/main.yml +12 -3
  17. data/ansible/roles/puma/tasks/main.yml +32 -20
  18. data/ansible/roles/puma/templates/puma-systemd.service +36 -0
  19. data/ansible/roles/puma/templates/puma-systemd.socket +14 -0
  20. data/ansible/roles/puma/templates/puma.rb +4 -2
  21. data/ansible/roles/rails/defaults/main.yml +0 -7
  22. data/ansible/roles/redis/tasks/main.yml +7 -0
  23. data/ansible/roles/resque/tasks/main.yml +11 -12
  24. data/ansible/roles/resque/templates/resque-systemd.service +10 -3
  25. data/ansible/roles/ruby-common/README.md +1 -1
  26. data/ansible/roles/ruby-common/tasks/main.yml +2 -17
  27. data/ansible/roles/sidekiq/defaults/main.yml +1 -1
  28. data/ansible/roles/sidekiq/tasks/main.yml +11 -15
  29. data/ansible/roles/sidekiq/templates/sidekiq-monit-rc +1 -1
  30. data/ansible/roles/sidekiq/templates/sidekiq-systemd.service +62 -0
  31. data/ansible/roles/tailscale/defaults/main.yml +2 -0
  32. data/ansible/roles/tailscale/tasks/main.yml +22 -0
  33. data/exe/subspace +1 -2
  34. data/lib/subspace/cli.rb +50 -14
  35. data/lib/subspace/commands/ansible.rb +11 -2
  36. data/lib/subspace/commands/base.rb +20 -5
  37. data/lib/subspace/commands/bootstrap.rb +16 -21
  38. data/lib/subspace/commands/configure.rb +2 -2
  39. data/lib/subspace/commands/exec.rb +20 -0
  40. data/lib/subspace/commands/init.rb +94 -45
  41. data/lib/subspace/commands/inventory.rb +45 -0
  42. data/lib/subspace/commands/maintain.rb +1 -1
  43. data/lib/subspace/commands/provision.rb +1 -3
  44. data/lib/subspace/commands/{vars.rb → secrets.rb} +6 -5
  45. data/lib/subspace/commands/ssh.rb +10 -8
  46. data/lib/subspace/commands/terraform.rb +83 -0
  47. data/lib/subspace/inventory.rb +144 -0
  48. data/lib/subspace/version.rb +1 -1
  49. data/subspace.gemspec +8 -2
  50. data/template/{provision → subspace}/.gitignore +3 -0
  51. data/template/{provision → subspace}/ansible.cfg.erb +2 -2
  52. data/template/subspace/group_vars/all.erb +28 -0
  53. data/template/subspace/group_vars/template.erb +26 -0
  54. data/template/subspace/inventory.yml.erb +11 -0
  55. data/template/{provision → subspace}/playbook.yml.erb +2 -5
  56. data/template/subspace/templates/authorized_keys.erb +1 -0
  57. data/template/subspace/terraform/.gitignore +2 -0
  58. data/template/subspace/terraform/template/main-oxenwagen.tf.erb +116 -0
  59. data/template/subspace/terraform/template/main-workhorse.tf.erb +41 -0
  60. data/template/subspace/terraformrc.erb +9 -0
  61. data/terraform/modules/s3_backend/README +2 -0
  62. data/terraform/modules/s3_backend/dynamodb.tf +1 -0
  63. data/terraform/modules/s3_backend/iam_user.tf +38 -0
  64. data/terraform/modules/s3_backend/main.tf +39 -0
  65. data/terraform/modules/s3_backend/state_bucket.tf +14 -0
  66. metadata +42 -53
  67. data/ansible/roles/monit/files/monit-http.conf +0 -3
  68. data/ansible/roles/monit/files/sudoers-monit +0 -1
  69. data/ansible/roles/monit/handlers/main.yml +0 -14
  70. data/ansible/roles/monit/tasks/main.yml +0 -34
  71. data/ansible/roles/mtpereira.passenger/.bumpversion.cfg +0 -7
  72. data/ansible/roles/mtpereira.passenger/.gitignore +0 -2
  73. data/ansible/roles/mtpereira.passenger/LICENSE +0 -20
  74. data/ansible/roles/mtpereira.passenger/README.md +0 -31
  75. data/ansible/roles/mtpereira.passenger/defaults/main.yml +0 -5
  76. data/ansible/roles/mtpereira.passenger/handlers/main.yml +0 -8
  77. data/ansible/roles/mtpereira.passenger/meta/.galaxy_install_info +0 -1
  78. data/ansible/roles/mtpereira.passenger/meta/main.yml +0 -21
  79. data/ansible/roles/mtpereira.passenger/tasks/apt.yml +0 -13
  80. data/ansible/roles/mtpereira.passenger/tasks/main.yml +0 -8
  81. data/ansible/roles/mtpereira.passenger/tasks/pkg.yml +0 -35
  82. data/ansible/roles/mtpereira.passenger/tasks/service.yml +0 -8
  83. data/ansible/roles/passenger/files/sudoers-passenger +0 -1
  84. data/ansible/roles/passenger/meta/main.yml +0 -6
  85. data/ansible/roles/passenger/tasks/main.yml +0 -5
  86. data/ansible/roles/postgis/defaults/main.yml +0 -2
  87. data/ansible/roles/puma/defaults/main.yml +0 -5
  88. data/ansible/roles/puma/meta/main.yml +0 -5
  89. data/ansible/roles/sidekiq/meta/main.yml +0 -5
  90. data/template/provision/group_vars/all.erb +0 -17
  91. data/template/provision/group_vars/template.erb +0 -11
  92. data/template/provision/host_vars/template.erb +0 -4
  93. /data/template/{provision → subspace}/hosts.erb +0 -0
  94. /data/template/{provision/vars → subspace/secrets}/template.erb +0 -0
  95. /data/template/{provision → subspace}/templates/application.yml.template +0 -0
@@ -15,11 +15,13 @@ After=syslog.target network.target
 #
 # !!!! !!!! !!!!
 #
-Type=simple
+Type=forking
 
 WorkingDirectory=/u/apps/{{project_name}}/current
 
-ExecStart="RAILS_ENV={{rails_env}} COUNT={{resque_concurrency}} QUEUES={{hostname}},{{ job_queues | join(',') }} BACKGROUND=yes PIDFILE=/u/apps/{{project_name}}/shared/tmp/pids/resque.pid bundle exec rake resque:work"
+ExecStart=/usr/local/bin/bundle exec rake resque:work
+ExecStop=/bin/kill -s QUIT $MAINPID
+PIDFile=/u/apps/{{project_name}}/shared/tmp/pids/resque.pid
 
 # Uncomment this if you are going to use this as a system service
 # if using as a user service then leave commented out, or you will get an error trying to start the service
@@ -31,6 +33,11 @@ UMask=0002
 # Greatly reduce Ruby memory fragmentation and heap usage
 # https://www.mikeperham.com/2018/04/25/taming-rails-memory-bloat/
 Environment=MALLOC_ARENA_MAX=2
+Environment=RAILS_ENV={{rails_env}}
+Environment=COUNT=1
+Environment=QUEUES={{ job_queues | join(',') }}
+Environment=BACKGROUND=yes
+Environment=PIDFILE=/u/apps/{{project_name}}/shared/tmp/pids/resque.pid
 
 # if we crash, restart
 RestartSec=1
@@ -44,4 +51,4 @@ StandardError=syslog
 SyslogIdentifier=resque
 
 [Install]
-WantedBy=multi-user.target
+WantedBy=multi-user.target
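Since the worker settings now arrive via Environment= lines instead of one long quoted ExecStart string, the reworked unit is roughly equivalent to running the following by hand (myapp, production, and the queue list are placeholder values standing in for the template variables):

    cd /u/apps/myapp/current
    RAILS_ENV=production COUNT=1 QUEUES=mailers,default BACKGROUND=yes \
      PIDFILE=/u/apps/myapp/shared/tmp/pids/resque.pid \
      /usr/local/bin/bundle exec rake resque:work

BACKGROUND=yes makes the rake task daemonize itself, which is why the unit switches to Type=forking and points PIDFile at the pid the worker writes.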
@@ -23,7 +23,7 @@ Role Variables
 
 > ruby_version: This variable controls the version of Ruby that will be compiled and installed. It should correspond with the tarball filename excluding the ".tar.gz" extension (e.g. "ruby-1.9.3-p484").
 
-> ruby_checksum: The checksum of the gzipped tarball that will be downloaded and compiled. (prefix with algorithm, e.g. sha256:abcdef01234567890)
+> ruby_checksum: The SHA256 checksum of the gzipped tarball that will be downloaded and compiled.
 
 > ruby_download_location: The URL that the tarball should be retrieved from. Using the ruby_version variable within this variable is a good practice (e.g. "http://cache.ruby-lang.org/pub/ruby/1.9/{{ ruby_version }}.tar.gz").
 
@@ -40,7 +40,7 @@
 - name: Download the Ruby source code
   get_url: url={{ ruby_download_location }}
            dest=/usr/local/src/
-           checksum={{ ruby_checksum }}
+           sha256sum={{ ruby_checksum }}
   become: true
 
 - name: Generate the Ruby installation script
@@ -71,22 +71,7 @@
   command: "{{ ruby_location }}/bin/gem update --system"
   become: true
 
-- name: Remove old bundler bin
-  file:
-    path: "{{ ruby_location }}/bin/bundle"
-    state: absent
-  become: true
-
-- name: Uninstall Bundler
-  gem:
-    name: bundler
-    state: absent
-    user_install: no
-    executable: "{{ ruby_location }}/bin/gem"
-  become: true
-  ignore_errors: yes
-
-- name: Install Bundler
+- name: Install/update Bundler
   shell: "{{ ruby_location }}/bin/gem install bundler -v {{ bundler_version }}"
   become: true
 
@@ -1,2 +1,2 @@
 ---
-sidekiq_concurrency: 1
+sidekiq_workers: 1
@@ -1,19 +1,15 @@
 ---
-- name: Install sidekiq monit script
-  template:
-    src: sidekiq-monit-rc
-    dest: /etc/monit/conf-available/sidekiq_{{project_name}}_{{rails_env}}
+- name: Install systemd sidekiq script
   become: true
+  template:
+    src: sidekiq-systemd.service
+    dest: /etc/systemd/system/sidekiq.service
 
-- name: Clean up old sidekiq monit scripts
-  shell: rm -f /etc/monit/conf.d/sidekiq_*
-
-- name: Enable sidekiq monit script
-  file:
-    src: /etc/monit/conf-available/sidekiq_{{project_name}}_{{rails_env}}
-    dest: /etc/monit/conf-enabled/sidekiq_{{project_name}}_{{rails_env}}
-    state: link
-  notify:
-    - reload_monit
-    - restart_monit
+- name: Enable systemd sidekiq service
+  become: true
+  systemd:
+    name: sidekiq
+    enabled: yes
+    daemon_reload: true
 
+# TODO Read the gemfile and make sure they have sidekiq 6????
@@ -1,4 +1,4 @@
 check process sidekiq
   with pidfile /u/apps/{{project_name}}/shared/tmp/pids/sidekiq.pid
-  start program = "/bin/su - deploy -c 'cd /u/apps/{{project_name}}/current && bundle exec sidekiq --queue {{hostname}} {{ job_queues | map('regex_replace', '^(.*)$', '--queue \\1') | join(' ') }} -c {{sidekiq_concurrency}} --pidfile /u/apps/{{project_name}}/shared/tmp/pids/sidekiq.pid --environment {{rails_env}} --logfile /u/apps/{{project_name}}/shared/log/sidekiq.log --daemon'" with timeout 30 seconds
+  start program = "/bin/su - deploy -c 'cd /u/apps/{{project_name}}/current && bundle exec sidekiq --queue {{hostname}} {{ job_queues | map('regex_replace', '^(.*)$', '--queue \\1') | join(' ') }} -c {{sidekiq_workers}} --pidfile /u/apps/{{project_name}}/shared/tmp/pids/sidekiq.pid --environment {{rails_env}} --logfile /u/apps/{{project_name}}/shared/log/sidekiq.log --daemon'" with timeout 30 seconds
   stop program = "/bin/su - deploy -c 'kill -s TERM `cat /u/apps/{{project_name}}/shared/tmp/pids/sidekiq.pid`'" with timeout 30 seconds
@@ -0,0 +1,62 @@
+#
+# This file tells systemd how to run Sidekiq as a 24/7 long-running daemon.
+#
+# Use `journalctl -u sidekiq -rn 100` to view the last 100 lines of log output.
+#
+[Unit]
+Description=sidekiq
+# start us only once the network and logging subsystems are available,
+# consider adding redis-server.service if Redis is local and systemd-managed.
+After=syslog.target network.target
+
+# See these pages for lots of options:
+#
+# https://www.freedesktop.org/software/systemd/man/systemd.service.html
+# https://www.freedesktop.org/software/systemd/man/systemd.exec.html
+#
+# THOSE PAGES ARE CRITICAL FOR ANY LINUX DEVOPS WORK; read them multiple
+# times! systemd is a critical tool for all developers to know and understand.
+#
+[Service]
+#
+# !!!! !!!! !!!!
+#
+# As of v6.0.6, Sidekiq automatically supports systemd's `Type=notify` and watchdog service
+# monitoring. If you are using an earlier version of Sidekiq, change this to `Type=simple`
+# and remove the `WatchdogSec` line.
+#
+# !!!! !!!! !!!!
+#
+Type=notify
+# If your Sidekiq process locks up, systemd's watchdog will restart it within seconds.
+WatchdogSec=10
+
+WorkingDirectory=/u/apps/{{project_name}}/current
+ExecStart=/usr/local/bin/bundle exec sidekiq -e {{rails_env}} --queue {{hostname}} {{ job_queues | map('regex_replace', '^(.*)$', '--queue \\1') | join(' ') }}
+
+# Use `systemctl kill -s TSTP sidekiq` to quiet the Sidekiq process
+
+# Uncomment this if you are going to use this as a system service
+# if using as a user service then leave commented out, or you will get an error trying to start the service
+# !!! Change this to your deploy user account if you are using this as a system service !!!
+User=deploy
+Group=deploy
+UMask=0002
+
+# Greatly reduce Ruby memory fragmentation and heap usage
+# https://www.mikeperham.com/2018/04/25/taming-rails-memory-bloat/
+Environment=MALLOC_ARENA_MAX=2
+
+# if we crash, restart
+RestartSec=1
+Restart=on-failure
+
+# output goes to /var/log/syslog (Ubuntu) or /var/log/messages (CentOS)
+StandardOutput=syslog
+StandardError=syslog
+
+# This will default to "bundler" if we don't specify it
+SyslogIdentifier=sidekiq
+
+[Install]
+WantedBy=multi-user.target
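The unit's own comments point at the standard systemd tooling for operating it; nothing below is subspace-specific, just the systemctl/journalctl invocations those comments reference:

    journalctl -u sidekiq -rn 100        # last 100 log lines, newest first
    sudo systemctl kill -s TSTP sidekiq  # quiet Sidekiq so it stops fetching new jobs
    sudo systemctl restart sidekiq       # restart, e.g. after a deploy, if not handled elsewhere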
@@ -0,0 +1,2 @@
+---
+tailscale_options: ""
@@ -0,0 +1,22 @@
+---
+- name: "Add Tailscale apt key"
+  become: true
+  apt_key:
+    url: https://pkgs.tailscale.com/stable/ubuntu/{{ansible_distribution_release}}.gpg
+    state: present
+
+- name: "Add Tailscale apt repos"
+  become: true
+  apt_repository:
+    repo: "deb https://pkgs.tailscale.com/stable/ubuntu {{ansible_distribution_release}} main"
+    state: present
+
+- name: "Install tailscale from api"
+  apt:
+    name: tailscale
+    state: latest
+    update_cache: yes
+
+- name: "Join the tailnet"
+  become: true
+  command: tailscale up --auth-key {{tailscale_auth_key}} {{tailscale_options}}
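The only required input for this role is tailscale_auth_key (tailscale_options defaults to an empty string). If you were driving the play with plain ansible-playbook rather than through subspace, the key could be supplied as an extra var; the key value below is a placeholder, not a real credential:

    ansible-playbook playbook.yml -l myhost \
      -e tailscale_auth_key=tskey-auth-XXXXXXXXXXXX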
data/exe/subspace CHANGED
@@ -1,5 +1,4 @@
-#!/usr/bin/env ruby
-
+#!/usr/bin/env ruby_executable_hooks
 
 require 'subspace/cli'
 Subspace::Cli.new.run
data/lib/subspace/cli.rb CHANGED
@@ -7,11 +7,14 @@ require 'subspace'
 require 'subspace/commands/base'
 require 'subspace/commands/bootstrap'
 require 'subspace/commands/configure'
+require 'subspace/commands/exec'
 require 'subspace/commands/init'
+require 'subspace/commands/inventory'
 require 'subspace/commands/override'
 require 'subspace/commands/provision'
 require 'subspace/commands/ssh'
-require 'subspace/commands/vars'
+require 'subspace/commands/secrets'
+require 'subspace/commands/terraform'
 require 'subspace/commands/maintain'
 require 'subspace/commands/maintenance_mode.rb'
 
@@ -30,11 +33,15 @@ class Subspace::Cli
     end
 
     command :init do |c|
-      c.syntax = 'subspace init [vars]'
+      c.syntax = 'subspace init'
       c.summary = 'Run without options to initialize subspace.'
       c.description = 'Some initialization routines can be run indiviaully, useful for upgrading'
-      c.example 'init a new project', 'subspace init'
-      c.example 'create the new style application.yml vars template', 'subspace init vars'
+      c.example 'init a new project with one default environment (default staging)', 'subspace init'
+      c.example 'create a new fully automated production environment configuration', 'subspace init --terraform --env production'
+      c.option '--ansible', 'initialize ansible for managing individual servers'
+      c.option '--terraform', 'Initialize terraform for managing infrastructure'
+      c.option '--env STRING', 'Initialize configuration for a new environment'
+      c.option '--template [staging|production]', 'Use non-default template for this environment (default=staging)'
       c.when_called Subspace::Commands::Init
     end
 
@@ -47,9 +54,6 @@ class Subspace::Cli
       c.option '--password', "Ask for a password instead of using ssh keys"
       c.option '--yum', "Use yum instead of apt to install python"
       c.option "-i", "--private-key PRIVATE-KEY", "Alias for private-key"
-      Subspace::Commands::Bootstrap::PASS_THROUGH_PARAMS.each do |param_name|
-        c.option "--#{param_name} #{param_name.upcase}", "Passed directly through to ansible-playbook command"
-      end
       c.when_called Subspace::Commands::Bootstrap
     end
 
@@ -64,17 +68,34 @@ class Subspace::Cli
       c.when_called Subspace::Commands::Provision
     end
 
+    command :tf do |c|
+      c.syntax = 'subspace tf [environment]'
+      c.summary = "Execute a terraform plan with the option to apply the plan after review"
+      c.when_called Subspace::Commands::Terraform
+    end
+
     command :ssh do |c|
       c.syntax = 'subspace ssh [options]'
       c.summary = 'ssh to the remote server as the administrative user'
       c.description = ''
-      c.option '--user USER', "Use a different user (eg deploy). Default is the ansible_ssh_user"
+      c.option '--user USER', "Use a different user (eg deploy). Default is the ansible_user"
       Subspace::Commands::Ssh::PASS_THROUGH_PARAMS.each do |param_name|
         c.option "-#{param_name} #{param_name.upcase}", "Passed directly through to ssh command"
       end
       c.when_called Subspace::Commands::Ssh
     end
 
+    command :exec do |c|
+      c.syntax = 'subspace exec <host-spec> "<statement>" [options]'
+      c.summary = 'execute <statement> on all hosts matching <host-spec>'
+      c.description = ''
+      c.option '--user USER', "Use a different user (eg deploy). Default is the ansible_user"
+      Subspace::Commands::Exec::PASS_THROUGH_PARAMS.each do |param_name|
+        c.option "-#{param_name} #{param_name.upcase}", "Passed directly through to ssh command"
+      end
+      c.when_called Subspace::Commands::Exec
+    end
+
     command :configure do |c, args|
       c.syntax = 'subspace configure'
       c.summary = "Regenerate all of the ansible configuration files. You don't normally need to run this."
@@ -89,15 +110,17 @@ class Subspace::Cli
       c.when_called Subspace::Commands::Override
     end
 
-    command :vars do |c, args|
-      c.syntax = 'subspace vars [environment]'
+    command :secrets do |c, args|
+      c.syntax = 'subspace secrets [environment]'
       c.summary = 'View or edit the encrypted variables for an environment'
-      c.description = """By default, this will simply show the variables for a specific environemnt.
-      You can also edit variables, and we expect the functionality here to grow in the future.
-      Running `subspace vars development --create` is usually a great way to bootstrap a new development environment."""
+      c.description = <<~EOS
+        By default, this will simply show the variables for a specific environemnt.
+        You can also edit variables, and we expect the functionality here to grow in the future.
+        Running `subspace secrets development --create` is usually a great way to bootstrap a new development environment.
+      EOS
       c.option '--edit', "Edit the variables instead of view"
      c.option '--create', "Create config/application.yml with the variables from the specified environment"
-      c.when_called Subspace::Commands::Vars
+      c.when_called Subspace::Commands::Secrets
     end
 
     command :maintain do |c, args|
@@ -124,6 +147,19 @@ class Subspace::Cli
       c.when_called Subspace::Commands::MaintenanceMode
     end
 
+    command :inventory do |c, args|
+      c.syntax = 'subspace inventory <command>'
+      c.summary = 'Manage, manipulate, and other useful inventory-related functions'
+      c.description = <<~EOS
+        Available inventory commands:
+
+        capistrano - generate config/deploy/[env].rb. Requires the --env option.
+        list - list the current inventory as understood by subspace.
+      EOS
+      c.option "--env ENVIRONMENT", "Optional: Limit function to a specific environment (aka group)"
+      c.when_called Subspace::Commands::Inventory
+    end
+
     run!
   end
 end
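Putting the new and renamed commands together, the CLI surface added in this hunk can be exercised along these lines (environment and host names are placeholders; flags and syntax come straight from the option/example strings above):

    subspace init --terraform --env production
    subspace tf production                     # plan, review, optionally apply
    subspace secrets staging --edit            # formerly `subspace vars`
    subspace exec web01 "uptime"               # run a command on matching hosts
    subspace inventory list --env production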
@@ -1,9 +1,16 @@
 module Subspace
   module Commands
     module Ansible
+      def ansible_playbook(*args)
+        args.push "--diff"
+        args.push "--private-key"
+        args.push "subspace.pem"
+        ansible_command("ansible-playbook", *args)
+      end
+
       def ansible_command(command, *args)
         update_ansible_cfg
-        Dir.chdir "config/provision" do
+        Dir.chdir "config/subspace" do
           say ">> Running #{command} #{args.join(' ')}"
           system(command, *args, out: $stdout, err: $stderr)
           say "<< Done"
@@ -13,7 +20,9 @@ module Subspace
       private
 
       def update_ansible_cfg
-        if !ENV["DISABLE_MITOGEN"] && `pip show mitogen 2>&1` =~ /^Location: (.*?)$/m
+        if ENV["DISABLE_MITOGEN"]
+          puts "Mitogen explicitly disabled. Skipping detection. "
+        elsif `pip show mitogen 2>&1` =~ /^Location: (.*?)$/m
           @mitogen_path = $1
           puts "🏎🚀🚅Mitogen found at #{@mitogen_path}. WARP 9!....ENGAGE!🚀"
         else
@@ -13,7 +13,7 @@ module Subspace
       end
 
       def template_dir
-        File.join(gem_path, 'template', 'provision')
+        File.join(gem_path, 'template', 'subspace')
       end
 
       def gem_path
@@ -21,23 +21,34 @@ module Subspace
       end
 
       def project_path
+        unless File.exist?(File.join(Dir.pwd, "config", "subspace"))
+          say "Subspace must be run from the project root"
+          exit
+        end
         Dir.pwd # TODO make sure this is correct if they for whatever reason aren't running subspace from the project root??
       end
 
+      def project_name
+        File.basename(project_path) # TODO see above, this should probably be in a configuration somewhere
+      end
+
       def dest_dir
-        "config/provision"
+        "config/subspace"
       end
 
       def template(src, dest = nil, render_binding = nil)
         return unless confirm_overwrite File.join(dest_dir, dest || src)
         template! src, dest, render_binding
-        say "Wrote #{dest}"
+        say "Wrote #{dest || src}"
       end
 
       def template!(src, dest = nil, render_binding = nil)
         dest ||= src
-        template = ERB.new File.read(File.join(template_dir, "#{src}.erb")), nil, '-'
-        File.write File.join(dest_dir, dest), template.result(render_binding || binding)
+        template = ERB.new File.read(File.join(template_dir, "#{src}.erb")), trim_mode: '-'
+        result = template.result(render_binding || binding)
+
+
+        File.write File.join(dest_dir, dest), result
       end
 
       def copy(src, dest = nil)
@@ -74,6 +85,10 @@ module Subspace
       def set_subspace_version
        ENV['SUBSPACE_VERSION'] = Subspace::VERSION
      end
+
+      def inventory
+        @inventory ||= Subspace::Inventory.read("config/subspace/inventory.yml")
+      end
    end
  end
 end
@@ -1,5 +1,4 @@
 class Subspace::Commands::Bootstrap < Subspace::Commands::Base
-  PASS_THROUGH_PARAMS = ["private-key"]
 
   def initialize(args, options)
     @host_spec = args.first
@@ -11,37 +10,33 @@ class Subspace::Commands::Bootstrap < Subspace::Commands::Base
 
   def run
     # ansible atlanta -m copy -a "src=/etc/hosts dest=/tmp/hosts"
-    install_python
-    ensure_ssh_dir
+    hosts = inventory.find_hosts!(@host_spec)
+    update_ansible_cfg
+    hosts.each do |host|
+      say "Bootstapping #{host.vars["hostname"]}..."
+      learn_host(host)
+      install_python(host)
+    end
   end
 
   private
 
-  def ensure_ssh_dir
-    cmd = ["ansible",
-      @host_spec,
-      "-m",
-      "file",
-      "-a",
-      "path=/home/{{ansible_ssh_user}}/.ssh state=directory mode=0700",
-      "-vvvv"
-    ]
-    cmd = cmd | pass_through_params
-    bootstrap_command cmd
+  def learn_host(host)
+    system "ssh-keygen -R #{host.vars["ansible_host"]}"
+    system "ssh-keyscan -H #{host.vars["ansible_host"]} >> ~/.ssh/known_hosts"
   end
 
-  def install_python
-    update_ansible_cfg
+  def install_python(host)
     cmd = ["ansible",
-      @host_spec,
+      host.name,
+      "--private-key",
+      "config/subspace/subspace.pem",
      "-m",
      "raw",
      "-a",
-      "test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)",
-      "--become",
-      "-vvvv"
+      "test -e /usr/bin/python3 || (apt -y update && apt install -y python3)",
+      "--become"
    ]
-    cmd = cmd | pass_through_params
    bootstrap_command cmd
  end
 
@@ -16,12 +16,12 @@ class Subspace::Commands::Configure < Subspace::Commands::Base
   private
 
   def update_host_configuration(host)
-    say "Generating config/provisiong/host_vars/#{host}"
+    say "Generating config/subspace/host_vars/#{host}"
     template "host_vars/template", "host_vars/#{host}", Subspace.config.binding_for(host: host)
   end
 
   def update_group_configuration(group)
-    say "Generating config/provisiong/group_vars/#{group}"
+    say "Generating config/subspace/group_vars/#{group}"
     template "group_vars/template", "group_vars/#{group}", Subspace.config.binding_for(group: group)
   end
 end
@@ -0,0 +1,20 @@
+require 'yaml'
+require 'subspace/inventory'
+class Subspace::Commands::Exec < Subspace::Commands::Base
+  PASS_THROUGH_PARAMS = ["i"]
+
+  def initialize(args, options)
+    @host_spec = args[0]
+    @command = args[1]
+    @user = options.user
+    @options = options
+    run
+  end
+
+  def run
+    hosts = inventory.find_hosts!(@host_spec)
+
+    say "> Running `#{@command}` on #{hosts.join ','}"
+    ansible_command "ansible", @host_spec, "-m", "command", "-a", @command
+  end
+end