taperole 1.0.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.gitignore +5 -0
- data/.tape/ansible.cfg +4 -0
- data/CONTRIBUTING.md +22 -0
- data/README.md +100 -0
- data/Vagrantfile +26 -0
- data/ansible.cfg +2 -0
- data/bin/tape +81 -0
- data/id_rsa_sb_basebox +27 -0
- data/lib/tape/ansible_runner.rb +84 -0
- data/lib/tape/installer.rb +154 -0
- data/lib/tape/qemu_provisioner.rb +167 -0
- data/lib/tape/vagrant_provisioner.rb +42 -0
- data/lib/tape.rb +76 -0
- data/requirements.yml +16 -0
- data/roles/after_deploy/tasks/main.yml +1 -0
- data/roles/backend_checkout/tasks/main.yml +21 -0
- data/roles/backend_config/defaults/main.yml +1 -0
- data/roles/backend_config/tasks/main.yml +49 -0
- data/roles/backend_config/templates/database.yml.j2 +8 -0
- data/roles/backend_config/templates/env_config.yml.j2 +2 -0
- data/roles/backend_install_essentials/meta/main.yml +5 -0
- data/roles/backend_install_essentials/tasks/main.yml +24 -0
- data/roles/backend_install_essentials/templates/memcached.j2 +7 -0
- data/roles/database_load/defaults/main.yml +3 -0
- data/roles/database_load/meta/main.yml +3 -0
- data/roles/database_load/tasks/db_reset.yml +14 -0
- data/roles/database_load/tasks/main.yml +21 -0
- data/roles/delayed_job/defaults/main.yml +2 -0
- data/roles/delayed_job/library/sudo_upstart +101 -0
- data/roles/delayed_job/tasks/main.yml +35 -0
- data/roles/delayed_job/templates/dj_runner_upstart.j2 +17 -0
- data/roles/deployer_user/files/id_rsa_digital_ocean.pub +1 -0
- data/roles/deployer_user/tasks/keys.yml +19 -0
- data/roles/deployer_user/tasks/main.yml +18 -0
- data/roles/frontend_deploy/handlers/main.yml +2 -0
- data/roles/frontend_deploy/tasks/main.yml +8 -0
- data/roles/general/meta/main.yml +3 -0
- data/roles/general/tasks/basic_packages.yml +3 -0
- data/roles/general/tasks/main.yml +6 -0
- data/roles/general/tasks/swapfile.yml +21 -0
- data/roles/monit_activate/tasks/main.yml +2 -0
- data/roles/monit_install/tasks/main.yml +19 -0
- data/roles/monit_install/templates/web_interface.j2 +2 -0
- data/roles/nginx/handlers/main.yml +2 -0
- data/roles/nginx/tasks/main.yml +30 -0
- data/roles/nginx/templates/nginx_monit.j2 +3 -0
- data/roles/nginx/templates/nginx_unicorn.j2 +55 -0
- data/roles/postgres/meta/main.yml +15 -0
- data/roles/redis/tasks/main.yml +15 -0
- data/roles/redis/templates/redis.j2 +10 -0
- data/roles/sidekiq/defaults/main.yml +2 -0
- data/roles/sidekiq/meta/main.yml +3 -0
- data/roles/sidekiq/tasks/main.yml +19 -0
- data/roles/sidekiq/templates/sidekiq.j2 +4 -0
- data/roles/unicorn_activate/defaults/main.yml +3 -0
- data/roles/unicorn_activate/tasks/main.yml +25 -0
- data/roles/unicorn_install/tasks/main.yml +24 -0
- data/roles/unicorn_install/templates/unicorn.rb.j2 +47 -0
- data/roles/unicorn_install/templates/unicorn_init.j2 +70 -0
- data/roles/unicorn_install/templates/unicorn_monit.j2 +5 -0
- data/taperole.gemspec +11 -0
- data/templates/base/deploy.example.yml +17 -0
- data/templates/base/hosts.example +7 -0
- data/templates/base/omnibox.example.yml +25 -0
- data/templates/base/tape_vars.example.yml +13 -0
- data/templates/static_html/deploy.example.yml +12 -0
- data/templates/static_html/omnibox.example.yml +15 -0
- data/templates/static_html/tape_vars.example.yml +7 -0
- data/vars/defaults.yml +31 -0
- metadata +117 -0
module TapeBoxer
  # Execution module exposing the vagrant box lifecycle (create / start /
  # stop / ssh / destroy) as tape actions. Each action exec's the
  # corresponding `vagrant` CLI command, replacing the current process.
  class VagrantProvisioner < ExecutionModule
    TapeBoxer.register_module :vagrant, self

    # BUG FIX: the original was missing the comma after `proc {create_box}`,
    # so the description string parsed as a standalone (discarded) statement.
    action :create,
           proc { create_box },
           'Create a new vagrant box with given name'
    action :start,
           proc { start_box },
           'Starts the vagrant box with given name'
    action :stop,
           proc { stop_box },
           'Stops the vagrant box with given name'
    # BUG FIX: :ssh and :destroy descriptions were copy-paste duplicates
    # of :stop's ("Stops the vagrant box with given name").
    action :ssh,
           proc { ssh_to_box },
           'SSH into the vagrant box with given name'
    action :destroy,
           proc { destroy_box },
           'Destroys the vagrant box with given name'

    protected

    # NOTE(review): unlike the other commands this does not pass
    # opts[:name], so it brings up all boxes — confirm intended.
    def create_box
      Kernel.exec 'vagrant up'
    end

    def stop_box
      Kernel.exec "vagrant halt #{opts[:name]}"
    end

    def start_box
      Kernel.exec "vagrant up #{opts[:name]}"
    end

    def ssh_to_box
      Kernel.exec "vagrant ssh #{opts[:name]}"
    end

    def destroy_box
      Kernel.exec "vagrant destroy #{opts[:name]}"
    end
  end
end
|
data/lib/tape.rb
ADDED
require 'erb'
require 'fileutils'

# TapeBoxer: core plumbing for tape's provisioning CLI. Provisioner
# classes subclass ExecutionModule, declare their commands with the
# `action` macro, and register themselves via TapeBoxer.register_module.
module TapeBoxer
  class InvalidAction < StandardError; end
  class ActionError < StandardError; end
  class UnspecifiedOption < StandardError; end

  # A named, documented callable registered on an ExecutionModule.
  # (Constant assignment instead of `class X < Struct.new(...)` — avoids
  # the anonymous-superclass anti-pattern; interface is unchanged.)
  Action = Struct.new(:name, :proc, :description)
  # A provisioner class registered under a short name (e.g. :vagrant).
  RegisteredModule = Struct.new(:name, :klass)

  # Register provisioner class +klass+ under +name+ so the CLI can find it.
  def self.register_module(name, klass)
    registered_modules[name] = RegisteredModule.new(name, klass)
  end

  # name => RegisteredModule lookup table (lazily initialized).
  def self.registered_modules
    @modules ||= {}
  end

  # Base class for provisioners: holds parsed CLI opts and dispatches
  # declared actions.
  class ExecutionModule
    attr_reader :opts

    # opts: hash of CLI options (e.g. {name: 'box1'}).
    def initialize(opts)
      @opts = opts
    end

    # Actions declared on this class. BUG FIX: lazily initialized so a
    # subclass with no declared actions returns {} instead of nil (which
    # previously made execute_action crash with NoMethodError).
    def self.actions
      @actions ||= {}
    end

    # Declare an action: its +name+, the proc +body+ to run, and a
    # human-readable +doc+ string.
    def self.action(name, body = '', doc = '')
      actions[name.to_sym] = Action.new(name, body, doc)
    end

    # Set (when an argument is given) or return the module's name.
    def self.module_name(name = nil)
      @module_name = (name || @module_name)
    end

    def actions
      self.class.actions
    end

    # Run the named action's proc in this instance's context.
    # Raises InvalidAction for an unknown action, or when the
    # ansible-playbook binary is not on PATH.
    def execute_action(action)
      action = action.to_sym
      unless actions.include?(action)
        raise InvalidAction, "#{action} is not a valid action!"
      end

      unless system("ansible-playbook --version >/dev/null")
        raise InvalidAction, "ansible-playbook must be on your PATH to use this tool"
      end

      # Parenthesized &-argument: the original bare `instance_eval &...`
      # form is an ambiguity warning in Ruby.
      instance_eval(&actions[action].proc)
    end

    protected

    # Raise unless the named CLI option was provided.
    def require_opt(name)
      unless opts[name.to_sym]
        raise UnspecifiedOption, "Option --#{name} must be specified to do this!"
      end
    end

    private

    # Root directory of the installed tape gem.
    def tape_dir
      File.realpath(File.join(__dir__, '../'))
    end

    # Directory the user invoked tape from.
    def local_dir
      Dir.pwd
    end
  end
end

# Load all provisioner modules shipped under lib/tape/.
Dir[File.dirname(__FILE__) + "/tape/*.rb"].each { |file| require file }
|
data/requirements.yml
ADDED
@@ -0,0 +1,16 @@
|
|
1
|
+
---
|
2
|
+
- src: jnv.unattended-upgrades
|
3
|
+
version: v1.0.1
|
4
|
+
|
5
|
+
- src: zzet.rbenv
|
6
|
+
version: 2.0.0
|
7
|
+
|
8
|
+
- src: lxhunter.apt
|
9
|
+
|
10
|
+
- src: https://github.com/ANXS/postgresql
|
11
|
+
name: ANXS.postgresql
|
12
|
+
|
13
|
+
- src: leonelgalan.node
|
14
|
+
version: '0.2'
|
15
|
+
|
16
|
+
- src: bennojoy.memcached
|
@@ -0,0 +1 @@
|
|
1
|
+
- include: '{{local_dir}}/roles/after_deploy.yml'
|
@@ -0,0 +1,21 @@
|
|
1
|
+
- name: Check out application
|
2
|
+
remote_user: "{{ deployer_user.name }}"
|
3
|
+
git: dest={{ be_app_path }}
|
4
|
+
repo={{ be_app_repo }}
|
5
|
+
version={{ be_app_branch }}
|
6
|
+
accept_hostkey=true
|
7
|
+
register: app_checkout
|
8
|
+
tags: [be_deploy]
|
9
|
+
|
10
|
+
- name: check that secrets is ignored
|
11
|
+
shell: cat {{ be_app_path }}/.gitignore | grep {{ item }}
|
12
|
+
with_items:
|
13
|
+
- config/secrets.yml
|
14
|
+
register: secrets_ignore_check
|
15
|
+
ignore_errors: true
|
16
|
+
tags: [be_deploy]
|
17
|
+
|
18
|
+
- name: ignore secrets
|
19
|
+
shell: /bin/bash -c 'echo "config/secrets.yml" > {{ be_app_path }}/.git/info/exclude'
|
20
|
+
when: secrets_ignore_check|failed
|
21
|
+
tags: [be_deploy]
|
@@ -0,0 +1 @@
|
|
1
|
+
force_bundle: false
|
# Backend application configuration: writes database.yml, installs the
# bundle, waits for secret config files to be uploaded, and precompiles
# assets.

- name: Set up db config
  template: src=database.yml.j2 dest={{be_app_path}}/config/database.yml

# zzet.rbenv puts all the rbenv stuff in profile for some reason
# so we gotta use login shells to do this stuff
- name: Install bundle gems
  remote_user: "{{ deployer_user.name }}"
  command: chdir={{ be_app_path }}
           bash -lc "RAILS_ENV={{be_app_env}} bundle install
           --without test development contests_development --deployment"
  tags: [be_deploy, bundle]

- name: Ensure env_config.yml file present
  stat: path={{ be_app_path }}/config/env_config.yml
  register: env_config_file
  tags: [be_deploy]

- name: Ask for env_config.yml
  debug: msg="You've got to upload env_config.yml to {{be_app_path}}/config to continue"
  when: env_config_file.stat.exists != true
  tags: [be_deploy]

- name: Wait one day for env_config.yml to get put on the server
  wait_for: path={{be_app_path}}/config/env_config.yml state=present timeout=86400
  when: env_config_file.stat.exists != true
  tags: [be_deploy]

- name: Ensure secrets.yml file present
  stat: path={{ be_app_path }}/config/secrets.yml
  register: secrets_file
  tags: [be_deploy]

- name: Ask for secrets.yml
  debug: msg="You've got to upload secrets.yml to {{be_app_path}}/config to continue"
  when: secrets_file.stat.exists != true
  tags: [be_deploy]

- name: Wait one day for secrets.yml to get put on the server
  wait_for: path={{be_app_path}}/config/secrets.yml state=present timeout=86400
  when: secrets_file.stat.exists != true
  # tags added for consistency with the env_config.yml wait task above
  tags: [be_deploy]

- name: Precompile Assets
  remote_user: "{{ deployer_user.name }}"
  command: chdir={{ be_app_path }}
           bash -lc "bundle exec rake assets:precompile RAILS_ENV={{be_app_env}}"
  tags: [precompile_assets, be_deploy]
  when: precompile_assets
# BUG FIX: a stray duplicate `tags: [be_deploy]` line previously followed
# this task. It duplicated the Precompile task's `tags` key (YAML duplicate
# key) and would override [precompile_assets, be_deploy]. Removed.
|
@@ -0,0 +1,24 @@
|
|
1
|
+
- name: Install imagemagick
|
2
|
+
apt: name={{ item }} state=present
|
3
|
+
with_items:
|
4
|
+
- imagemagick
|
5
|
+
- libmagickcore-dev
|
6
|
+
- libmagickwand-dev
|
7
|
+
|
8
|
+
- name: Register monit memcached config files
|
9
|
+
template: src=memcached.j2
|
10
|
+
dest=/etc/monit/conf.d/memcached
|
11
|
+
mode=u=rw,g=r,o=r
|
12
|
+
register: memcached_monit_config
|
13
|
+
|
14
|
+
- name: Reload Monit
|
15
|
+
command: bash -lc "monit reload"
|
16
|
+
when: memcached_monit_config.changed
|
17
|
+
|
18
|
+
# zzet.rbenv puts all the rbenv stuff in profile for some reason
|
19
|
+
# so we gotta use login shells to do this stuff
|
20
|
+
- name: Install bundler
|
21
|
+
shell: bash -lc "gem install bundler"
|
22
|
+
|
23
|
+
- name: Rbenv rehash
|
24
|
+
shell: bash -lc "rbenv rehash"
|
@@ -0,0 +1,7 @@
|
|
1
|
+
check process memcached with pidfile /var/run/memcached.pid
|
2
|
+
start program = "/etc/init.d/memcached start"
|
3
|
+
stop program = "/etc/init.d/memcached stop"
|
4
|
+
if failed host 127.0.0.1 port 11211 then restart
|
5
|
+
if cpu > 70% for 2 cycles then alert
|
6
|
+
if cpu > 98% for 5 cycles then restart
|
7
|
+
if 2 restarts within 3 cycles then timeout
|
# Destructive database reset: stops the app's unicorn workers, drops and
# recreates the database, then restarts the workers.

- name: stop unicorns
  service: name=unicorn_{{app_name}} state=stopped

- name: Reset DB
  command: chdir={{ be_app_path }}
           bash -lc "bundle exec rake db:drop db:create RAILS_ENV={{be_app_env}}"
  register: db_reset

# NOTE(review): this fails whenever ANYTHING was written to stderr,
# including harmless rake/bundler warnings — presumably the intent was
# "rake failed"; confirm before relying on this check.
- name: DB Reset Failed
  fail: msg="{{db_reset.stderr}}"
  when: db_reset.stderr

- name: start unicorns
  service: name=unicorn_{{app_name}} state=started
|
@@ -0,0 +1,21 @@
|
|
1
|
+
- include: db_reset.yml
|
2
|
+
when: force_db_reset is defined and force_db_reset
|
3
|
+
tags: [db_reset]
|
4
|
+
|
5
|
+
- name: Migrate DB
|
6
|
+
remote_user: "{{ deployer_user.name }}"
|
7
|
+
command: chdir={{ be_app_path }}
|
8
|
+
bash -lc "bundle exec rake db:migrate RAILS_ENV={{be_app_env}}"
|
9
|
+
tags: [be_deploy,migrate,db_reset]
|
10
|
+
when: (app_checkout is defined and app_checkout.changed)
|
11
|
+
or rake.force_migrate
|
12
|
+
or (db_reset is defined and db_reset.changed)
|
13
|
+
|
14
|
+
- name: Seed DB
|
15
|
+
remote_user: "{{ deployer_user.name }}"
|
16
|
+
command: chdir={{ be_app_path }}
|
17
|
+
bash -lc "bundle exec rake db:seed RAILS_ENV={{be_app_env}}"
|
18
|
+
tags: [be_deploy,seed,db_reset]
|
19
|
+
when: (app_checkout is defined and app_checkout.changed)
|
20
|
+
or rake.force_seed
|
21
|
+
or (db_reset is defined and db_reset.changed)
|
#!/bin/sh

# This ansible module runs upstart tasks with sudo.
# This is necessary when specific jobs are allowed with
# the sudoers file, as using the ansible service module will
# attempt to execute the entire service module as root instead
# of just the /sbin/{start,stop,...} jobs
#
# This module also wraps restart and reload, ensuring the service
# is started

# Ansible provides its module opts in a file that needs to be sourced
# to get the env we run in; this sets $name and $state (validated below).
. $1

# Avoid stalling on password prompts
sudocmd="echo '' | sudo -S"

# holds the result of our operations for reporting state changes
res=none

# Run a command through sudo, discarding stdout; exit status is preserved.
sudoex() { eval "$sudocmd $@ >/dev/null";}

# Failure JSON that ansible expects
fail() {
  echo '{"failed": true, "msg": "' $1 '", "status": "'`us_status $name`'"}'
  exit 1
}

# Make sure certain vars are set when sourcing $1
require_opt() {
  eval val=\$$1
  if [ ! -n "$val" ]; then
    fail "Opt: $1 is required!"
  fi
}

# All we need for now is name and state
require_opt name
require_opt state

# Bail out early for services upstart does not know about.
if ! status $name; then
  fail "Invalid service: $name"
fi

# reload falls back to start when the job is not running (reload of a
# stopped job fails).
reload() {
  if sudoex /sbin/reload $name; then
    res=reloaded
  else
    sudoex /sbin/start $name || fail "Could not reload or start! $name"
    res=started
  fi
}

# restart likewise falls back to start when the job is not running.
restart() {
  if sudoex /sbin/restart $name; then
    res=restarted
  else
    sudoex /sbin/start $name || fail "Could not restart or start! $name"
    res=started
  fi
}

# stop only if not already stopped (idempotent for ansible's changed flag).
stopit() {
  if [ "`us_status $name`" = "stop/waiting" ]; then
    res=none
  else
    sudoex /sbin/stop $name || fail "Could not stop $name"
    res=stopped
  fi
}

# start only if not already running (idempotent for ansible's changed flag).
start() {
  if [ "`us_status $name`" = "start/running" ]; then
    res=none
  else
    sudoex /sbin/start $name || fail "Could not start $name"
    res=started
  fi
}

# Return status string for the given service
us_status() {
  status $1 | cut -f 2 -d ' ' | tr -d ','
}

# NOTE(review): this clears the `res=none` default set above; every case
# branch assigns res, so the final check still works — but an empty res
# would report changed=true. Confirm intended.
res=''
case $state in
  reloaded) reload;;
  restarted) restart;;
  stopped) stopit;;
  started) start;;
  *) fail "Invalid state: $state";;
esac

# Were there changes applied?
# NOTE(review): the key here is "change" (not the conventional "msg"/
# "state") — consumers of this module's output should expect that.
if [ "$res" = none ]; then
  echo '{"changed": false}'
else
  echo '{"change": "'$res'", "changed": true}'
fi
|
# Delayed Job runner: installs an upstart job for the DJ daemon, grants the
# deployer sudo access to its lifecycle commands, and restarts it on deploy.

- name: Install upstart job
  template: src=dj_runner_upstart.j2
            dest=/etc/init/dj_runner_{{app_name}}.conf
  tags: [configure_dj_runner]
  register: dj_runner_installation
  when: enabled_delayed_job

- name: register unicorn upstart script
  command: initctl reload-configuration
  when: dj_runner_installation.changed and
        enabled_delayed_job
  tags: [configure_dj_runner]

- name: Give deployer user access to DJ upstart jobs
  lineinfile: 'dest=/etc/sudoers
               state=present
               line="{{ deployer_user.name }} ALL = (root) NOPASSWD: /sbin/{{item}} dj_runner_{{app_name}}"'
  with_items:
    - start
    - stop
    - restart
    - status
    - reload
  tags: [configure_dj_runner]
  when: enabled_delayed_job

- name: restart delayed job runner
  sudo_upstart: name=dj_runner_{{app_name}} state=restarted
  remote_user: "{{ deployer_user.name }}"
  # BUG FIX: Jinja2 `and` binds tighter than `or`, so previously
  # enabled_delayed_job only guarded the app_checkout clause and this task
  # could run with delayed_job disabled. Parenthesize the restart triggers
  # so enabled_delayed_job gates all of them.
  when: (force_dj_runner_restart or
        (dj_runner_installation is defined and dj_runner_installation.changed) or
        force_db_reset or
        (app_checkout is defined and app_checkout.changed)) and
        enabled_delayed_job
  # NOTE(review): tag is `reset_db` here but `db_reset` in database_load —
  # likely a typo; left unchanged since renaming alters tag invocations.
  tags: [configure_dj_runner, be_deploy, reset_db]
|
@@ -0,0 +1,17 @@
|
|
1
|
+
description "{{app_name}} delayed job runner daemon"
|
2
|
+
|
3
|
+
start on virtual-filesystems
|
4
|
+
stop on runlevel [06]
|
5
|
+
|
6
|
+
env PATH={{ rbenv_root }}/shims:{{ rbenv_root }}/bin:/usr/bin:/sbin:/bin
|
7
|
+
env RAILS_ENV={{be_app_env}}
|
8
|
+
setuid {{ deployer_user.name }}
|
9
|
+
setgid {{ deployer_user.name }}
|
10
|
+
|
11
|
+
chdir {{be_app_path}}
|
12
|
+
|
13
|
+
respawn limit 5 5
|
14
|
+
|
15
|
+
kill timeout 5
|
16
|
+
|
17
|
+
exec bundle exec bin/delayed_job run
|
@@ -0,0 +1 @@
|
|
1
|
+
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDlxLbugDNrEg0fjchsaBG6XYLTOh5u3miY8gYMR0xQXFGsxipsyzYd2HSdSp1SPJyGs6aIXVfbeMsQVvDbWQcJTZYYviO2Rj6olf13gjA094CAlDCyTVgRYddiTrirFZiMCzLJrXfyGKiQcQ50BhpYYcO8QwPkwDo6Fs6AhuVMxlYc7MqHTxUwiuVsiC3xbgVnGszB8fI3v0531KOl7tJAxI1M53uexH3rQrEdpRwNqQAXoH9a8HQTaxvtSip1HrNvUumStt1Pu8tP6b3KwuHPwTnJtc2fXYatLjfbAf9KSCCPFhLfwVdAGTI/De6GU4D9lh39sFt2E63qV7mnSn+f
|
@@ -0,0 +1,19 @@
|
|
1
|
+
# - name: Ensure key files exist
|
2
|
+
# fail: msg="Key file {{local_dir + '/' + item}} is empty!"
|
3
|
+
# when: lookup('file', local_dir + '/' + item)|default(None) == None
|
4
|
+
# with_items: dev_key_files
|
5
|
+
# tags: [deployer]
|
6
|
+
|
7
|
+
- name: Ensure devs keys are present
|
8
|
+
authorized_key: key="{{ lookup('file', local_dir + '/' + item) }}"
|
9
|
+
manage_dir=yes
|
10
|
+
state=present
|
11
|
+
user=deployer
|
12
|
+
with_items: dev_key_files
|
13
|
+
|
14
|
+
- name: Ensure DO pubkey is present
|
15
|
+
authorized_key: key="{{ lookup('file', 'id_rsa_digital_ocean.pub') }}"
|
16
|
+
manage_dir=yes
|
17
|
+
state=present
|
18
|
+
user=deployer
|
19
|
+
# tags: [deployer]
|
@@ -0,0 +1,18 @@
|
|
1
|
+
- name: Create deployer groups
|
2
|
+
group: name={{ item }} state=present
|
3
|
+
with_items: deployer_user.groups
|
4
|
+
|
5
|
+
- name: Ensure deployer user is present
|
6
|
+
user: name={{ deployer_user.name }} state=present append=yes shell=/bin/bash
|
7
|
+
|
8
|
+
- name: Ensure deployer user is in its groups
|
9
|
+
user: name={{ deployer_user.name }} groups={{ item }} state=present append=yes shell=/bin/bash
|
10
|
+
with_items: deployer_user.groups
|
11
|
+
|
12
|
+
# It's possible for the deployer's homedir to get created on accident by
|
13
|
+
# a deploy script or something getting run before this. This just ensures
|
14
|
+
# the env is sane moving forward
|
15
|
+
- name: Ensure deployer user owns his own homedir
|
16
|
+
file: path=/home/deployer state=directory owner=deployer
|
17
|
+
|
18
|
+
- include: keys.yml
|
@@ -0,0 +1,21 @@
|
|
1
|
+
- name: Make zerod swap file
|
2
|
+
command: dd if=/dev/zero of={{ swap_file.path }} bs=1024 count={{swap_file.size_kb}}
|
3
|
+
creates={{ swap_file.path }}
|
4
|
+
- name: Set swap permissions
|
5
|
+
file: path={{ swap_file.path }} owner=root group=root mode=0600
|
6
|
+
|
7
|
+
- name: Check swap type
|
8
|
+
command: file {{ swap_file.path }}
|
9
|
+
register: swapfile_info
|
10
|
+
changed_when: swapfile_info.stdout.find('swap file') == -1
|
11
|
+
|
12
|
+
- name: mkswap on swap file
|
13
|
+
command: mkswap {{ swap_file.path }}
|
14
|
+
when: swapfile_info.stdout.find('swap file') == -1
|
15
|
+
|
16
|
+
- name: Put the swap entry into fstab
|
17
|
+
mount: name=none src={{ swap_file.path }} fstype=swap opts=sw passno=0 dump=0 state=present
|
18
|
+
|
19
|
+
- name: Swapon swapfile
|
20
|
+
command: swapon {{ swap_file.path }}
|
21
|
+
when: ansible_swaptotal_mb < 1
|
@@ -0,0 +1,19 @@
|
|
1
|
+
- name: Install monit
|
2
|
+
apt: name=monit state=present
|
3
|
+
|
4
|
+
- name: Register monit config files
|
5
|
+
template: src={{ item }}.j2
|
6
|
+
dest=/etc/monit/conf.d/{{ item }}
|
7
|
+
mode=u=rw,g=r,o=r
|
8
|
+
with_items:
|
9
|
+
- web_interface
|
10
|
+
register: web_interface_monit_config
|
11
|
+
|
12
|
+
- name: Reload Monit
|
13
|
+
command: bash -lc "monit reload"
|
14
|
+
when: web_interface_monit_config.changed
|
15
|
+
|
16
|
+
- name: Give deployer user access to monit
|
17
|
+
lineinfile: 'dest=/etc/sudoers
|
18
|
+
state=present
|
19
|
+
line="{{ deployer_user.name }} ALL = (ALL) ALL, NOPASSWD: /usr/bin/monit"'
|
# Installs nginx from the stable PPA, removes the stock default site,
# renders the app vhost, and wires nginx into monit.

- name: Enable nginx PPA
  apt_repository: repo=ppa:nginx/stable
  tags: [nginx]

- name: Install nginx
  apt: name=nginx state=present
  tags: [nginx]

- name: Ditch default nginx site enabled
  file: path=/etc/nginx/sites-enabled/default state=absent
  tags: [nginx]

- name: Configure App nginx
  template: src=nginx_unicorn.j2 dest=/etc/nginx/sites-enabled/{{ app_name }}
  notify: restart nginx
  tags: [nginx]

- name: Install monit nginx config
  template: src=nginx_monit.j2
            dest=/etc/monit/conf.d/nginx
            mode=u=rw,g=r,o=r
  register: nginx_monit_config

- name: Reload Monit
  command: bash -lc "monit reload"
  when: nginx_monit_config.changed

# NOTE(review): no `when` guard here — this restarts nginx on EVERY full
# run, not only when the restart_nginx tag is requested. Confirm intended
# (the "restart nginx" handler above already covers config changes).
- name: Restart nginx
  command: bash -lc "sudo monit restart nginx"
  tags: [restart_nginx]