slugforge 4.0.0

Files changed (72)
  1. checksums.yaml +7 -0
  2. data/README.md +316 -0
  3. data/bin/slugforge +9 -0
  4. data/lib/slugforge.rb +19 -0
  5. data/lib/slugforge/build.rb +4 -0
  6. data/lib/slugforge/build/build_project.rb +31 -0
  7. data/lib/slugforge/build/export_upstart.rb +85 -0
  8. data/lib/slugforge/build/package.rb +63 -0
  9. data/lib/slugforge/cli.rb +125 -0
  10. data/lib/slugforge/commands.rb +130 -0
  11. data/lib/slugforge/commands/build.rb +20 -0
  12. data/lib/slugforge/commands/config.rb +24 -0
  13. data/lib/slugforge/commands/deploy.rb +383 -0
  14. data/lib/slugforge/commands/project.rb +21 -0
  15. data/lib/slugforge/commands/tag.rb +148 -0
  16. data/lib/slugforge/commands/wrangler.rb +142 -0
  17. data/lib/slugforge/configuration.rb +125 -0
  18. data/lib/slugforge/helper.rb +186 -0
  19. data/lib/slugforge/helper/build.rb +46 -0
  20. data/lib/slugforge/helper/config.rb +37 -0
  21. data/lib/slugforge/helper/enumerable.rb +46 -0
  22. data/lib/slugforge/helper/fog.rb +90 -0
  23. data/lib/slugforge/helper/git.rb +89 -0
  24. data/lib/slugforge/helper/path.rb +76 -0
  25. data/lib/slugforge/helper/project.rb +86 -0
  26. data/lib/slugforge/models/host.rb +233 -0
  27. data/lib/slugforge/models/host/fog_host.rb +33 -0
  28. data/lib/slugforge/models/host/hostname_host.rb +9 -0
  29. data/lib/slugforge/models/host/ip_address_host.rb +9 -0
  30. data/lib/slugforge/models/host_group.rb +65 -0
  31. data/lib/slugforge/models/host_group/aws_tag_group.rb +22 -0
  32. data/lib/slugforge/models/host_group/ec2_instance_group.rb +21 -0
  33. data/lib/slugforge/models/host_group/hostname_group.rb +16 -0
  34. data/lib/slugforge/models/host_group/ip_address_group.rb +16 -0
  35. data/lib/slugforge/models/host_group/security_group_group.rb +20 -0
  36. data/lib/slugforge/models/logger.rb +36 -0
  37. data/lib/slugforge/models/tag_manager.rb +125 -0
  38. data/lib/slugforge/slugins.rb +125 -0
  39. data/lib/slugforge/version.rb +9 -0
  40. data/scripts/post-install.sh +143 -0
  41. data/scripts/unicorn-shepherd.sh +305 -0
  42. data/spec/fixtures/array.yaml +3 -0
  43. data/spec/fixtures/fog_credentials.yaml +4 -0
  44. data/spec/fixtures/invalid_syntax.yaml +1 -0
  45. data/spec/fixtures/one.yaml +3 -0
  46. data/spec/fixtures/two.yaml +3 -0
  47. data/spec/fixtures/valid.yaml +4 -0
  48. data/spec/slugforge/commands/deploy_spec.rb +72 -0
  49. data/spec/slugforge/commands_spec.rb +33 -0
  50. data/spec/slugforge/configuration_spec.rb +200 -0
  51. data/spec/slugforge/helper/fog_spec.rb +81 -0
  52. data/spec/slugforge/helper/git_spec.rb +152 -0
  53. data/spec/slugforge/models/host_group/aws_tag_group_spec.rb +54 -0
  54. data/spec/slugforge/models/host_group/ec2_instance_group_spec.rb +51 -0
  55. data/spec/slugforge/models/host_group/hostname_group_spec.rb +20 -0
  56. data/spec/slugforge/models/host_group/ip_address_group_spec.rb +54 -0
  57. data/spec/slugforge/models/host_group/security_group_group_spec.rb +52 -0
  58. data/spec/slugforge/models/tag_manager_spec.rb +75 -0
  59. data/spec/spec_helper.rb +37 -0
  60. data/spec/support/env.rb +3 -0
  61. data/spec/support/example_groups/configuration_writer.rb +24 -0
  62. data/spec/support/example_groups/helper_provider.rb +10 -0
  63. data/spec/support/factories.rb +18 -0
  64. data/spec/support/fog.rb +15 -0
  65. data/spec/support/helpers.rb +18 -0
  66. data/spec/support/mock_logger.rb +6 -0
  67. data/spec/support/ssh.rb +8 -0
  68. data/spec/support/streams.rb +13 -0
  69. data/templates/foreman/master.conf.erb +21 -0
  70. data/templates/foreman/process-master.conf.erb +2 -0
  71. data/templates/foreman/process.conf.erb +19 -0
  72. metadata +344 -0

data/lib/slugforge/models/logger.rb
@@ -0,0 +1,36 @@
+ module Slugforge
+   class Logger
+     def initialize(thor_shell, log_level = :info)
+       @thor_shell = thor_shell
+       @log_level = log_level
+     end
+
+     def log(message="", opts={})
+       return if @log_level != :verbose && opts[:log_level] == :verbose
+       if opts[:status]
+         say_status opts[:status], message, opts[:color]
+       else
+         if opts[:force_new_line]
+           say message, opts[:color], true
+         else
+           say message, opts[:color]
+         end
+       end
+     end
+
+     def say(message="", color=nil, force_new_line=(message.to_s !~ /( |\t)\z/))
+       return if [:quiet, :json].include?(@log_level)
+       @thor_shell.say message, color, force_new_line
+     end
+
+     def say_status(status, message, log_status=true)
+       return if [:quiet, :json].include?(@log_level)
+       @thor_shell.say_status status, message, log_status
+     end
+
+     def say_json(message)
+       return unless @log_level == :json
+       @thor_shell.say message.to_json
+     end
+   end
+ end
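
A quick usage sketch for the logger above; the Thor shell and the messages are illustrative assumptions, not code from the gem. Any object that responds to say and say_status (such as a Thor shell) can back the logger:

    require 'thor'
    require 'slugforge'

    shell  = Thor::Shell::Color.new            # any Thor shell works
    logger = Slugforge::Logger.new(shell, :verbose)

    # plain message, only printed because the log level is :verbose
    logger.log "Resolving hosts...", :log_level => :verbose

    # status-style message, rendered via say_status with a color
    logger.log "my-app-1234.slug", :status => :deploy, :color => :green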

data/lib/slugforge/models/tag_manager.rb
@@ -0,0 +1,125 @@
+ module Slugforge
+   class TagManager
+     def initialize(opts)
+       bucket(opts)
+     end
+
+     def bucket(opts={})
+       if @bucket.nil? || (true == (opts[:refresh] || @bucket_dirty))
+         @s3 = opts[:s3] || @s3
+         @aws_bucket = opts[:bucket] || @aws_bucket
+         @bucket = @s3.directories.get(@aws_bucket)
+         @slugs_for_tag = {}
+         @tags = {}
+         @bucket_dirty = false
+       end
+       @bucket
+     end
+
+     def projects
+       return [] if bucket.files.nil?
+       result = {}
+       bucket.files.each do |file|
+         result[$~[1]] = true if (file.key =~ /^([^\/]+)\//)
+       end
+       result.keys
+     end
+
+     def tags(project_name)
+       @tags[project_name] ||= begin
+         return [] if bucket.files.nil?
+         result = {}
+         bucket.files.each do |file|
+           result[$~[1]] = true if file.key =~ /^#{project_name}\/tags\/(.+)/
+         end
+         result.keys
+       end
+     end
+
+     # This method should be called before iterating over a large set of slugs and
+     # calling #tags_for_slug on them. Doing so fetches all of the data from AWS up
+     # front in parallel threads rather than serially.
+     def memoize_slugs_for_tags(project_name)
+       @slugs_for_tag[project_name] ||= {}
+       tag_map = tags(project_name).parallel_map do |tag|
+         next if @slugs_for_tag[project_name][tag]
+         file = nil
+         begin
+           file = bucket.files.get(tag_file_name(project_name, tag))
+         rescue Excon::Errors::Forbidden
+           # ignore 403's
+         end
+         slugs = file.nil? ? [] : file.body.split("\n")
+         [tag, slugs]
+       end
+       tag_map.each do |tag, slugs|
+         @slugs_for_tag[project_name][tag] = slugs
+       end
+     end
+
+     def slugs_for_tag(project_name, tag)
+       @slugs_for_tag[project_name] ||= {}
+       @slugs_for_tag[project_name][tag] ||= begin
+         return [] if bucket.files.nil?
+         file = nil
+         begin
+           file = bucket.files.get(tag_file_name(project_name, tag))
+         rescue Excon::Errors::Forbidden
+           # ignore 403's
+         end
+         file.nil? ? [] : file.body.split("\n")
+       end
+     end
+
+     def rollback_slug_for_tag(project_name, tag)
+       slugs = slugs_for_tag(project_name, tag)
+       slugs.shift
+       save_tag(project_name, tag, slugs) unless slugs.empty?
+       slugs.first
+     end
+
+     def slug_for_tag(project_name, tag)
+       slugs = slugs_for_tag(project_name, tag)
+       slugs.first
+     end
+
+     def tags_for_slug(project_name, slug_name)
+       tags = tags(project_name)
+
+       tags.select do |tag|
+         slug_for_tag(project_name, tag) == slug_name
+       end
+     end
+
+     def clone_tag(project_name, old_tag, new_tag)
+       slugs = slugs_for_tag(project_name, old_tag)
+       save_tag(project_name, new_tag, slugs)
+     end
+
+     def create_tag(project_name, tag, slug_name)
+       slugs = [slug_name]
+       slugs += slugs_for_tag(project_name, tag)
+       slugs = slugs.slice(0,10)
+       save_tag(project_name, tag, slugs)
+     end
+
+     def delete_tag(project_name, tag)
+       return nil if bucket.files.nil?
+       bucket.files.head(tag_file_name(project_name, tag)).destroy
+       @bucket_dirty = true
+     end
+
+     def save_tag(project_name, tag, slugs)
+       bucket.files.create(
+         :key => tag_file_name(project_name, tag),
+         :body => slugs.join("\n"),
+         :public => false
+       )
+       @bucket_dirty = true
+     end
+
+     def tag_file_name(project_name, tag)
+       [project_name, 'tags', tag].join('/')
+     end
+   end
+ end
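
A minimal sketch of how the tag manager above might be driven; the Fog credentials, bucket, project, and slug names are illustrative assumptions:

    require 'fog'
    require 'slugforge'

    # S3 connection via Fog; credentials here are placeholders
    storage = Fog::Storage.new(
      :provider              => 'AWS',
      :aws_access_key_id     => ENV['AWS_ACCESS_KEY_ID'],
      :aws_secret_access_key => ENV['AWS_SECRET_ACCESS_KEY']
    )

    manager = Slugforge::TagManager.new(:s3 => storage, :bucket => 'my-slug-bucket')

    # create_tag prepends the slug and keeps at most ten entries in the tag file,
    # so rollback_slug_for_tag can step back to the previous slug
    manager.create_tag('my-app', 'production', 'my-app-1234.slug')
    manager.slug_for_tag('my-app', 'production')      # => "my-app-1234.slug"
    manager.rollback_slug_for_tag('my-app', 'production')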

data/lib/slugforge/slugins.rb
@@ -0,0 +1,125 @@
+ # Copied from Pry under the terms of the MIT license.
+ # https://github.com/pry/pry/blob/0f207450a968e9e72b6e8cc8b2c21e7029569d3b/lib/pry/slugins.rb
+
+ module Slugforge
+   class SluginManager
+     PREFIX = /^slugforge-/
+
+     # Placeholder when no associated gem is found; displays a warning
+     class NoSlugin
+       def initialize(name)
+         @name = name
+       end
+
+       def method_missing(*args)
+         warn "Warning: The slugin '#{@name}' was not found! (no gem found)"
+       end
+     end
+
+     class Slugin
+       attr_accessor :name, :gem_name, :enabled, :spec, :active
+
+       def initialize(name, gem_name, spec, enabled)
+         @name, @gem_name, @enabled, @spec = name, gem_name, enabled, spec
+       end
+
+       # Disable a slugin. (prevents the slugin from being loaded; cannot
+       # disable an already activated slugin)
+       def disable!
+         self.enabled = false
+       end
+
+       # Enable a slugin. (does not load it immediately but puts it on the
+       # 'white list' to be loaded)
+       def enable!
+         self.enabled = true
+       end
+
+       # Load the slugin (require the gem - enables/loads the
+       # slugin immediately at point of call, even if the slugin is
+       # disabled)
+       # Does not reload the slugin if it's already loaded.
+       def load!
+         begin
+           require gem_name
+         rescue LoadError => e
+           warn "Found slugin #{gem_name}, but could not require '#{gem_name}.rb'"
+           warn e
+         rescue => e
+           warn "require '#{gem_name}' failed, saying: #{e}"
+         end
+
+         self.enabled = true
+       end
+
+       # Activate the slugin (run its defined activation method).
+       # Does not reactivate if already active.
+       def activate!(config)
+         return if active?
+
+         if klass = slugin_class
+           klass.activate(config) if klass.respond_to?(:activate)
+         end
+
+         self.active = true
+       end
+
+       alias active? active
+       alias enabled? enabled
+
+       private
+
+       def slugin_class
+         name = spec.name.gsub(/^slugforge-/, '').camelize
+         name = "Slugforge#{name}"
+         begin
+           name.constantize
+         rescue NameError
+           warn "Slugin #{gem_name} cannot be activated. Expected module named #{name}."
+         end
+       end
+     end
+
+     def initialize
+       @slugins = []
+       locate_slugins
+     end
+
+     # @return [Hash] A hash with all slugin names (minus the prefix) as
+     #   keys and slugin objects as values.
+     def slugins
+       h = Hash.new { |_, key| NoSlugin.new(key) }
+       @slugins.each do |slugin|
+         h[slugin.name] = slugin
+       end
+       h
+     end
+
+     # Require all enabled slugins; disabled slugins are skipped.
+     def load_slugins
+       @slugins.each(&:load!)
+     end
+
+     def activate_slugins(config)
+       @slugins.each { |s| s.activate!(config) if s.enabled? }
+     end
+
+     private
+
+     # Find all installed slugforge slugins and store them in an internal array.
+     def locate_slugins
+       Gem.refresh
+       (Gem::Specification.respond_to?(:each) ? Gem::Specification : Gem.source_index.find_name('')).each do |gem|
+         next if gem.name !~ PREFIX
+         slugin_name = gem.name.split('-', 2).last
+         @slugins << Slugin.new(slugin_name, gem.name, gem, true) if !gem_located?(gem.name)
+       end
+       @slugins
+     end
+
+     def gem_located?(gem_name)
+       @slugins.any? { |slugin| slugin.gem_name == gem_name }
+     end
+   end
+
+ end
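
Based on the naming rules in slugin_class above, a slugin is simply a gem whose name starts with "slugforge-" and that defines a matching module with an optional activate hook. A hypothetical slugin gem (the gem name, module name, and behavior are illustrative) could be as small as:

    # lib/slugforge-deploy_hooks.rb in a gem named slugforge-deploy_hooks;
    # SluginManager#load! requires it by gem name, then activate_slugins calls
    # SlugforgeDeployHooks.activate with the slugforge configuration.
    module SlugforgeDeployHooks
      def self.activate(config)
        warn "deploy_hooks slugin activated"
      end
    end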

data/lib/slugforge/version.rb
@@ -0,0 +1,9 @@
+ module Slugforge
+   module Version
+     MAJOR = 4
+     MINOR = 0
+     PATCH = 0
+   end
+
+   VERSION = [Version::MAJOR, Version::MINOR, Version::PATCH].join('.')
+ end
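
For reference, the constant assembled above resolves to the release shown at the top of this page; a trivial check (nothing here beyond what the file defines):

    require 'slugforge/version'

    Slugforge::VERSION   # => "4.0.0"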

data/scripts/post-install.sh
@@ -0,0 +1,143 @@
+ #!/bin/bash
+
+ # Environment variables can be used to modify the behavior of this script. They
+ # must be present in the environment of the slug when it is installed.
+ #
+ # KILL_TIMEOUT         - change the upstart kill timeout, which is how long upstart will
+ #                        wait upon stopping a service for child processes to die before
+ #                        sending kill -9
+ # CONCURRENCY          - concurrency used for foreman export; same format as the
+ #                        concurrency argument passed to foreman
+ # RUNTIME_RUBY_VERSION - the ruby version to put in the upstart templates to run the service.
+ #                        If specified, 'rvm use $RUNTIME_RUBY_VERSION do' will prefix the process
+ #                        command in the Procfile
+ # LOGROTATE_POSTROTATE - the shell code to run in the logrotate 'postrotate' stanza
+ #                        in the logrotate config file created for this slug's upstart
+ #                        service. Default is 'restart <servicename>'
+
+ set -e
+
+ SHARED_DIR="${INSTALL_ROOT}/shared"
+ # make shared folders, if needed
+ mkdir -p ${SHARED_DIR}/config
+
+ linked_dirs=(log pids tmp)
+
+ for d in ${linked_dirs[@]} ; do
+   mkdir -p "${SHARED_DIR}/${d}"
+   # create the symlinks for shared folders, if needed
+   if [ ! -h "$INSTALL_DIR/${d}" ] ; then
+     rm -rf "$INSTALL_DIR/${d}" # delete local copy, use shared
+     ln -s -f "${SHARED_DIR}/${d}" "${INSTALL_DIR}/${d}"
+   fi
+ done
+ chmod -R 775 ${SHARED_DIR}
+
+ # set owner for project tree
+ chown -R $OWNER ${INSTALL_ROOT}
+
+ # make sure all deploy scripts are executable
+ if [ -d "${INSTALL_DIR}/deploy" ] ; then
+   chmod +x ${INSTALL_DIR}/deploy/*
+ fi
+
+ # if an environment file exists, link it into the current directory so DotEnv and foreman run work
+ if [ -r "${SHARED_DIR}/env" ] ; then
+   ln -s -f "${SHARED_DIR}/env" "${INSTALL_DIR}/.env"
+ fi
+
+ # run post_install script, if present
+ if [ -r "${INSTALL_DIR}/deploy/post_install" ] ; then
+   echo "Running post_install script..."
+   # change into INSTALL_DIR so that folks can use pwd to get the package install location
+   su - $OWNER -c "cd ${INSTALL_DIR}; deploy/post_install"
+ fi
+
+ if which service && which start && which stop > /dev/null 2>&1 ; then
+   UPSTART_PRESENT=true
+ else
+   UPSTART_PRESENT=false
+ fi
+
+ if $UPSTART_PRESENT ; then
+   if [ -n "$CONCURRENCY" ] ; then
+     CONCURRENCY="-c $CONCURRENCY"
+
+     # split the concurrency string into its parts and check each app to see if it's
+     # unicorn or rainbows. We can only have one at a time because if they share a pid
+     # directory unicorn-upstart can't tell which process to watch.
+     # e.g.
+     # web=1,other=1 gets split on the comma, then each app gets split on the = to give web and other
+     FOUND_UNICORN=false
+     for app in $(echo ${CONCURRENCY//,/ }) ; do
+       app=${app%=*}
+       if egrep -q "^${app}:.*(unicorn|rainbows)" "${INSTALL_DIR}/Procfile" ; then
+         if $FOUND_UNICORN ; then
+           echo "The concurrency you have set of '$CONCURRENCY' will result in two unicorn or rainbows servers running at the same time. Slug deploys do not support that."
+           echo "Update your concurrency to only run one. You can also deploy this slug again in another directory with a different concurrency to run both simultaneously."
+           exit 1
+         fi
+         FOUND_UNICORN=true
+       fi
+     done
+   fi
+
+   PROJECT_NAME=$(basename $INSTALL_ROOT)
+
+   # upstart has problems with services with dashes in them
+   PROJECT_NAME=${PROJECT_NAME//-/_}
+
+   if [ -n "$RUNTIME_RUBY_VERSION" ] ; then
+     # used inside the foreman template
+     export RUBY_CMD="rvm use $RUNTIME_RUBY_VERSION do"
+   elif [ -r "${INSTALL_DIR}/.ruby-version" ] ; then
+     export RUBY_CMD="rvm use $(head -n 1 ${INSTALL_DIR}/.ruby-version) do"
+   fi
+
+   # check for a Procfile that is not zero size
+   if [ -s "$INSTALL_DIR/Procfile" ] ; then
+     # run foreman export to export the upstart scripts
+     EXPORT_COMMAND="foreman export upstart /etc/init -a $PROJECT_NAME -f $INSTALL_DIR/Procfile -l $INSTALL_DIR/log $CONCURRENCY -t $INSTALL_DIR/deploy/upstart-templates -d $INSTALL_ROOT -u $OWNER"
+     echo "Running foreman export command '$EXPORT_COMMAND'"
+     $EXPORT_COMMAND
+
+     # start or restart the service
+     if status ${PROJECT_NAME} | grep -q running ; then
+       # restart the service
+       echo "Post install complete. Restarting ${PROJECT_NAME} service... "
+       restart ${PROJECT_NAME}
+     else
+       # start the new service
+       echo "Post install complete. Starting ${PROJECT_NAME} service... "
+       start ${PROJECT_NAME}
+     fi
+   else
+     echo "Procfile is missing or zero size. *NOT* running foreman export command."
+   fi
+ else
+   echo "This machine does not appear to have upstart installed so we're skipping"
+   echo "exporting the upstart service config files."
+ fi
+
+ if [ -d "/etc/logrotate.d" ] ; then
+   LOGROTATE_FILE="/etc/logrotate.d/${PROJECT_NAME}"
+   LOG_DIR="${INSTALL_DIR}/log"
+   echo "Installing logrotate config ${LOGROTATE_FILE}"
+   : ${LOGROTATE_POSTROTATE:="restart ${PROJECT_NAME}"}
+   cat <<EOF > "${LOGROTATE_FILE}"
+ ${LOG_DIR}/*log ${LOG_DIR}/*/*.log {
+   size=10G
+   rotate 2
+   missingok
+   notifempty
+   sharedscripts
+   postrotate
+     ${LOGROTATE_POSTROTATE}
+   endscript
+ }
+
+ EOF
+ else
+   echo "This machine does not appear to have logrotate installed so we're skipping"
+   echo "log rotation config."
+ fi

data/scripts/unicorn-shepherd.sh
@@ -0,0 +1,305 @@
+ #!/bin/bash
+
+ # This script is a bridge between upstart and unicorn/rainbows, hereafter referred to as unicorn.
+ #
+ # The reason this is necessary is that upstart wants to start and watch a pid for its entire
+ # lifecycle. However, unicorn's cool no-downtime restart feature creates a new unicorn master
+ # which will create new workers and then kill the original master. This makes upstart think that
+ # unicorn died and it gets wonky from there.
+ #
+ # So this script is started by upstart. It can detect if a unicorn master is already running
+ # and will wait for it to exit. Then upstart will restart this script which will see if
+ # a unicorn master is running again. On no-downtime restarts it will find the new unicorn master
+ # and wait on it to exit, and so on. So unicorn is managing its own lifecycle and this script
+ # gives upstart a single pid to start and watch.
+ #
+ # This script also handles the signals sent by upstart to stop and restart and sends them to the
+ # running unicorn master to initiate a no-downtime restart when the upstart 'restart' command
+ # is given to this service.
+ #
+ # We do some crazy magic in is_service_in_state to determine if we are restarting or stopping.
+
+
+ #############################################################
+ ##
+ ## Set up environment
+ ##
+ #############################################################
+
+ COMMAND=$1
+ SERVICE=$2
+
+ # logs to syslog with the service name and the pid of this script
+ log() {
+   # we have to send this to syslog ourselves instead of relying on whoever launched
+   # us because the exit signal handler log output never shows up in the output stream
+   # unless we do this explicitly
+   echo "$@" | logger -t "${SERVICE}[$$]"
+ }
+
+ # assume the upstart config cd's us into the project root dir.
+ BASE_DIR=$PWD
+ LOG_DIR="${BASE_DIR}/log/unicorn"
+ TRY_RESTART=true
+
+ #############################################################
+ ##
+ ## Support functions
+ ##
+ #############################################################
+
+ # Bail out if all is not well
+ check_environment(){
+   if [ "x" = "x${COMMAND}" ] ; then
+     log "Missing required argument: Command to launch unicorn or rainbows. [unicorn|rainbows]"
+     exit 1
+   fi
+
+   if [ "x" = "x${SERVICE}" ] ; then
+     log "Missing required second argument: Upstart service name that launched this script"
+     exit 1
+   fi
+
+   if [ -r $BASE_DIR/config/unicorn.rb ] ; then
+     CONFIG_FILE=$BASE_DIR/config/unicorn.rb
+   elif [ -r $BASE_DIR/config/rainbows.rb ] ; then
+     CONFIG_FILE=$BASE_DIR/config/rainbows.rb
+   else
+     log "No unicorn or rainbows config file found in '$BASE_DIR/config'. Exiting"
+     exit 1
+   fi
+
+   # default to RAILS_ENV if RACK_ENV isn't set
+   export RACK_ENV="${RACK_ENV:-$RAILS_ENV}"
+
+   if [ ! -n "$RACK_ENV" ] ; then
+     log "Neither RACK_ENV nor RAILS_ENV environment variable is set. Exiting."
+     exit 1
+   fi
+
+ }
+
+ # Return the pid of the master unicorn. If two processes both think they are the master
+ # (not a new master plus an old one that is exiting), exit with an error.
+ # How could we handle this better? When would it happen?
+ # Delete any pid files found which have no corresponding running processes.
+ master_pid() {
+   local pid=''
+   local extra_pids=''
+   local multi_master=false
+
+   for PID_FILE in $(find $BASE_DIR/pids/ -name "*.pid") ; do
+     local p=`cat ${PID_FILE}`
+
+     if is_pid_running $p ; then
+       if [ -n "$pid" ] ; then
+         multi_master=true
+         extra_pids="$extra_pids $p"
+       else
+         pid="$p"
+       fi
+     else
+       log "Deleting ${COMMAND} pid file with no running process '$PID_FILE'"
+       rm $PID_FILE 2> /dev/null || log "Failed to delete pid file '$PID_FILE': $!"
+     fi
+   done
+   if $multi_master ; then
+     log "Found more than one not old ${COMMAND} master process running. Pids are '$pid $extra_pids'."
+     log "Killing them all and restarting."
+     kill -9 $pid $extra_pids
+     exit 1
+   fi
+
+   echo $pid
+   # return status so we can use this function to see if the master is running
+   [ -n "$pid" ]
+ }
+
+ is_pid_running() {
+   local pid=$1
+   if [ ! -n "$pid" ] || ! [ -d "/proc/$pid" ] ; then
+     return 1
+   fi
+   return 0
+ }
+
+
+ # output the parent process id of the argument
+ ppid() {
+   ps -p $1 -o ppid=
+ }
+
+ free_mem() {
+   free -m | grep "buffers/cache:" | awk '{print $4};'
+ }
+
+ # kills off workers whose master has died. This is indicated by a worker whose
+ # parent process is the init process.
+ kill_orphaned_workers() {
+   local workers=`ps aux | egrep "${COMMAND}.*worker" | grep -v grep | awk '{print $2}'`
+   for worker in $workers ; do
+     # if the worker's parent process is init, its master is dead.
+     if [ "1" = `ppid $worker` ] ; then
+       log "Found ${COMMAND} worker process with no master. Killing $worker"
+       kill -QUIT $worker
+     fi
+   done
+ }
+
+ # This is the on-exit handler. It checks if we are restarting or not and either sends the USR2
+ # signal to unicorn or, if the service is being stopped, kills the unicorn master.
+ respawn_new_master() {
+   # TRY_RESTART is set to false on exits where we didn't receive TERM.
+   # When we used "trap command TERM" it did not always trap properly,
+   # but "trap command EXIT" runs command every time no matter why the script
+   # ends. So we set this env var to false if we don't need to respawn, which is if unicorn
+   # dies by itself or is restarted externally, usually through the deploy script,
+   # or we never successfully started it.
+   # If we receive a TERM, like from upstart on stop/restart, this won't be set
+   # and we'll send USR2 to restart unicorn.
+   if $TRY_RESTART ; then
+     if is_service_in_state "restart" ; then
+       local pid=`master_pid`
+       if [ -n "$pid" ] ; then
+         # free memory before restart. Restart is unreliable without enough memory:
+         # the new master crashes during startup, etc.
+         let min_mem=1500
+         let workers_to_kill=8
+         let count=0
+
+         while [ `free_mem` -lt $min_mem ] && [ $count -lt $workers_to_kill ] ; do
+           log "Sending master ${pid} TTOU to drop workers to free up memory for restart"
+           kill -TTOU ${pid}
+           sleep 2
+           count=$((count + 1))
+         done
+
+         if [ `free_mem` -lt $min_mem ] ; then
+           log "Still not enough memory to restart. Killing the master and allowing upstart to restart."
+           kill -9 ${pid}
+         else
+           # gracefully restart all current workers to free up RAM,
+           # then respawn the master
+           kill -USR2 ${pid}
+           log "Respawn signal USR2 sent to ${COMMAND} master ${pid}"
+         fi
+       else
+         log "No ${COMMAND} master found. Exiting. A new one will launch when we are restarted."
+       fi
+     elif is_service_in_state "stop" ; then
+       local pid=`master_pid`
+       if [ -n "$pid" ] ; then
+         tries=1
+         while is_pid_running ${pid} && [ $tries -le 5 ] ; do
+           log "Service is STOPPING. Trying to kill '${COMMAND}' at pid '${pid}'. Try ${tries}"
+           kill ${pid}
+           tries=$(( $tries + 1 ))
+           sleep 1
+         done
+
+         if is_pid_running ${pid} ; then
+           log "Done waiting for '${COMMAND}' process '${pid}' to die. Killing for realz"
+           kill -9 ${pid}
+         else
+           log "${COMMAND} process '${pid}' is dead."
+         fi
+       fi
+     else
+       log "Service is neither stopping nor restarting. Exiting."
+     fi
+   else
+     log "Not checking for restart"
+   fi
+ }
+
+ # Upstart does not have the concept of "restart". When you restart a service it is simply
+ # stopped and started. But this defeats the purpose of unicorn's USR2 no-downtime trick.
+ # So we check the service states of the foreman-exported services. If any of them are
+ # start/stopping or start/post-stop it means that they are stopping but that the service
+ # itself is still scheduled to run. This means restart. We can use this to differentiate between
+ # restarting and stopping, so we can signal unicorn to restart or actually kill it, as appropriate.
+ is_service_in_state() {
+   local STATE=$1
+   if [ "$STATE" = "restart" ] ; then
+     PATTERN="(start/stopping|start/post-stop)"
+   elif [ "$STATE" = "stop" ] ; then
+     PATTERN="/stop"
+   else
+     log "is_service_in_state: State must be one of 'stop' or 'restart'. Got '${STATE}'"
+     exit 1
+   fi
+   # the service that started us and the foreman parent services, pruning off everything
+   # after each successive dash to find the parent service
+   # e.g. myservice-web-1 myservice-web myservice
+   services=( ${SERVICE} ${SERVICE%-*} ${SERVICE%%-*} )
+
+   IN_STATE=false
+
+   for service in "${services[@]}" ; do
+     if /sbin/status ${service} | egrep -q "${PATTERN}" ; then
+       log "Service ${service} is in state '${STATE}'. - '$(/sbin/status ${service})'"
+       IN_STATE=true
+     fi
+   done
+
+   $IN_STATE # this is the return code for this function
+ }
+
+ #############################################################
+ ##
+ ## Trap incoming signals
+ ##
+ #############################################################
+
+ # trap EXIT; upstart uses TERM to both stop and restart (stop/start) us, which also triggers EXIT
+ trap "respawn_new_master" EXIT
+
+ #############################################################
+ ##
+ ## Main execution
+ ##
+ #############################################################
+
+ check_environment
+
+ kill_orphaned_workers
+
+ if ! master_pid ; then
+
+   # make sure it uses the 'current' symlink and not the actual path
+   export BUNDLE_GEMFILE=${BASE_DIR}/Gemfile
+
+   log "No ${COMMAND} master found. Launching new ${COMMAND} master in env '$RACK_ENV' in directory '$BASE_DIR', BUNDLE_GEMFILE=$BUNDLE_GEMFILE"
+
+   mkdir -p "${LOG_DIR}"
+
+   # setsid to start this process in a new session because when upstart stops or restarts
+   # a service it kills the entire process group of the service and relaunches it. Because
+   # we are managing the unicorn separately from upstart it needs to be in its own
+   # session (group of process groups) so that it survives the original process group
+   setsid bundle exec ${COMMAND} -E ${RACK_ENV} -c ${CONFIG_FILE} >> ${LOG_DIR}/unicorn.log 2>&1 &
+
+   tries=1
+   while [ $tries -le 10 ] && ! master_pid ; do
+     log "Waiting for unicorn to launch master"
+     tries=$(( $tries + 1 ))
+     sleep 1
+   done
+ fi
+
+ PID=`master_pid`
+
+ if is_pid_running $PID ; then
+   # hang out while the unicorn process is alive. Once it's gone we will exit
+   # this script. When upstart respawns us we will end up in the if statement above
+   # to relaunch a new unicorn master.
+   log "Found running ${COMMAND} master $PID. Awaiting its demise..."
+   while is_pid_running $PID ; do
+     sleep 5
+   done
+   log "${COMMAND} master $PID has exited."
+ else
+   log "Failed to start ${COMMAND} master. Will try again on respawn. Exiting"
+ fi
+
+ TRY_RESTART=false