resque-cluster 0.0.1

This diff represents the content of a publicly available package version as released to its public registry. Since 0.0.1 is the initial release, every hunk below is a pure addition. The information is provided for informational purposes only.
@@ -0,0 +1,5 @@
+ module Resque
+   class Cluster
+     VERSION = '0.0.1'
+   end
+ end
@@ -0,0 +1,65 @@
+ require 'resque/cluster/member'
+ require 'yaml'
+
+ module Resque
+   class Pool
+     # Command Line Interface monkey patch for resque-pool
+     module CLI
+       module_function
+
+       original_setup_environment = instance_method(:setup_environment)
+
+       define_method(:setup_environment) do |opts|
+         original_setup_environment.bind(self).call(opts)
+         if opts[:cluster]
+           puts "Starting as a cluster: #{opts[:cluster]} in #{opts[:environment]} environment"
+           Resque::Cluster.config = {
+             cluster_name: opts[:cluster],
+             environment: opts[:environment],
+             local_config_path: opts[:config],
+             global_config_path: opts[:global_config]
+           }
+         end
+       end
+
+       # rubocop:disable all
+       def parse_options
+         opts = Trollop::options do
+           version "resque-pool #{VERSION} (c) nicholas a. evans"
+           banner <<-EOS
+ resque-pool is the best way to manage a group (pool) of resque workers
+
+ When daemonized, stdout and stderr default to resque-pool.stdxxx.log files in
+ the log directory and pidfile defaults to resque-pool.pid in the current dir.
+
+ Usage:
+   resque-pool [options]
+ where [options] are:
+           EOS
+           opt :config, 'Alternate path to config file', type: String, short: '-c'
+           opt :appname, 'Alternate appname', type: String, short: '-a'
+           opt :daemon, 'Run as a background daemon', default: false, short: '-d'
+           opt :stdout, 'Redirect stdout to logfile', type: String, short: '-o'
+           opt :stderr, 'Redirect stderr to logfile', type: String, short: '-e'
+           opt :nosync, 'Don\'t sync logfiles on every write'
+           opt :pidfile, 'PID file location', type: String, short: '-p'
+           opt :environment, 'Set RAILS_ENV/RACK_ENV/RESQUE_ENV', type: String, short: '-E'
+           opt :spawn_delay, 'Delay in milliseconds between spawning missing workers', type: Integer, short: '-s'
+           opt :term_graceful_wait, 'On TERM signal, wait for workers to shut down gracefully'
+           opt :term_graceful, 'On TERM signal, shut down workers gracefully'
+           opt :term_immediate, 'On TERM signal, shut down workers immediately (default)'
+           opt :single_process_group, 'Workers remain in the same process group as the master', default: false
+           opt :cluster, 'Name of the cluster this resque-pool belongs to', type: String, short: '-C'
+           opt :global_config, 'Alternate path to the global config file', type: String, short: '-G'
+         end
+         if opts[:daemon]
+           opts[:stdout]  ||= 'log/resque-pool.stdout.log'
+           opts[:stderr]  ||= 'log/resque-pool.stderr.log'
+           opts[:pidfile] ||= 'tmp/pids/resque-pool.pid'
+         end
+         opts
+       end
+       # rubocop:enable all
+     end
+   end
+ end
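With these patches applied, a cluster member could be launched roughly as follows. This is a hypothetical invocation: the long flag names are the ones Trollop derives from the opt definitions above, and the config paths are placeholders.

    resque-pool --cluster my-cluster --environment production \
                --config config/local_config.yml \
                --global-config config/global_config.yml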
@@ -0,0 +1,40 @@
+ require 'resque/cluster'
+
+ module Resque
+   # Resque Pool monkey patched methods for resque-pool
+   class Pool
+     # add the running pool to distributed pool in order to manipulate it
+     def self.run
+       if GC.respond_to?(:copy_on_write_friendly=)
+         GC.copy_on_write_friendly = true
+       end
+       pool_config = Resque::Cluster.config ? {} : choose_config_file
+       started_pool = Resque::Pool.new(pool_config).start
+       Resque::Cluster.init(started_pool) if Resque::Cluster.config
+       started_pool.join
+       Resque::Cluster.member.unregister if Resque::Cluster.member
+     end
+
+     # performed inside the run loop, must check for any distributed pool updates
+     original_maintain_worker_count = instance_method(:maintain_worker_count)
+     define_method(:maintain_worker_count) do
+       cluster_update
+       original_maintain_worker_count.bind(self).call
+     end
+
+     def cluster_update
+       Resque::Cluster.member.perform if Resque::Cluster.member
+     end
+
+     def adjust_worker_counts(worker, number)
+       over_adjustment = ''
+       if @config[worker].to_i + number < 0
+         over_adjustment = "#{worker}:#{@config[worker].to_i + number}"
+         @config[worker] = 0
+       else
+         @config[worker] = @config[worker].to_i + number
+       end
+       over_adjustment
+     end
+   end
+ end
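The notable method here is adjust_worker_counts: a request may push a worker count below zero, in which case the count is clamped at zero and the shortfall comes back as a "worker:deficit" string, presumably so the cluster can account for the unfulfilled portion elsewhere. A minimal standalone sketch of that behaviour (not part of the gem; `config` stands in for the pool's @config hash):

    # Standalone illustration of the clamping logic in #adjust_worker_counts.
    def adjust(config, worker, number)
      over_adjustment = ''
      if config[worker].to_i + number < 0
        # Requested decrement overshoots zero: clamp and report the deficit.
        over_adjustment = "#{worker}:#{config[worker].to_i + number}"
        config[worker] = 0
      else
        config[worker] = config[worker].to_i + number
      end
      over_adjustment
    end

    config = { 'foo' => 1 }
    adjust(config, 'foo', 2)   # => ""        (config['foo'] is now 3)
    adjust(config, 'foo', -5)  # => "foo:-2"  (config['foo'] clamped to 0)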
@@ -0,0 +1,33 @@
+ lib = File.expand_path('../lib', __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+ require 'resque/cluster/version'
+
+ Gem::Specification.new do |s|
+   s.name        = 'resque-cluster'
+   s.version     = Resque::Cluster::VERSION
+   s.date        = '2015-07-23'
+   s.summary     = %q{Creates and manages resque workers in a distributed cluster}
+   s.description = %q{A management tool for resque workers. Allows spinning up and managing resque workers across multiple machines sharing the same Redis server}
+   s.authors     = ["Yasha Portnoy"]
+   s.email       = 'yash.portnoy@gmail.com'
+   s.homepage    = 'https://github.com/yportnoy/resque-cluster'
+   s.license     = 'MIT'
+
+   s.files         = `git ls-files -z`.split("\x0")
+   s.executables   = s.files.grep(%r{^bin/}) { |f| File.basename(f) }
+   s.test_files    = s.files.grep(%r{^(test|spec|features)/})
+   s.require_paths = ["lib"]
+
+   s.add_dependency 'resque-pool', '~> 0.5.0'
+   s.add_dependency 'gru', '0.0.3'
+
+   s.add_development_dependency 'pry', '> 0.0'
+   s.add_development_dependency 'awesome_print', '> 0.0'
+   s.add_development_dependency 'rspec', '~> 3.1.0'
+   s.add_development_dependency 'rdoc', '~> 3.12'
+   s.add_development_dependency 'bundler', '~> 1.7'
+   s.add_development_dependency 'jeweler', '~> 2.0.1'
+   s.add_development_dependency 'simplecov', '>= 0'
+   s.add_development_dependency 'rubocop', '~> 0.31'
+   s.add_development_dependency 'mock_redis', '~> 0.15.0'
+ end
@@ -0,0 +1,3 @@
+ foo: 2
+ bar: 50
+ "foo,bar,baz": 1
@@ -0,0 +1,5 @@
+ global_maximums:
+   foo: 2
+   bar: 50
+   "foo,bar,baz": 1
+ rebalance_cluster: true
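Reading the two YAML files together (an inference from the integration spec expectations later in this diff, not from gem documentation): the first file is a per-member local config listing how many of each worker type one host runs, where a key like "foo,bar,baz" describes a single worker watching several queues; the second caps cluster-wide totals under global_maximums, and rebalance_cluster: true asks members to split those totals evenly. A quick sketch of how the configs parse, assuming hypothetical file names:

    require 'yaml'

    local  = YAML.load_file('local_config.yml')   # => {"foo"=>2, "bar"=>50, "foo,bar,baz"=>1}
    global = YAML.load_file('global_config.yml')
    global['global_maximums']                     # => {"foo"=>2, "bar"=>50, "foo,bar,baz"=>1}
    global['rebalance_cluster']                   # => true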
@@ -0,0 +1,37 @@
+ #!/usr/bin/env ruby
+ # -*- encoding: utf-8 -*-
+
+ gem 'resque-pool'
+
+ require 'resque/pool/cli'
+
+ $LOAD_PATH.unshift File.expand_path('../../../../lib', __FILE__)
+
+ require 'resque/pool/patches'
+ require 'resque/pool/cli_patches'
+ require 'resque/cluster'
+
+ gem 'gru'
+
+ module Gru
+   module Adapters
+     class RedisAdapter
+       def hostname
+         @hostname ||= ENV['GRU_HOSTNAME']
+       end
+     end
+   end
+ end
+
+ module Resque
+   class Cluster
+     class Member
+       private
+       def hostname
+         @hostname ||= ENV['GRU_HOSTNAME']
+       end
+     end
+   end
+ end
+
+ Resque::Pool::CLI.run
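The GRU_HOSTNAME overrides above replace the hostname lookups in both gru's Redis adapter and the cluster member, which presumably lets several test members coexist on one machine by giving each process its own identity. A hypothetical invocation (the script path is a placeholder; only the environment variable and flags come from the code above):

    GRU_HOSTNAME=member-1 path/to/this-script --cluster test-cluster \
        --environment test --config local_config.yml --global-config global_config.yml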
@@ -0,0 +1,181 @@
+ require File.expand_path(File.dirname(__FILE__) + '/spec_helper')
+
+ LOCAL_CONFIG = "spec/integration/config/local_config.yml"
+ GLOBAL_CONFIG = "spec/integration/config/global_config.yml"
+ LOCAL_CONFIG2 = "spec/integration/config/local_config2.yml"
+ GLOBAL_CONFIG2 = "spec/integration/config/global_config2.yml"
+ GLOBAL_REBALANCE_CONFIG2 = "spec/integration/config/global_rebalance_config2.yml"
+
+ RSpec.describe "Resque test-cluster" do
+   context "Spin Up and Down" do
+     before :all do
+       @a = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG)
+       @b = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG)
+       @c = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG)
+     end
+
+     it 'expects no workers to be running' do
+       expect(TestMemberManager.counts).to be_empty
+       expect(@a.counts).to be_empty
+       expect(@b.counts).to be_empty
+       expect(@c.counts).to be_empty
+     end
+
+     it 'expects counts to be correct after workers get spun up' do
+       @a.start
+       @b.start
+       @c.start
+       expect(TestMemberManager.counts).to eq({"par"=>2, "tar"=>8, "par,tar,var"=>1})
+     end
+
+     it 'cluster adjusts correctly when a member stops' do
+       @a.stop
+       expect(TestMemberManager.counts).to eq({"tar"=>6, "par"=>2, "par,tar,var"=>1})
+       expect(@a.counts).to be_empty
+       @b.stop
+       expect(TestMemberManager.counts).to eq({"tar"=>3, "par"=>1, "par,tar,var"=>1})
+       expect(@b.counts).to be_empty
+       @c.stop
+     end
+
+     after :all do
+       TestMemberManager.stop_all
+     end
+
+   end
+
+   context "Cluster with Rebalancing" do
+     before :all do
+       @d = TestMemberManager.new(LOCAL_CONFIG2, GLOBAL_REBALANCE_CONFIG2)
+       @e = TestMemberManager.new(LOCAL_CONFIG2, GLOBAL_REBALANCE_CONFIG2)
+       @f = TestMemberManager.new(LOCAL_CONFIG2, GLOBAL_REBALANCE_CONFIG2)
+       @d.start
+       @e.start
+       @f.start
+       sleep(5) # rebalance time
+     end
+
+     it 'expects counts to be correct after workers get spun up' do
+       expect(TestMemberManager.counts).to eq({"star"=>12})
+       expect(@d.counts).to eq({"star"=>4})
+       expect(@e.counts).to eq({"star"=>4})
+       expect(@f.counts).to eq({"star"=>4})
+     end
+
+     it 'adjusts correctly when a member stops' do
+       @d.stop
+       expect(TestMemberManager.counts).to eq({"star"=>12})
+       expect(@d.counts).to be_empty
+       expect(@e.counts).to eq({"star"=>6})
+       expect(@f.counts).to eq({"star"=>6})
+       @e.stop
+       @f.stop
+     end
+
+     after :all do
+       TestMemberManager.stop_all
+     end
+
+   end
+
+   context "Multiple Clusters and Environments" do
+     before :all do
+       @a = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG)
+       @b = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG, "test1-cluster")
+       @c = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG, "test-cluster", "test1")
+       @a.start
+       @b.start
+       @c.start
+       sleep(5) # rebalance time
+     end
+
+     it 'expects counts to be independent of each other' do
+       expect(TestMemberManager.counts).to eq({"tar"=>9, "par"=>3, "par,tar,var"=>3})
+       expect(@a.counts).to eq({"tar"=>3, "par"=>1, "par,tar,var"=>1})
+       expect(@b.counts).to eq({"tar"=>3, "par"=>1, "par,tar,var"=>1})
+       expect(@c.counts).to eq({"tar"=>3, "par"=>1, "par,tar,var"=>1})
+     end
+
+     after :all do
+       TestMemberManager.stop_all
+     end
+   end
+
+   context "Multiple Configs in the same cluster" do
+     before :all do
+       @a = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG)
+       @b = TestMemberManager.new(LOCAL_CONFIG2, GLOBAL_CONFIG)
+       @c = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_REBALANCE_CONFIG2)
+       @a.start
+       @b.start
+       sleep(3) # rebalance time
+     end
+
+     it 'expects to have each cluster member only running workers in its config' do
+       expect(TestMemberManager.counts).to eq({"tar"=>3, "par"=>1, "par,tar,var"=>1})
+       expect(@a.counts).to eq({"tar"=>3, "par"=>1, "par,tar,var"=>1})
+       expect(@b.counts).to be_empty
+     end
+
+     it 'expects the cluster to redistribute correctly after global config change' do
+       @c.start
+       sleep(8) # rebalance time
+       expect(TestMemberManager.counts).to eq({"star"=>4})
+       expect(@a.counts).to be_empty
+       expect(@b.counts).to eq({"star"=>4})
+       expect(@c.counts).to be_empty
+     end
+
+     after :all do
+       TestMemberManager.stop_all
+     end
+   end
+
+   context "Rebalance and non rebalance global configs switching in a cluster" do
+     before :all do
+       @a = TestMemberManager.new(LOCAL_CONFIG2, GLOBAL_REBALANCE_CONFIG2)
+       @b = TestMemberManager.new(LOCAL_CONFIG2, GLOBAL_CONFIG2)
+       @c = TestMemberManager.new(LOCAL_CONFIG2, GLOBAL_REBALANCE_CONFIG2)
+       @a.start
+       @b.start
+       @c.start
+       sleep(5) # rebalance time
+     end
+
+     it 'expects to have a correct number of workers in the cluster after multiple restarts' do
+       expect(TestMemberManager.counts).to eq({"star"=>12})
+       2.times do
+         sleep(5)
+         @a.stop
+         @a.start
+         sleep(5)
+         @b.stop
+         @b.start
+         sleep(5)
+         @c.stop
+         @c.start
+       end
+       sleep(8) # rebalance time
+       expect(TestMemberManager.counts).to eq({"star"=>12})
+       expect(@a.counts).to eq({"star"=>4})
+       expect(@b.counts).to eq({"star"=>4})
+       expect(@c.counts).to eq({"star"=>4})
+     end
+
+     it 'will not rebalance after the cluster is switched to rebalance-cluster false' do
+       @b.stop
+       sleep(2)
+       @b.start
+       sleep(8)
+       expect(TestMemberManager.counts).to eq({"star"=>12})
+       expect(@a.counts).to eq({"star"=>6})
+       expect(@b.counts).to eq({})
+       expect(@c.counts).to eq({"star"=>6})
+     end
+
+     after :all do
+       TestMemberManager.stop_all
+     end
+   end
+
+ end
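These specs drive real resque-pool processes through TestMemberManager (required by the spec helper below), so they need a running Redis instance and rely on generous sleeps for the cluster to settle. Assuming a standard RSpec setup, they would be run with something like:

    bundle exec rspec spec/integration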
@@ -0,0 +1,3 @@
+ par: 2
+ tar: 8
+ "par,tar,var": 1
@@ -0,0 +1,3 @@
+ global_maximums:
+   star: 12
+ rebalance_cluster: false
@@ -0,0 +1,3 @@
+ global_maximums:
+   star: 12
+ rebalance_cluster: true
@@ -0,0 +1,3 @@
+ par: 1
+ tar: 3
+ "par,tar,var": 1
@@ -0,0 +1 @@
+ star: 6
@@ -0,0 +1,3 @@
+ require 'pry'
+ require 'rspec'
+ require File.expand_path(File.dirname(__FILE__) + '/test_member_manager')
@@ -0,0 +1,38 @@
+ require File.expand_path(File.dirname(__FILE__) + '/spec_helper')
+
+ LOCAL_CONFIG = "spec/integration/config/local_config.yml"
+ GLOBAL_CONFIG = "spec/integration/config/global_config.yml"
+
+ RSpec.describe "resque-cluster" do
+   context "running 3 resque-cluster members in a standalone mode" do
+     before :all do
+       @a = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG, nil)
+       @b = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG, nil)
+       @c = TestMemberManager.new(LOCAL_CONFIG, GLOBAL_CONFIG, nil)
+     end
+
+     it 'expects no workers to be running' do
+       expect(@a.counts).to be_empty
+       expect(@b.counts).to be_empty
+       expect(@c.counts).to be_empty
+     end
+
+     it 'expects total counts to be correct after workers get spun up' do
+       @a.start
+       @b.start
+       @c.start
+       sleep(5)
+       expect(TestMemberManager.counts).to eq({"par"=>3, "tar"=>9, "par,tar,var"=>3})
+     end
+
+     it 'expects each resque-pool to have the same counts' do
+       expect(@a.counts).to eq({"par"=>1, "tar"=>3, "par,tar,var"=>1})
+       expect(@b.counts).to eq({"par"=>1, "tar"=>3, "par,tar,var"=>1})
+       expect(@c.counts).to eq({"par"=>1, "tar"=>3, "par,tar,var"=>1})
+     end
+
+     after :all do
+       TestMemberManager.stop_all
+     end
+   end
+ end