autoscale 0.9.3 → 0.11.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +11 -2
- data/README.md +19 -13
- data/examples/complex.rb +3 -3
- data/examples/simple.rb +2 -2
- data/lib/autoscaler/binary_scaling_strategy.rb +1 -1
- data/lib/autoscaler/heroku_platform_scaler.rb +84 -0
- data/lib/autoscaler/ignore_scheduled_and_retrying.rb +5 -0
- data/lib/autoscaler/linear_scaling_strategy.rb +1 -1
- data/lib/autoscaler/sidekiq.rb +2 -2
- data/lib/autoscaler/sidekiq/client.rb +1 -1
- data/lib/autoscaler/sidekiq/entire_queue_system.rb +10 -0
- data/lib/autoscaler/sidekiq/sleep_wait_server.rb +2 -2
- data/lib/autoscaler/sidekiq/specified_queue_system.rb +10 -0
- data/lib/autoscaler/sidekiq/thread_server.rb +90 -0
- data/lib/autoscaler/version.rb +1 -1
- data/spec/autoscaler/binary_scaling_strategy_spec.rb +2 -2
- data/spec/autoscaler/counter_cache_memory_spec.rb +3 -3
- data/spec/autoscaler/counter_cache_redis_spec.rb +6 -6
- data/spec/autoscaler/delayed_shutdown_spec.rb +4 -4
- data/spec/autoscaler/heroku_platform_scaler_spec.rb +47 -0
- data/spec/autoscaler/heroku_scaler_spec.rb +8 -8
- data/spec/autoscaler/ignore_scheduled_and_retrying_spec.rb +4 -4
- data/spec/autoscaler/linear_scaling_strategy_spec.rb +13 -13
- data/spec/autoscaler/sidekiq/activity_spec.rb +4 -4
- data/spec/autoscaler/sidekiq/client_spec.rb +5 -5
- data/spec/autoscaler/sidekiq/entire_queue_system_spec.rb +11 -11
- data/spec/autoscaler/sidekiq/sleep_wait_server_spec.rb +21 -21
- data/spec/autoscaler/sidekiq/specified_queue_system_spec.rb +10 -10
- data/spec/autoscaler/sidekiq/thread_server_spec.rb +44 -0
- data/spec/spec_helper.rb +4 -2
- data/spec/test_system.rb +6 -0
- metadata +71 -15
- data/lib/autoscaler/sidekiq/celluloid_monitor.rb +0 -68
- data/lib/autoscaler/sidekiq/monitor_middleware_adapter.rb +0 -46
- data/spec/autoscaler/sidekiq/celluloid_monitor_spec.rb +0 -39
- data/spec/autoscaler/sidekiq/monitor_middleware_adapter_spec.rb +0 -16
data/spec/autoscaler/binary_scaling_strategy_spec.rb
@@ -8,12 +8,12 @@ describe Autoscaler::BinaryScalingStrategy do
   it "scales with no work" do
     system = TestSystem.new(0)
     strategy = cut.new
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 0
   end

   it "does not scale with pending work" do
     system = TestSystem.new(1)
     strategy = cut.new(2)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 2
   end
 end
data/spec/autoscaler/counter_cache_memory_spec.rb
@@ -5,17 +5,17 @@ describe Autoscaler::CounterCacheMemory do
   let(:cut) {Autoscaler::CounterCacheMemory}

   it {expect{cut.new.counter}.to raise_error(cut::Expired)}
-  it {cut.new.counter{1}.
+  it {expect(cut.new.counter{1}).to eq 1}

   it 'set and store' do
     cache = cut.new
     cache.counter = 1
-    cache.counter.
+    expect(cache.counter).to eq 1
   end

   it 'times out' do
     cache = cut.new(0)
     cache.counter = 1
-    expect{cache.counter
+    expect{cache.counter}.to raise_error(cut::Expired)
   end
 end
data/spec/autoscaler/counter_cache_redis_spec.rb
@@ -11,11 +11,11 @@ describe Autoscaler::CounterCacheRedis do
   subject {cut.new(Sidekiq.method(:redis))}

   it {expect{subject.counter}.to raise_error(cut::Expired)}
-  it {subject.counter{1}.
+  it {expect(subject.counter{1}).to eq 1}

   it 'set and store' do
     subject.counter = 2
-    subject.counter.
+    expect(subject.counter).to eq 2
   end

   it 'does not conflict with multiple worker types' do
@@ -23,7 +23,7 @@ describe Autoscaler::CounterCacheRedis do
     subject.counter = 1
     other_worker_cache.counter = 2

-    subject.counter.
+    expect(subject.counter).to eq 1
     other_worker_cache.counter = 2
   end

@@ -37,13 +37,13 @@ describe Autoscaler::CounterCacheRedis do
   it 'passed a connection pool' do
     cache = cut.new(@redis)
     cache.counter = 4
-    cache.counter.
+    expect(cache.counter).to eq 4
   end

   it 'passed a plain connection' do
-    connection = Redis.connect(:url => '
+    connection = Redis.connect(:url => 'redis://localhost:9736', :namespace => 'autoscaler')
     cache = cut.new connection
     cache.counter = 5
-    cache.counter.
+    expect(cache.counter).to eq 5
   end
 end
data/spec/autoscaler/delayed_shutdown_spec.rb
@@ -7,17 +7,17 @@ describe Autoscaler::DelayedShutdown do

   it "returns normal values" do
     strategy = cut.new(lambda{|s,t| 2}, 0)
-    strategy.call(nil, 1).
+    expect(strategy.call(nil, 1)).to eq 2
   end

   it "delays zeros" do
     strategy = cut.new(lambda{|s,t| 0}, 60)
-    strategy.call(nil, 1).
+    expect(strategy.call(nil, 1)).to eq 1
   end

   it "eventually returns zero" do
     strategy = cut.new(lambda{|s,t| 0}, 60)
-    strategy.
-    strategy.call(nil, 61).
+    allow(strategy).to receive(:level_idle_time).and_return(61)
+    expect(strategy.call(nil, 61)).to eq 0
   end
 end
data/spec/autoscaler/heroku_platform_scaler_spec.rb
@@ -0,0 +1,47 @@
+require 'spec_helper'
+require 'autoscaler/heroku_platform_scaler'
+
+describe Autoscaler::HerokuPlatformScaler, :platform_api => true do
+  let(:cut) {Autoscaler::HerokuPlatformScaler}
+  let(:client) {cut.new}
+  subject {client}
+
+  its(:workers) {should eq(0)}
+
+  describe 'scaled' do
+    around do |example|
+      client.workers = 1
+      example.call
+      client.workers = 0
+    end
+
+    its(:workers) {should eq(1)}
+  end
+
+  shared_examples 'exception handler' do |exception_class|
+    before do
+      expect(client).to receive(:client){
+        raise exception_class.new(Exception.new('oops'))
+      }
+    end
+
+    describe "default handler" do
+      it {expect{client.workers}.to_not raise_error}
+      it {expect(client.workers).to eq(0)}
+      it {expect{client.workers = 2}.to_not raise_error}
+    end
+
+    describe "custom handler" do
+      before do
+        @caught = false
+        client.exception_handler = lambda {|exception| @caught = true}
+      end
+
+      it {client.workers; expect(@caught).to be(true)}
+    end
+  end
+
+  describe 'exception handling', :focus => true do
+    it_behaves_like 'exception handler', Excon::Errors::Error
+  end
+end
data/spec/autoscaler/heroku_scaler_spec.rb
@@ -2,33 +2,33 @@ require 'spec_helper'
 require 'autoscaler/heroku_scaler'
 require 'heroku/api/errors'

-describe Autoscaler::HerokuScaler, :
+describe Autoscaler::HerokuScaler, :api1 => true do
   let(:cut) {Autoscaler::HerokuScaler}
   let(:client) {cut.new}
   subject {client}

-  its(:workers) {should
+  its(:workers) {should eq(0)}

   describe 'scaled' do
     around do |example|
       client.workers = 1
-      example.
+      example.call
       client.workers = 0
     end

-    its(:workers) {should
+    its(:workers) {should eq(1)}
   end

   shared_examples 'exception handler' do |exception_class|
     before do
-      client.
+      expect(client).to receive(:client){
         raise exception_class.new(Exception.new('oops'))
       }
     end

     describe "default handler" do
       it {expect{client.workers}.to_not raise_error}
-      it {client.workers.
+      it {expect(client.workers).to eq(0)}
       it {expect{client.workers = 2}.to_not raise_error}
     end

@@ -38,7 +38,7 @@ describe Autoscaler::HerokuScaler, :online => true do
         client.exception_handler = lambda {|exception| @caught = true}
       end

-      it {client.workers; @caught.
+      it {client.workers; expect(@caught).to be(true)}
     end
   end

@@ -46,4 +46,4 @@ describe Autoscaler::HerokuScaler, :online => true do
     it_behaves_like 'exception handler', Excon::Errors::SocketError
     it_behaves_like 'exception handler', Heroku::API::Errors::Error
   end
-end
+end
data/spec/autoscaler/ignore_scheduled_and_retrying_spec.rb
@@ -8,25 +8,25 @@ describe Autoscaler::IgnoreScheduledAndRetrying do
   it "passes through enqueued" do
     system = Struct.new(:enqueued).new(3)
     strategy = proc {|system, time| system.enqueued}
-    cut.new(strategy).call(system, 0).
+    expect(cut.new(strategy).call(system, 0)).to eq 3
   end

   it "passes through workers" do
     system = Struct.new(:workers).new(3)
     strategy = proc {|system, time| system.workers}
-    cut.new(strategy).call(system, 0).
+    expect(cut.new(strategy).call(system, 0)).to eq 3
   end

   it "ignores scheduled" do
     system = Struct.new(:scheduled).new(3)
     strategy = proc {|system, time| system.scheduled}
-    cut.new(strategy).call(system, 0).
+    expect(cut.new(strategy).call(system, 0)).to eq 0
   end

   it "ignores retrying" do
     system = Struct.new(:retrying).new(3)
     strategy = proc {|system, time| system.retrying}
-    cut.new(strategy).call(system, 0).
+    expect(cut.new(strategy).call(system, 0)).to eq 0
   end
 end

data/spec/autoscaler/linear_scaling_strategy_spec.rb
@@ -8,78 +8,78 @@ describe Autoscaler::LinearScalingStrategy do
   it "deactivates with no work" do
     system = TestSystem.new(0)
     strategy = cut.new(1)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 0
   end

   it "activates with some work" do
     system = TestSystem.new(1)
     strategy = cut.new(1)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to be > 0
   end

   it "minimally scales with minimal work" do
     system = TestSystem.new(1)
     strategy = cut.new(2, 2)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 1
   end

   it "maximally scales with too much work" do
     system = TestSystem.new(5)
     strategy = cut.new(2, 2)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 2
   end

   it "proportionally scales with some work" do
     system = TestSystem.new(5)
     strategy = cut.new(5, 2)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 3
   end

   it "doesn't scale unless minimum is met" do
     system = TestSystem.new(2)
     strategy = cut.new(10, 4, 0.5)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 0
   end

   it "scales proprotionally with a minimum" do
     system = TestSystem.new(3)
     strategy = cut.new(10, 4, 0.5)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 1
   end

   it "scales maximally with a minimum" do
     system = TestSystem.new(25)
     strategy = cut.new(5, 4, 0.5)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 5
   end

   it "scales proportionally with a minimum > 1" do
     system = TestSystem.new(12)
     strategy = cut.new(5, 4, 2)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 2
   end

   it "scales maximally with a minimum factor > 1" do
     system = TestSystem.new(30)
     strategy = cut.new(5, 4, 2)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 5
   end

   it "doesn't scale down engaged workers" do
     system = TestSystem.new(0, 2)
     strategy = cut.new(5, 4)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 2
   end

   it "doesn't scale above max workers even if engaged workers is greater" do
     system = TestSystem.new(40, 6)
     strategy = cut.new(5, 4)
-    strategy.call(system, 1).
+    expect(strategy.call(system, 1)).to eq 5
   end

   it "returns zero if requested capacity is zero" do
     system = TestSystem.new(0, 0)
     strategy = cut.new(0, 0)
-    strategy.call(system, 5).
+    expect(strategy.call(system, 5)).to eq 0
   end
 end
data/spec/autoscaler/sidekiq/activity_spec.rb
@@ -16,19 +16,19 @@ describe Autoscaler::Sidekiq::Activity do
       activity.idle!('queue')
       other_process.working!('other_queue')
     end
-    it {activity.
+    it {expect(activity).to be_idle(['queue'])}
   end

   it 'passed a connection pool' do
     activity = cut.new(5, @redis)
     activity.working!('queue')
-    activity.
+    expect(activity).to_not be_idle(['queue'])
   end

   it 'passed a plain connection' do
-    connection = Redis.connect(:url => '
+    connection = Redis.connect(:url => 'redis://localhost:9736', :namespace => 'autoscaler')
     activity = cut.new(5, connection)
     activity.working!('queue')
-    activity.
+    expect(activity).to_not be_idle(['queue'])
   end
 end
data/spec/autoscaler/sidekiq/client_spec.rb
@@ -10,26 +10,26 @@ describe Autoscaler::Sidekiq::Client do
   describe 'call' do
     it 'scales' do
       client.call(Class, {}, 'queue') {}
-      scaler.workers.
+      expect(scaler.workers).to eq 1
     end

     it 'scales with a redis pool' do
       client.call(Class, {}, 'queue', ::Sidekiq.method(:redis)) {}
-      scaler.workers.
+      expect(scaler.workers).to eq 1
     end

-    it('yields') {client.call(Class, {}, 'queue') {:foo}.
+    it('yields') {expect(client.call(Class, {}, 'queue') {:foo}).to eq :foo}
   end

   describe 'initial workers' do
     it 'works with default arguments' do
       client.set_initial_workers
-      scaler.workers.
+      expect(scaler.workers).to eq 0
     end

     it 'scales when necessary' do
       client.set_initial_workers {|q| TestSystem.new(1)}
-      scaler.workers.
+      expect(scaler.workers).to eq 1
     end
   end
 end
data/spec/autoscaler/sidekiq/entire_queue_system_spec.rb
@@ -24,42 +24,42 @@ describe Autoscaler::Sidekiq::EntireQueueSystem do

   subject {cut.new}

-  it {subject.queue_names.
-  it {subject.workers.
+  it {expect(subject.queue_names).to eq []}
+  it {expect(subject.workers).to eq 0}

   describe 'no queued work' do
     it "with no work" do
-      subject.
-      subject.queued.
+      allow(subject).to receive(:sidekiq_queues).and_return({'queue' => 0, 'another_queue' => 0})
+      expect(subject.queued).to eq 0
     end

     it "with no work and no queues" do
-      subject.queued.
+      expect(subject.queued).to eq 0
     end

     it "with no scheduled work" do
-      subject.scheduled.
+      expect(subject.scheduled).to eq 0
     end

     it "with no retry work" do
-      subject.retrying.
+      expect(subject.retrying).to eq 0
     end
   end

   describe 'with queued work' do
     it "with enqueued work" do
-      subject.
-      subject.queued.
+      allow(subject).to receive(:sidekiq_queues).and_return({'queue' => 1})
+      expect(subject.queued).to eq 1
     end

     it "with schedule work" do
       with_scheduled_work_in('queue')
-      subject.scheduled.
+      expect(subject.scheduled).to eq 1
     end

     it "with retry work" do
       with_retry_work_in('queue')
-      subject.retrying.
+      expect(subject.retrying).to eq 1
     end
   end
 end
data/spec/autoscaler/sidekiq/sleep_wait_server_spec.rb
@@ -12,34 +12,34 @@ describe Autoscaler::Sidekiq::SleepWaitServer do
   let(:server) {cut.new(scaler, 0, ['queue'])}

   shared_examples "a sleepwait server" do
-
-
-
-
-
+    it "scales with no work" do
+      allow(server).to receive(:pending_work?).and_return(false)
+      when_run
+      expect(scaler.workers).to eq 0
+    end

-
-
-
-
-
+    it "does not scale with pending work" do
+      allow(server).to receive(:pending_work?).and_return(true)
+      when_run
+      expect(scaler.workers).to eq 1
+    end
   end

   describe "a middleware with no redis specified" do
-
-
-
-
-
+    it_behaves_like "a sleepwait server" do
+      def when_run
+        server.call(Object.new, {}, 'queue') {}
+      end
+    end
   end

   describe "a middleware with redis specified" do
-
-
-
-
-
+    it_behaves_like "a sleepwait server" do
+      def when_run
+        server.call(Object.new, {}, 'queue', Sidekiq.method(:redis)) {}
+      end
+    end
   end

-  it('yields') {server.call(Object.new, {}, 'queue') {:foo}.
+  it('yields') {expect(server.call(Object.new, {}, 'queue') {:foo}).to eq :foo}
 end