logstash-output-cassandra 0.9.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CONTRIBUTORS +7 -0
- data/Gemfile +4 -0
- data/LICENSE +218 -0
- data/README.md +148 -0
- data/lib/logstash/outputs/cassandra.rb +164 -0
- data/lib/logstash/outputs/cassandra/backoff_retry_policy.rb +65 -0
- data/lib/logstash/outputs/cassandra/buffer.rb +125 -0
- data/lib/logstash/outputs/cassandra/event_parser.rb +161 -0
- data/lib/logstash/outputs/cassandra/safe_submitter.rb +118 -0
- data/logstash-output-cassandra.gemspec +35 -0
- data/spec/cassandra_spec_helper.rb +14 -0
- data/spec/integration/outputs/cassandra_spec.rb +115 -0
- data/spec/integration/outputs/integration_helper.rb +91 -0
- data/spec/unit/outputs/backoff_retry_policy_spec.rb +131 -0
- data/spec/unit/outputs/buffer_spec.rb +119 -0
- data/spec/unit/outputs/cassandra_spec.rb +5 -0
- data/spec/unit/outputs/event_parser_spec.rb +304 -0
- data/spec/unit/outputs/safe_submitter_spec.rb +201 -0
- metadata +266 -0
@@ -0,0 +1,35 @@
|
|
1
|
+
Gem::Specification.new do |s|
  s.name = 'logstash-output-cassandra'
  s.version = '0.9.0'
  # SPDX license identifier. The previous value, 'Apache License (2.0)', is not
  # a recognized license string and makes RubyGems emit a warning at build time.
  s.licenses = ['Apache-2.0']
  s.summary = 'Store events into Cassandra'
  s.description = 'This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program'
  s.authors = ['PerimeterX']
  s.email = ['elad@perimeterx.com']
  s.homepage = 'https://github.com/PerimeterX/logstash-output-cassandra'
  s.require_paths = ['lib']

  # Files
  s.files = Dir['lib/**/*', 'spec/**/*', 'vendor/**/*', '*.gemspec', '*.md', 'CONTRIBUTORS', 'Gemfile', 'LICENSE', 'NOTICE.TXT']

  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  # Special flag to let us know this is actually a logstash plugin
  s.metadata = { 'logstash_plugin' => 'true', 'logstash_group' => 'output' }

  # Gem dependencies
  s.add_runtime_dependency 'concurrent-ruby'
  s.add_runtime_dependency 'logstash-core', '>= 2.0.0', '< 3.0.0'
  s.add_runtime_dependency 'cassandra-driver', '>= 2.0.0', '< 3.0.0'
  s.add_development_dependency 'cabin', ['~> 0.6']
  s.add_development_dependency 'longshoreman'
  s.add_development_dependency 'logstash-devutils'
  s.add_development_dependency 'logstash-codec-plain'
  s.add_development_dependency 'simplecov'
  s.add_development_dependency 'simplecov-rcov'
  s.add_development_dependency 'unparser', '0.2.4'
  s.add_development_dependency 'metric_fu'
  s.add_development_dependency 'coveralls'
  s.add_development_dependency 'gems'
end
|
@@ -0,0 +1,14 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
require 'logstash/devutils/rspec/spec_helper'
|
3
|
+
require 'logstash/event'
|
4
|
+
require 'simplecov'
|
5
|
+
require 'simplecov-rcov'
|
6
|
+
|
7
|
+
# Report coverage both as HTML (for humans) and in rcov format (for CI tools).
coverage_formatters = [
  SimpleCov::Formatter::HTMLFormatter,
  SimpleCov::Formatter::RcovFormatter
]
SimpleCov.formatter = SimpleCov::Formatter::MultiFormatter.new(coverage_formatters)

# Exclude the specs themselves from the coverage figures.
SimpleCov.start do
  add_filter '/spec/'
end
|
@@ -0,0 +1,115 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
require_relative './integration_helper'
|
3
|
+
require 'logstash/outputs/cassandra'
|
4
|
+
|
5
|
+
# Assertion builders shared by the type round-trip integration tests.
# Each builder returns a Proc taking (expect, row, type_to_test), where
# `expect` is a callable wrapper around RSpec's expect so the assertions
# run inside the calling example's context.
module Helper
  # Asserts that the stored column equals the original value interpreted as a Time.
  def self.get_assert_timestamp_equallity
    proc do |expect, row, type_to_test|
      expected_time = Time.at(type_to_test[:value]).to_s
      expect.call(row['value_column'].to_s).to(eq(expected_time))
    end
  end

  # Asserts that the stored set has the same size and members as the original array.
  def self.get_assert_set_equallity
    proc do |expect, row, type_to_test|
      stored_set = row['value_column']
      expected_items = type_to_test[:value]
      expect.call(stored_set.size).to(eq(expected_items.size))
      stored_set.to_a.each do |member|
        expect.call(expected_items).to(include(member.to_s))
      end
    end
  end
end
|
23
|
+
|
24
|
+
describe 'client create actions', :docker => true do
  before(:each) do
    get_session.execute("CREATE KEYSPACE test WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };")
  end

  after(:each) do
    get_session.execute('DROP KEYSPACE test;')
  end

  # Builds the output plugin under test, pointed at the dockerized cluster.
  def get_sut
    options = {
      'hosts' => [get_host_ip],
      'port' => get_port,
      'keyspace' => 'test',
      'table' => '%{[cassandra_table]}',
      'username' => 'cassandra',
      'password' => 'cassandra',
      'filter_transform_event_key' => 'cassandra_filter'
    }
    LogStash::Outputs::CassandraOutput.new(options)
  end

  # Creates a two-column table whose value column has the type under test.
  def create_table(type_to_test)
    get_session.execute("
      CREATE TABLE test.simple(
        idish_column text,
        value_column #{type_to_test[:type]},
        PRIMARY KEY (idish_column)
      );")
  end

  # Builds an event carrying the value under test plus the filter mapping
  # that routes it into test.simple.
  def build_event(type_to_test)
    options = {
      'cassandra_table' => 'simple',
      'idish_field' => 'some text',
      'value_field' => type_to_test[:value],
      'cassandra_filter' => [
        { 'event_key' => 'idish_field', 'column_name' => 'idish_column' },
        { 'event_key' => 'value_field', 'column_name' => 'value_column', 'cassandra_type' => type_to_test[:type] }
      ]
    }
    LogStash::Event.new(options)
  end

  # Verifies exactly one row was written and that its value round-tripped.
  def assert_proper_insert(type_to_test)
    result = get_session.execute('SELECT * FROM test.simple')
    expect(result.size).to(eq(1))
    result.each { |row|
      expect(row['idish_column']).to(eq('some text'))
      if type_to_test.has_key?(:assert_override)
        # BUGFIX: this proc used to `return expect(value)`. `return` inside a
        # Proc returns from the *enclosing method*, so the first
        # `expect.call(...)` unwound assert_proper_insert before the
        # `.to(...)` matcher ever ran, silently skipping every override
        # assertion. Yielding the matcher as the proc's last expression
        # keeps the assertions live.
        expect_proc = Proc.new do |value|
          expect(value)
        end
        type_to_test[:assert_override].call(expect_proc, row, type_to_test)
      else
        expect(row['value_column'].to_s).to(eq(type_to_test[:value].to_s))
      end
    }
  end

  [
    { type: 'timestamp', value: 1457606758, assert_override: Helper::get_assert_timestamp_equallity() },
    { type: 'inet', value: '192.168.99.100' },
    { type: 'float', value: '10.050000190734863' },
    { type: 'varchar', value: 'some chars' },
    { type: 'text', value: 'some text' },
    { type: 'blob', value: 'a blob' },
    { type: 'ascii', value: 'some ascii' },
    { type: 'bigint', value: '123456789' },
    { type: 'int', value: '12345' },
    { type: 'varint', value: '12345678' },
    { type: 'boolean', value: 'true' },
    { type: 'decimal', value: '0.1015E2' },
    { type: 'double', value: '200.54' },
    { type: 'timeuuid', value: 'd2177dd0-eaa2-11de-a572-001b779c76e3' },
    { type: 'set<timeuuid>',
      value: %w(d2177dd0-eaa2-11de-a572-001b779c76e3 d2177dd0-eaa2-11de-a572-001b779c76e4 d2177dd0-eaa2-11de-a572-001b779c76e5), assert_override: Helper::get_assert_set_equallity }
  ].each { |type_to_test|
    it "properly inserts data of type #{type_to_test[:type]}" do
      create_table(type_to_test)
      sut = get_sut
      sut.register
      event = build_event(type_to_test)

      sut.receive(event)
      sut.flush

      assert_proper_insert(type_to_test)
    end
  }
end
|
@@ -0,0 +1,91 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
require_relative '../../cassandra_spec_helper'
|
3
|
+
require 'longshoreman'
|
4
|
+
require 'cassandra'
|
5
|
+
|
6
|
+
# Throw-away container name so parallel test runs do not collide.
CONTAINER_NAME = "logstash-output-cassandra-#{rand(999).to_s}"
CONTAINER_IMAGE = 'cassandra'
CONTAINER_TAG = '2.2'

# Helpers for talking to the dockerized Cassandra instance used by the
# integration specs. Mixed into examples via RSpec configuration.
module CassandraHelper
  # IP of the docker host running the container.
  def get_host_ip
    Longshoreman.new.get_host_ip
  end

  # Host port mapped to Cassandra's native protocol port (9042).
  def get_port
    cassandra_container = Longshoreman::Container.new
    cassandra_container.get(CONTAINER_NAME)
    cassandra_container.rport(9042)
  end

  # Opens a fresh session against the test cluster.
  def get_session
    ::Cassandra.cluster(
      username: 'cassandra',
      password: 'cassandra',
      port: get_port,
      hosts: [get_host_ip]
    ).connect
  end
end
|
31
|
+
|
32
|
+
|
33
|
+
RSpec.configure do |config|
  config.include CassandraHelper

  # Runs before every describe block tagged :docker => true: make sure a
  # Cassandra container exists and accepts connections before any example runs.
  config.before(:all, :docker => true) do
    begin
      # Reuse an already-running container when one exists.
      Longshoreman.new.container.get(CONTAINER_NAME)
    rescue Docker::Error::NotFoundError
      create_retry = 0
      begin
        Longshoreman.new("#{CONTAINER_IMAGE}:#{CONTAINER_TAG}", CONTAINER_NAME, {
          'HostConfig' => {
            'PublishAllPorts' => true
          }
        })
        connect_retry = 0
        begin
          get_session
        rescue ::Cassandra::Errors::NoHostsAvailable
          # The node needs time to boot; keep trying once a second for a minute.
          connect_retry += 1
          if connect_retry <= 60
            sleep(1)
            retry
          else
            raise
          end
        end
      rescue Docker::Error::NotFoundError
        # The image is missing locally; pull it once, then retry the creation.
        create_retry += 1
        if create_retry <= 1
          Longshoreman.pull_image(CONTAINER_IMAGE, CONTAINER_TAG)
          retry
        else
          raise
        end
      end
    end
  end

  # Final cleanup after the whole suite: tear the container down if it is
  # still around. When docker is not running on the system (or the container
  # is already gone) there is nothing to do.
  config.after(:suite) do
    begin
      ls = Longshoreman.new
      ls.container.get(CONTAINER_NAME)
      ls.cleanup
    rescue Docker::Error::NotFoundError, Excon::Errors::SocketError
      # nothing to clean up
    end
  end
end
|
@@ -0,0 +1,131 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
require_relative '../../cassandra_spec_helper'
|
3
|
+
require 'logstash/outputs/cassandra/backoff_retry_policy'
|
4
|
+
|
5
|
+
RSpec.describe ::Cassandra::Retry::Policies::Backoff do
  let(:sut) { ::Cassandra::Retry::Policies::Backoff }
  let(:linear_backoff) {
    logger = double
    allow(logger).to(receive(:error))
    {
      'logger' => logger,
      'backoff_type' => '*',
      'backoff_size' => 5,
      'retry_limit' => 10
    }
  }
  let(:exponential_backoff) {
    linear_backoff.merge({
      'backoff_type' => '**',
      'backoff_size' => 2,
      'retry_limit' => 10
    })
  }

  describe '#retry_with_backoff' do
    describe 'retry limit not reached' do
      it 'decides to try again with the same consistency level' do
        sut_instance = sut.new(linear_backoff)

        decision = sut_instance.retry_with_backoff({ :retries => 0, :consistency => :one })

        expect(decision).to(be_an_instance_of(::Cassandra::Retry::Decisions::Retry))
        expect(decision.consistency).to(be(:one))
      end

      it 'waits _before_ retrying' do
        sut_instance = sut.new(linear_backoff)
        expect(Kernel).to(receive(:sleep))

        sut_instance.retry_with_backoff({ :retries => 0 })
      end

      it 'allows for an infinite amount of retries if configured with -1 as the retry limit' do
        sut_instance = sut.new(linear_backoff.merge({ 'retry_limit' => -1 }))
        expect(Kernel).to(receive(:sleep))

        sut_instance.retry_with_backoff({ :retries => 1000000 })
      end

      it 'allows for exponential backoffs' do
        sut_instance = sut.new(exponential_backoff)
        test_retry = exponential_backoff['retry_limit'] - 1
        expect(Kernel).to(receive(:sleep).with(exponential_backoff['backoff_size'] ** test_retry))

        sut_instance.retry_with_backoff({ :retries => test_retry })
      end

      it 'allows for linear backoffs' do
        sut_instance = sut.new(linear_backoff)
        test_retry = exponential_backoff['retry_limit'] - 1
        expect(Kernel).to(receive(:sleep).with(linear_backoff['backoff_size'] * test_retry))

        sut_instance.retry_with_backoff({ :retries => test_retry })
      end

      it 'fails for unknown backoff types' do
        sut_instance = sut.new(linear_backoff.merge({ 'backoff_type' => '^' }))

        expect { sut_instance.retry_with_backoff({ :retries => 0 }) }.to raise_error ArgumentError
      end
    end

    describe 'retry limit reached' do
      it 'decides to reraise' do
        sut_instance = sut.new(linear_backoff)

        decision = sut_instance.retry_with_backoff({ :retries => linear_backoff['retry_limit'] + 1 })

        expect(decision).to(be_an_instance_of(::Cassandra::Retry::Decisions::Reraise))
      end

      it 'does not wait' do
        sut_instance = sut.new(linear_backoff)

        expect(Kernel).not_to(receive(:sleep))

        sut_instance.retry_with_backoff({ :retries => linear_backoff['retry_limit'] + 1 })
      end
    end
  end

  [
    {
      :method_name => 'read_timeout',
      :expected_opts => { :statement => 'statement', :consistency => :one, :required => 1, :received => 0,
                          :retrieved => false, :retries => 0 },
      :call_args => ['statement', :one, 1, 0, false, 0]
    },
    {
      :method_name => 'write_timeout',
      :expected_opts => { :statement => 'statement', :consistency => :one, :type => :prepared,
                          :required => 1, :received => 2, :retries => 5 },
      :call_args => ['statement', :one, :prepared, 1, 2, 5]
    },
    {
      :method_name => 'unavailable',
      :expected_opts => { :statement => 'statement', :consistency => :one, :required => 3,
                          :alive => 2, :retries => 4 },
      :call_args => ['statement', :one, 3, 2, 4]
    }
  ].each { |use_case|
    # BUGFIX: this description used to be single-quoted, so interpolation never
    # happened and all three generated blocks were literally named
    # '#{use_case[:method_name]}'. Double quotes interpolate the method name.
    describe "#{use_case[:method_name]}" do
      it 'properly calls #retry_with_backoff' do
        sut_instance = sut.new(linear_backoff)
        expect(sut_instance).to(receive(:retry_with_backoff).with(use_case[:expected_opts]))

        sut_instance.send(use_case[:method_name], *use_case[:call_args])
      end

      it 'returns the decision it got' do
        sut_instance = sut.new(linear_backoff)
        expected_result = double
        expect(sut_instance).to(receive(:retry_with_backoff).and_return(expected_result))

        result = sut_instance.send(use_case[:method_name], *use_case[:call_args])

        expect(result).to(be(expected_result))
      end
    end
  }
end
|
@@ -0,0 +1,119 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
require "logstash/outputs/cassandra/buffer"
|
3
|
+
require "cabin"
|
4
|
+
|
5
|
+
describe LogStash::Outputs::Cassandra::Buffer do
  # Test double used to track buffer flushes: records how many times the
  # flush operation ran and what the buffer contained each time.
  class OperationTarget
    attr_reader :buffer, :buffer_history, :receive_count
    def initialize
      @buffer = nil
      @buffer_history = []
      @receive_count = 0
    end

    def receive(buffer)
      @receive_count += 1
      @buffer_history << buffer.clone
      @buffer = buffer
    end
  end

  let(:logger) { Cabin::Channel.get }
  let(:max_size) { 10 }
  let(:flush_interval) { 2 }
  # Used to track flush count
  let(:operation_target) { OperationTarget.new }
  let(:operation) { proc { |buffer| operation_target.receive(buffer) } }
  subject(:buffer) { LogStash::Outputs::Cassandra::Buffer.new(logger, max_size, flush_interval, &operation) }

  after(:each) do
    # NOTE: the original called `stop(do_flush=false)`, which only assigned a
    # dead local and passed false positionally; spelled explicitly here.
    buffer.stop(false) # do_flush: false
  end

  it "should initialize cleanly" do
    expect(buffer).to be_a(LogStash::Outputs::Cassandra::Buffer)
  end

  shared_examples("a buffer with two items inside") do
    it "should add a pushed item to the buffer" do
      buffer.synchronize do |data|
        expect(data).to include(item1)
        expect(data).to include(item2)
      end
    end

    describe "interval flushing" do
      before do
        sleep flush_interval + 1
      end

      it "should flush the buffer after the interval has passed" do
        expect(operation_target.receive_count).to eql(1)
      end

      it "should clear the buffer after a successful flush" do
        expect(operation_target.buffer).to eql([])
      end
    end

    describe "interval flushing a stopped buffer" do
      before do
        buffer.stop(false) # do_flush: false
        sleep flush_interval + 1
      end

      it "should not flush if the buffer is stopped" do
        expect(operation_target.receive_count).to eql(0)
      end
    end
  end

  describe "with a buffer push" do
    let(:item1) { "foo" }
    let(:item2) { "bar" }

    describe "a buffer with two items pushed to it separately" do
      before do
        buffer << item1
        buffer << item2
      end

      include_examples("a buffer with two items inside")
    end

    describe "a buffer with two items pushed to it in one operation" do
      before do
        buffer.push_multi([item1, item2])
      end

      include_examples("a buffer with two items inside")
    end
  end

  describe "with an empty buffer" do
    it "should not perform an operation if the buffer is empty" do
      buffer.flush
      expect(operation_target.receive_count).to eql(0)
    end
  end

  describe "flushing with an operation that raises an error" do
    class TestError < StandardError; end
    let(:operation) { proc { |buffer| raise TestError, "A test" } }
    let(:item) { double("item") }

    before do
      buffer << item
    end

    it "should raise an exception" do
      expect { buffer.flush }.to raise_error(TestError)
    end

    it "should not clear the buffer" do
      expect do
        # BUGFIX: this used to read `buffer.flush rescue TestError`, a rescue
        # *modifier* that rescues any StandardError and merely evaluates to
        # the TestError class — it never filtered by exception type. The
        # explicit form below swallows only the expected TestError.
        begin
          buffer.flush
        rescue TestError
          # expected; we only care that the buffer keeps its contents
        end
      end.not_to change(buffer, :contents)
    end
  end
end
|