plog-ruby 0.0.14
- checksums.yaml +15 -0
- data/.gitignore +3 -0
- data/Gemfile +6 -0
- data/README.md +27 -0
- data/bin/plogstats +23 -0
- data/lib/plog-ruby.rb +1 -0
- data/lib/plog.rb +10 -0
- data/lib/plog/checksum.rb +9 -0
- data/lib/plog/client.rb +161 -0
- data/lib/plog/packets.rb +9 -0
- data/lib/plog/packets/multipart_message.rb +42 -0
- data/lib/plog/version.rb +3 -0
- data/plog-ruby.gemspec +22 -0
- data/spec/lib/plog/client_spec.rb +271 -0
- data/spec/lib/plog/packets/multipart_message_spec.rb +100 -0
- data/spec/lib/plog_spec.rb +18 -0
- data/spec/spec_helper.rb +5 -0
- metadata +78 -0
checksums.yaml
ADDED
@@ -0,0 +1,15 @@
+---
+!binary "U0hBMQ==":
+  metadata.gz: !binary |-
+    NzQ5OTAwNzkxZTRkMmJjMTY0ZGFjMzczOTBmY2RiOGM2YjZjOWQyNw==
+  data.tar.gz: !binary |-
+    ZmE0Y2I1OGEzZmJiMzVjZmYzZDg0MzIxMjY4OTViZjhkMzdlOGNhYQ==
+SHA512:
+  metadata.gz: !binary |-
+    NTkxNGEwZDEyMzhjM2UzYTllZmVmMTkwMmRhOWFjMjk4N2Y0ZTgxNmY0N2Yy
+    MGEwYmMzNWRkYTczOTZlOWRkZWI3MWRlZTljYzliMTlmNTczMmJiMGRlYjkw
+    Y2E0NmExY2M2YmM4NDBkOGI1YmI5OGM0YzgzZTg4NTYwNDcwMTU=
+  data.tar.gz: !binary |-
+    MzRhYjIyMmUyYjY2ZjMzOGVkNzdjODJkOTAxMWNkN2RmZmY3MTE2ZmEyYjNj
+    YmU3YTJiMzdhZGZjNDc0YzA4YTUzZTIzODM0N2Q3Zjk1NGYxNDE1ODA4MjJj
+    NDFmZmFmNTEyMDA4NjgwODZkNjQ4ZGMwM2ExYTcyNWI5ZWQ3ZDM=
data/.gitignore
ADDED
data/Gemfile
ADDED
data/README.md
ADDED
@@ -0,0 +1,27 @@
+# plog
+
+Simple Ruby UDP client for the Plog Kafka forwarder.
+
+### Build & run tests
+
+```
+bundle install
+bundle exec rspec
+```
+
+### Usage
+
+Instantiate a client and start sending messages.
+
+```ruby
+require 'plog'
+client = Plog.new
+client.send("My hovercraft is full of eels.")
+```
+
+You can configure the client at initialization by passing these options:
+
+* `:host` - The host of the Plog process (default: '127.0.0.1')
+* `:port` - The port on which Plog is listening (default: 23456)
+* `:chunk_size` - The maximum payload size for multipart datagrams (default: 64,000)
+* `:logger` - An optional logger instance
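For reference, a minimal configuration sketch built only from the options listed above and the defaults in `lib/plog/client.rb`; logging to `STDOUT` is an illustrative assumption:

```ruby
require 'plog'
require 'logger'

# Option keys mirror the README above and DEFAULT_OPTIONS in client.rb;
# the STDOUT logger is an assumption for illustration.
client = Plog.new(
  :host       => '127.0.0.1',
  :port       => 23456,
  :chunk_size => 64_000,
  :logger     => Logger.new(STDOUT)
)
client.send("My hovercraft is full of eels.")
```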
data/bin/plogstats
ADDED
@@ -0,0 +1,23 @@
+#!/usr/bin/env ruby
+
+require 'optparse'
+require 'plog-ruby'
+require 'pp'
+
+options = {:timeout => 3.0}
+
+OptionParser.new do |opts|
+  opts.on '-h', '--host=' do |host|
+    options[:host] = host
+  end
+
+  opts.on '-p', '--port=' do |port|
+    options[:port] = port.to_i
+  end
+
+  opts.on '-t', '--timeout=' do |timeout|
+    options[:timeout] = timeout.to_f
+  end
+end.parse!
+
+pp Plog::Client.new(options).stats(options[:timeout])
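The same stats request can be issued directly from Ruby; a minimal sketch (host and port values are illustrative, and `Plog::TimeoutException` is raised when Plog does not answer within the timeout):

```ruby
require 'plog'
require 'pp'

client = Plog::Client.new(:host => '127.0.0.1', :port => 23456)
begin
  # Sends the "\0\0stats" datagram and parses the JSON reply.
  pp client.stats(3.0)
rescue Plog::TimeoutException => e
  warn "Plog did not answer: #{e.message}"
end
```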
data/lib/plog-ruby.rb
ADDED
@@ -0,0 +1 @@
+require 'plog'
data/lib/plog.rb
ADDED
data/lib/plog/client.rb
ADDED
@@ -0,0 +1,161 @@
+require 'json'
+require 'socket'
+require 'thread'
+require 'logger'
+
+module Plog
+  class TimeoutException < StandardError
+  end
+
+  class Client
+    # The protocol version spoken by this client.
+    PROTOCOL_VERSION = Packets::PROTOCOL_VERSION
+    RECV_SIZE = 65_536
+
+    DEFAULT_OPTIONS = {
+      :host => '127.0.0.1',
+      :port => 23456,
+      # Use the socket's default value unless this option is specified.
+      :send_buffer_size => nil,
+      :chunk_size => 64000,
+
+      :large_message_threshold => nil,
+      :on_large_message => nil,
+      :logger => Logger.new(nil)
+    }
+
+    attr_reader :host
+    attr_reader :port
+    attr_reader :send_buffer_size
+    attr_reader :chunk_size
+
+    attr_reader :large_message_threshold
+    attr_reader :on_large_message
+    attr_reader :logger
+
+    attr_reader :last_message_id
+
+    def initialize(options={})
+      options = DEFAULT_OPTIONS.merge(options)
+      @host = options[:host]
+      @port = options[:port]
+      @send_buffer_size = options[:send_buffer_size]
+      @chunk_size = options[:chunk_size]
+      @large_message_threshold = options[:large_message_threshold]
+      @on_large_message = options[:on_large_message]
+      @logger = options[:logger]
+
+      @message_id_mutex = Mutex.new
+      reset_message_id
+    end
+
+    def stats(timeout = 3.0)
+      send_to_socket("\0\0stats")
+      JSON.parse receive_packet_from_socket(timeout)
+    end
+
+    def send(message, options = {})
+      # Interpret the encoding of the string as binary so that chunking occurs
+      # at the byte-level and not at the character-level.
+      message = message.dup.force_encoding('BINARY')
+      notify_large_message(message) if large_message?(message)
+
+      message_id = next_message_id
+      message_length = message.length
+      message_checksum = Checksum.compute(message)
+      chunks = chunk_string(message, chunk_size)
+
+      logger.debug { "Plog: sending (#{message_id}; #{chunks.length} chunk(s))" }
+      chunks.each_with_index do |data, index|
+        send_to_socket(
+          Packets::MultipartMessage.encode(
+            message_id,
+            message_length,
+            message_checksum,
+            chunk_size,
+            chunks.count,
+            index,
+            data,
+            options
+        ))
+      end
+
+      message_id
+    rescue => e
+      logger.error { "Plog: error sending message: #{e}" }
+      raise e
+    end
+
+    def reset
+      reset_message_id
+      close_socket
+    end
+
+    def socket
+      @socket ||= open_socket
+    end
+
+    private
+
+    def large_message?(message)
+      large_message_threshold && message.length > large_message_threshold
+    end
+
+    def notify_large_message(message)
+      on_large_message && on_large_message.call(self, message)
+    end
+
+    def reset_message_id
+      @message_id_mutex.synchronize do
+        @last_message_id = Random.rand(2 ** 32)
+      end
+    end
+
+    def next_message_id
+      @message_id_mutex.synchronize do
+        @last_message_id += 1
+        @last_message_id %= 2 ** 32
+      end
+    end
+
+    def chunk_string(string, size)
+      (0..(string.length - 1) / size).map { |i| string[i * size, size] }
+    end
+
+    def send_to_socket(string)
+      logger.debug { "Plog: writing to socket: #{string.inspect}" }
+      socket.send(string, 0, host, port)
+    rescue => e
+      logger.error { "Plog: error writing to socket: #{e}" }
+      close_socket
+      raise e
+    end
+
+    def receive_packet_from_socket(timeout)
+      logger.debug { "Plog: receiving from socket #{socket} with timeout #{timeout}s" }
+
+      if IO::select([socket], nil, nil, timeout).nil?
+        raise TimeoutException, "No answer in #{timeout}s"
+      end
+
+      socket.recv RECV_SIZE
+    end
+
+    def open_socket
+      socket = UDPSocket.new
+      if send_buffer_size
+        socket.setsockopt(
+          Socket::SOL_SOCKET,
+          Socket::SO_SNDBUF,
+          send_buffer_size
+        )
+      end
+      socket
+    end
+
+    def close_socket
+      @socket.close rescue nil
+      @socket = nil
+    end
+  end
+end
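To make the chunking arithmetic in `Client#chunk_string` concrete, here is a standalone copy of the same one-liner with a couple of worked cases:

```ruby
# ceil(length / size) slices of at most `size` bytes each;
# the last slice carries the remainder.
def chunk_string(string, size)
  (0..(string.length - 1) / size).map { |i| string[i * size, size] }
end

chunk_string("A" * 12, 5)  # => ["AAAAA", "AAAAA", "AA"]
chunk_string("AAAA", 5)    # => ["AAAA"]
```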
data/lib/plog/packets.rb
ADDED
data/lib/plog/packets/multipart_message.rb
ADDED
@@ -0,0 +1,42 @@
+module Plog
+  module Packets
+
+    module MultipartMessage
+      def self.encode(message_id, length, checksum, chunk_size, count, index, payload, options = {})
+        message = [
+          PROTOCOL_VERSION,
+          TYPE_MULTIPART_MESSAGE,
+          count,
+          index,
+          chunk_size,
+          message_id,
+          length,
+          checksum]
+
+        # Plog encoding: https://github.com/airbnb/plog
+        template = 'CCS>S>S>L>l>L>S>x2'
+
+        # Generate pack template for tags
+        tags = options[:tags]
+        if tags.nil? || tags.empty?
+          message << 0
+        else
+          tag_len = 0
+          tags.each do |tag|
+            len = tag.length + 1 # extra byte for '\0'
+            template += "a#{len}"
+            tag_len += len
+          end
+          message << tag_len
+          message.concat(tags)
+        end
+
+        message << payload
+        template += 'a*'
+
+        return message.pack(template)
+      end
+    end
+
+  end
+end
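The `'CCS>S>S>L>l>L>S>x2'` template above fixes a 24-byte big-endian header in front of the tags and payload. A round-trip sketch, assuming `require 'plog'` loads the packet modules as the README's usage example suggests:

```ruby
require 'plog'

# Encode one single-chunk, untagged message, then read the header back
# with the same pack directives used by MultipartMessage.encode.
packet = Plog::Packets::MultipartMessage.encode(
  7,       # message_id
  5,       # total message length
  1234,    # checksum
  64_000,  # chunk size
  1,       # chunk count
  0,       # chunk index
  'hello'  # payload
)

version, type, count, index, chunk_size,
  message_id, length, checksum, tag_length = packet.unpack('CCS>S>S>L>l>L>S>x2')

payload = packet.byteslice(24 + tag_length, packet.bytesize)
# count == 1, index == 0, tag_length == 0, payload == "hello"
```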
data/lib/plog/version.rb
ADDED
data/plog-ruby.gemspec
ADDED
@@ -0,0 +1,22 @@
+# encoding: utf-8
+$:.push File.expand_path("../lib", __FILE__)
+
+require 'plog/version'
+
+Gem::Specification.new do |s|
+  s.name        = "plog-ruby"
+  s.version     = Plog::VERSION
+  s.platform    = Gem::Platform::RUBY
+  s.authors     = ["Nelson Gauthier"]
+  s.email       = ["nelson@airbnb.com"]
+  s.homepage    = "https://github.com/airbnb/plog-ruby"
+  s.summary     = "Ruby client for Plog."
+  s.description = "Send messages via UDP to the Plog Kafka forwarder."
+
+  s.files        = `git ls-files`.split("\n")
+  s.test_files   = `git ls-files -- spec/*`.split("\n")
+  s.require_path = 'lib'
+  s.executables  = 'plogstats'
+
+  s.add_runtime_dependency 'murmurhash3', '~> 0.1'
+end
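The gemspec's only runtime dependency, murmurhash3, backs the `Plog::Checksum.compute` call seen in `client.rb`. Since `lib/plog/checksum.rb` is collapsed in this diff, the following is only a plausible sketch of such a checksum, not the gem's actual implementation (`MurmurHash3::V32.str_hash` is the 32-bit string hash exposed by the murmurhash3 gem):

```ruby
require 'murmurhash3'

module Plog
  module Checksum
    # Hypothetical sketch: a 32-bit MurmurHash3 of the raw message bytes,
    # which fits the unsigned 32-bit checksum field in the packet header.
    def self.compute(message)
      MurmurHash3::V32.str_hash(message)
    end
  end
end
```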
data/spec/lib/plog/client_spec.rb
ADDED
@@ -0,0 +1,271 @@
+require 'spec_helper'
+
+describe Plog::Client do
+
+  let(:chunk_size) { 5 }
+  let(:client_options) { { :chunk_size => chunk_size } }
+  subject { Plog::Client.new(client_options) }
+
+  let(:udp_socket) do
+    double(UDPSocket).tap do |udp_socket|
+      udp_socket.stub(:send)
+      udp_socket.stub(:close)
+      udp_socket.stub(:recv)
+    end
+  end
+
+  before do
+    UDPSocket.stub(:new).and_return(udp_socket)
+  end
+
+  describe '#stats' do
+    let(:select_value) { [[udp_socket], [], []] }
+    let(:recv_value) { '{"foo":1}' }
+
+    before do
+      IO.stub(:select).and_return(select_value)
+      udp_socket.stub(:recv) { recv_value }
+    end
+
+    context 'timing out' do
+      let(:select_value) { nil }
+      it 'raises a TimeoutException' do
+        expect {subject.stats}.to raise_error Plog::TimeoutException
+      end
+    end
+
+    it 'sends a stats request' do
+      udp_socket.should_receive(:send) do |req, *extraneous|
+        expect(req.downcase).to start_with("\0\0stat")
+      end
+
+      subject.stats
+    end
+
+    it 'returns a statistics object from deserializing JSON' do
+      expect(subject.stats).to eq({'foo' => 1})
+    end
+  end
+
+  describe '#send' do
+    let(:message) { 'xxx' }
+    let(:checksum) { 200 }
+
+    before do
+      Plog::Checksum.stub(:compute).and_return(checksum)
+    end
+
+    it "constructs a UDP socket" do
+      UDPSocket.should_receive(:new).and_return(udp_socket)
+      subject.send(message)
+    end
+
+    context "when a send buffer size is specified" do
+      before do
+        client_options.merge!(:send_buffer_size => 1000)
+      end
+
+      it "sets the SO_SNDBUF socket option" do
+        udp_socket.should_receive(:setsockopt).with(
+          Socket::SOL_SOCKET,
+          Socket::SO_SNDBUF,
+          1000)
+        subject.send(message)
+      end
+    end
+
+    it "contacts the given host and port" do
+      udp_socket.should_receive(:send).with(anything(), 0, subject.host, subject.port)
+      subject.send(message)
+    end
+
+    it "encodes the message id, message length and chunk size" do
+      first_id = subject.last_message_id
+      Plog::Packets::MultipartMessage.should_receive(:encode).with(
+        first_id + 1,
+        message.length,
+        checksum,
+        chunk_size,
+        anything(),
+        anything(),
+        message,
+        {}
+      ).and_call_original
+      subject.send(message)
+    end
+
+    it "returns a monotonically increasing message id" do
+      first_id = subject.last_message_id
+      expect(subject.send(message)).to eq(first_id + 1)
+      expect(subject.send(message)).to eq(first_id + 2)
+    end
+
+    it "reuses the same socket" do
+      UDPSocket.should_receive(:new).once.and_return(udp_socket)
+      2.times { subject.send(message) }
+    end
+
+    describe 'large messages' do
+      let(:large_message_threshold) { nil }
+      let(:callback) { lambda { |c, m| } }
+
+      before do
+        client_options.merge!({
+          :large_message_threshold => large_message_threshold,
+          :on_large_message => callback
+        })
+      end
+
+      context "when the large message threshold is nil" do
+        let(:large_message_threshold) { nil }
+
+        it "does not invoke the callback" do
+          callback.should_not_receive(:call)
+          subject.send(message)
+        end
+      end
+
+      context "when the large message threshold is given" do
+        let(:large_message_threshold) { 2 }
+        let(:message) { 'xxx' }
+
+        it "invokes the callback with the client and message" do
+          callback.should_receive(:call).with(subject, message)
+          subject.send(message)
+        end
+
+        context "when the callback is nil" do
+          let(:callback) { nil }
+
+          it "doesn't raise" do
+            subject.send(message)
+          end
+        end
+      end
+    end
+
+    describe 'message id' do
+      before do
+        @message_ids = []
+        Plog::Packets::MultipartMessage.stub(:encode) do |message_id, _, _, _, _, _, _|
+          @message_ids << message_id
+        end
+      end
+
+      it "encodes each message with a monotonically increasing message id" do
+        first_id = subject.last_message_id
+        expected_sequence = (first_id + 1...first_id + 6).to_a
+        5.times { subject.send(message) }
+        expect(@message_ids).to eq(expected_sequence)
+      end
+    end
+
+    describe 'chunking' do
+      let(:chunk_size) { 5 }
+      let(:message) { 'AAAA' }
+      let(:expected_chunks) { ['AAAA'] }
+
+      before do
+        @sent_datagrams = []
+        Plog::Packets::MultipartMessage.stub(:encode) do |_, _, _, _, count, index, data|
+          [count, index, data]
+        end
+        udp_socket.stub(:send) do |datagram, _, _, _|
+          @sent_datagrams << datagram
+        end
+      end
+
+      def validate_datagrams
+        # Reassemble the message as binary and verify the counts and indexes.
+        reassembled_message = "".force_encoding('BINARY')
+        @sent_datagrams.each_with_index do |(count, index, data), datagram_index|
+          expect(count.to_i).to eq(expected_chunks.count)
+          expect(index.to_i).to eq(datagram_index)
+          expect(data).to eq(expected_chunks[datagram_index].force_encoding('BINARY'))
+          reassembled_message += data
+        end
+        # Convert the message back to the original encoding and verify.
+        reassembled_message.force_encoding(message.encoding)
+        expect(reassembled_message).to eq(message)
+      end
+
+      context "when the message length is lower than the chunk size" do
+        let(:chunk_size) { 5 }
+        let(:message) { "A" * (chunk_size - 1) }
+        let(:expected_chunks) { [message] }
+
+        it "encodes the message and sends it as a single packet" do
+          subject.send(message)
+          validate_datagrams
+        end
+      end
+
+      context "when the message is larger than the chunk size" do
+        let(:chunk_size) { 5 }
+        let(:message) { "A" * (chunk_size + 1) }
+        let(:expected_chunks) { ["A" * chunk_size, "A"] }
+
+        it "chunks the message and sends it as many packets" do
+          subject.send(message)
+          validate_datagrams
+        end
+      end
+
+      context "when the message contains multi-byte encoded characters" do
+        let(:chunk_size) { 5 }
+        let(:message) { "\u00E9ABCDEFGH" }
+        let(:expected_chunks) { [
+          "\u00E9ABC",
+          "DEFGH"
+        ]}
+
+        it "correctly chunks the message" do
+          subject.send(message)
+          validate_datagrams
+        end
+      end
+    end
+
+    describe 'exceptions' do
+
+      context "when the socket operation raises" do
+        it "closes and re-opens the socket" do
+          udp_socket.stub(:send).and_raise
+          udp_socket.should_receive(:close).once
+          expect { subject.send(message) }.to raise_error
+
+          udp_socket.stub(:send) {}
+          UDPSocket.should_receive(:new).once.and_return(udp_socket)
+          subject.send(message)
+        end
+      end
+
+    end
+
+  end
+
+  describe '#reset' do
+    let(:message) { 'xxx' }
+
+    it "chooses a new random message id" do
+      Random.stub(:rand).and_return(2)
+      subject.send(message)
+      expect(subject.last_message_id).to eq(3)
+
+      Random.stub(:rand).and_return(5)
+      subject.reset
+      expect(subject.last_message_id).to eq(5)
+    end
+
+    context "with an initialized socket" do
+      before do
+        subject.send(message)
+        subject.socket.should_receive(:close)
+        subject.reset
+      end
+
+    end
+
+  end
+
+end
data/spec/lib/plog/packets/multipart_message_spec.rb
ADDED
@@ -0,0 +1,100 @@
+require 'spec_helper'
+
+describe Plog::Packets::MultipartMessage do
+
+  describe '.encode' do
+
+    # Each of these values was chosen to fit in a single byte.
+    let(:message_id) { 1 }
+    let(:length) { 2 }
+    let(:checksum) { 3 }
+    let(:chunk_size) { 4 }
+    let(:count) { 5 }
+    let(:index) { 6 }
+    let(:payload) { 'xxx' }
+
+    let(:encoded) do
+      Plog::Packets::MultipartMessage.encode(
+        message_id,
+        length,
+        checksum,
+        chunk_size,
+        count,
+        index,
+        payload
+      )
+    end
+
+    def encoded_range(first, last)
+      encoded[first..last].bytes.to_a
+    end
+
+    it "encodes a string with length 24 + payload length" do
+      expect(encoded.length).to eq(24 + payload.length)
+    end
+
+    it "encodes the protocol version as the first byte" do
+      expect(encoded_range(0, 0)).to eq([Plog::Client::PROTOCOL_VERSION])
+    end
+
+    it "encodes the command as the second byte" do
+      expect(encoded_range(1, 1)).to eq([Plog::Packets::TYPE_MULTIPART_MESSAGE])
+    end
+
+    it "encodes the multipart packet count big endian as bytes 02-03" do
+      expect(encoded_range(2, 3)).to eq([0, count])
+    end
+
+    it "encodes the multipart packet index big endian as bytes 04-05" do
+      expect(encoded_range(4, 5)).to eq([0, index])
+    end
+
+    it "encodes the chunk size big endian as bytes 06-07" do
+      expect(encoded_range(6, 7)).to eq([0, chunk_size])
+    end
+
+    it "encodes the message id big endian as bytes 08-11" do
+      expect(encoded_range(8, 11)).to eq([0, 0, 0, message_id])
+    end
+
+    it "encodes the total message length as bytes 12-15" do
+      expect(encoded_range(12, 15)).to eq([0, 0, 0, length])
+    end
+
+    it "encodes the message checksum as bytes 16-19" do
+      expect(encoded_range(16, 19)).to eq([0, 0, 0, checksum])
+    end
+
+    it "encodes zero padding for the reserved segment as bytes 20-23" do
+      expect(encoded_range(20, 23)).to eq([0, 0, 0, 0])
+    end
+
+    let(:tags) { ['tag1', 'tag2'] }
+    let(:encoded_with_tags) do
+      Plog::Packets::MultipartMessage.encode(
+        message_id,
+        length,
+        checksum,
+        chunk_size,
+        count,
+        index,
+        payload,
+        tags: tags
+      )
+    end
+
+    def encoded_with_tags_range(first, last)
+      encoded_with_tags[first..last].bytes.to_a
+    end
+
+    it "encodes the tags length as bytes 20-21" do
+      expect(encoded_with_tags_range(20, 21)).to eq([0, 10])
+    end
+
+    it "encodes a tagged string with length 24 + tag length + payload length" do
+      expect(encoded_with_tags.length).to eq(24 + 10 + payload.length)
+    end
+
+  end
+
+end
data/spec/lib/plog_spec.rb
ADDED
@@ -0,0 +1,18 @@
+require 'spec_helper'
+
+describe Plog do
+
+  describe '.new' do
+    let(:options) { { :foo => :bar } }
+
+    it "returns a plog client" do
+      expect(Plog.new).to be_an_instance_of(Plog::Client)
+    end
+
+    it "passes options to the client initializer" do
+      Plog::Client.should_receive(:new).with(options).and_call_original
+      Plog.new(options)
+    end
+  end
+
+end
data/spec/spec_helper.rb
ADDED
metadata
ADDED
@@ -0,0 +1,78 @@
+--- !ruby/object:Gem::Specification
+name: plog-ruby
+version: !ruby/object:Gem::Version
+  version: 0.0.14
+platform: ruby
+authors:
+- Nelson Gauthier
+autorequire:
+bindir: bin
+cert_chain: []
+date: 2015-12-11 00:00:00.000000000 Z
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: murmurhash3
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: '0.1'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ~>
+      - !ruby/object:Gem::Version
+        version: '0.1'
+description: Send messages via UDP to the Plog Kafka forwarder.
+email:
+- nelson@airbnb.com
+executables:
+- plogstats
+extensions: []
+extra_rdoc_files: []
+files:
+- .gitignore
+- Gemfile
+- README.md
+- bin/plogstats
+- lib/plog-ruby.rb
+- lib/plog.rb
+- lib/plog/checksum.rb
+- lib/plog/client.rb
+- lib/plog/packets.rb
+- lib/plog/packets/multipart_message.rb
+- lib/plog/version.rb
+- plog-ruby.gemspec
+- spec/lib/plog/client_spec.rb
+- spec/lib/plog/packets/multipart_message_spec.rb
+- spec/lib/plog_spec.rb
+- spec/spec_helper.rb
+homepage: https://github.com/airbnb/plog-ruby
+licenses: []
+metadata: {}
+post_install_message:
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ! '>='
+    - !ruby/object:Gem::Version
+      version: '0'
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ! '>='
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubyforge_project:
+rubygems_version: 2.4.1
+signing_key:
+specification_version: 4
+summary: Ruby client for Plog.
+test_files:
+- spec/lib/plog/client_spec.rb
+- spec/lib/plog/packets/multipart_message_spec.rb
+- spec/lib/plog_spec.rb
+- spec/spec_helper.rb