codeclimate-poseidon 0.0.8
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.gitignore +21 -0
- data/.rspec +2 -0
- data/.travis.yml +14 -0
- data/.yardopts +8 -0
- data/CHANGES.md +31 -0
- data/Gemfile +13 -0
- data/LICENSE.txt +22 -0
- data/README.md +72 -0
- data/Rakefile +20 -0
- data/TODO.md +27 -0
- data/examples/consumer.rb +18 -0
- data/examples/producer.rb +9 -0
- data/lib/poseidon.rb +120 -0
- data/lib/poseidon/broker_pool.rb +86 -0
- data/lib/poseidon/cluster_metadata.rb +94 -0
- data/lib/poseidon/compressed_value.rb +23 -0
- data/lib/poseidon/compression.rb +30 -0
- data/lib/poseidon/compression/gzip_codec.rb +23 -0
- data/lib/poseidon/compression/snappy_codec.rb +29 -0
- data/lib/poseidon/connection.rb +169 -0
- data/lib/poseidon/fetched_message.rb +37 -0
- data/lib/poseidon/message.rb +151 -0
- data/lib/poseidon/message_conductor.rb +86 -0
- data/lib/poseidon/message_set.rb +80 -0
- data/lib/poseidon/message_to_send.rb +33 -0
- data/lib/poseidon/messages_for_broker.rb +56 -0
- data/lib/poseidon/messages_to_send.rb +47 -0
- data/lib/poseidon/messages_to_send_batch.rb +27 -0
- data/lib/poseidon/partition_consumer.rb +225 -0
- data/lib/poseidon/producer.rb +199 -0
- data/lib/poseidon/producer_compression_config.rb +37 -0
- data/lib/poseidon/protocol.rb +122 -0
- data/lib/poseidon/protocol/protocol_struct.rb +256 -0
- data/lib/poseidon/protocol/request_buffer.rb +77 -0
- data/lib/poseidon/protocol/response_buffer.rb +72 -0
- data/lib/poseidon/sync_producer.rb +161 -0
- data/lib/poseidon/topic_metadata.rb +89 -0
- data/lib/poseidon/version.rb +4 -0
- data/log/.gitkeep +0 -0
- data/poseidon.gemspec +27 -0
- data/spec/integration/multiple_brokers/consumer_spec.rb +45 -0
- data/spec/integration/multiple_brokers/metadata_failures_spec.rb +144 -0
- data/spec/integration/multiple_brokers/rebalance_spec.rb +69 -0
- data/spec/integration/multiple_brokers/round_robin_spec.rb +41 -0
- data/spec/integration/multiple_brokers/spec_helper.rb +60 -0
- data/spec/integration/simple/compression_spec.rb +23 -0
- data/spec/integration/simple/connection_spec.rb +35 -0
- data/spec/integration/simple/multiple_brokers_spec.rb +10 -0
- data/spec/integration/simple/simple_producer_and_consumer_spec.rb +121 -0
- data/spec/integration/simple/spec_helper.rb +16 -0
- data/spec/integration/simple/truncated_messages_spec.rb +46 -0
- data/spec/integration/simple/unavailable_broker_spec.rb +72 -0
- data/spec/spec_helper.rb +32 -0
- data/spec/test_cluster.rb +211 -0
- data/spec/unit/broker_pool_spec.rb +98 -0
- data/spec/unit/cluster_metadata_spec.rb +46 -0
- data/spec/unit/compression/gzip_codec_spec.rb +34 -0
- data/spec/unit/compression/snappy_codec_spec.rb +49 -0
- data/spec/unit/compression_spec.rb +17 -0
- data/spec/unit/connection_spec.rb +4 -0
- data/spec/unit/fetched_message_spec.rb +11 -0
- data/spec/unit/message_conductor_spec.rb +164 -0
- data/spec/unit/message_set_spec.rb +42 -0
- data/spec/unit/message_spec.rb +129 -0
- data/spec/unit/message_to_send_spec.rb +10 -0
- data/spec/unit/messages_for_broker_spec.rb +54 -0
- data/spec/unit/messages_to_send_batch_spec.rb +25 -0
- data/spec/unit/messages_to_send_spec.rb +63 -0
- data/spec/unit/partition_consumer_spec.rb +142 -0
- data/spec/unit/producer_compression_config_spec.rb +42 -0
- data/spec/unit/producer_spec.rb +51 -0
- data/spec/unit/protocol/request_buffer_spec.rb +16 -0
- data/spec/unit/protocol_spec.rb +54 -0
- data/spec/unit/sync_producer_spec.rb +156 -0
- data/spec/unit/topic_metadata_spec.rb +43 -0
- metadata +225 -0
@@ -0,0 +1,199 @@
|
|
1
|
+
module Poseidon
  # Provides a high level interface for sending messages to a cluster
  # of Kafka brokers.
  #
  # ## Producer Creation
  #
  # Producer requires a broker list and a client_id:
  #
  #     producer = Producer.new(["broker1:port1", "broker2:port1"], "my_client_id",
  #                             :type => :sync)
  #
  # The broker list is only used to bootstrap our knowledge of the cluster --
  # it does not need to contain every broker. The client id should be unique
  # across all clients in the cluster.
  #
  # ## Sending Messages
  #
  # Messages must have a topic before being sent:
  #
  #     messages = []
  #     messages << Poseidon::MessageToSend.new("topic1", "Hello World")
  #     messages << Poseidon::MessageToSend.new("user_updates_topic", user.update, user.id)
  #     producer.send_messages(messages)
  #
  # ## Producer Types
  #
  # There are two types of producers: sync and async. They can be specified
  # via the :type option when creating a producer.
  #
  # ## Sync Producer
  #
  # The :sync producer blocks while it sends messages to the cluster. The more
  # messages you can send per #send_messages call the more efficient it will
  # be.
  #
  # ## Compression
  #
  # When creating the producer you can specify a compression method:
  #
  #     producer = Producer.new(["broker1:port1"], "my_client_id",
  #                             :type => :sync, :compression_codec => :gzip)
  #
  # If you don't specify which topics to compress it will compress all topics.
  # You can specify a set of topics to compress when creating the producer:
  #
  #     producer = Producer.new(["broker1:port1"], "my_client_id",
  #                             :type => :sync, :compression_codec => :gzip,
  #                             :compressed_topics => ["compressed_topic_1"])
  #
  # ## Partitioning
  #
  # For keyless messages the producer will round-robin messages to all
  # _available_ partitions for a topic. This means that if we are unable to
  # send messages to a specific broker we'll retry sending those to a different
  # broker.
  #
  # However, if you specify a key when creating the message, the producer
  # will choose a partition based on the key and only send to that partition.
  #
  # ## Custom Partitioning
  #
  # You may also specify a custom partitioning scheme for messages by passing
  # a Proc (or any object that responds to #call) to the Producer. The proc
  # must return an Integer >= 0 and less-than partition_count.
  #
  #     my_partitioner = Proc.new { |key, partition_count| Zlib::crc32(key) % partition_count }
  #
  #     producer = Producer.new(["broker1:port1", "broker2:port1"], "my_client_id",
  #                             :type => :sync, :partitioner => my_partitioner)
  #
  # @api public
  class Producer
    # Option keys accepted by {#initialize}; anything else raises ArgumentError.
    # @api private
    VALID_OPTIONS = [
      :ack_timeout_ms,
      :compressed_topics,
      :compression_codec,
      :max_send_retries,
      :metadata_refresh_interval_ms,
      :partitioner,
      :retry_backoff_ms,
      :required_acks,
      :socket_timeout_ms,
      :type,
    ]

    # @api private
    OPTION_DEFAULTS = {
      :type => :sync
    }

    # Returns a new Producer.
    #
    # @param [Array<String>] brokers An array of brokers in the form "host1:port1"
    #
    # @param [String] client_id A client_id used to identify the producer.
    #
    # @param [Hash] options
    #
    # @option options [:sync / :async] :type (:sync)
    #   Whether we should send messages right away or queue them and send
    #   them in the background.
    #
    # @option options [:gzip / :snappy / :none] :compression_codec (:none)
    #   Type of compression to use.
    #
    # @option options [Enumerable<String>] :compressed_topics (nil)
    #   Topics to compress. If this is not specified we will compress all
    #   topics provided that +:compression_codec+ is set.
    #
    # @option options [Integer: Milliseconds] :metadata_refresh_interval_ms (600_000)
    #   How frequently we should update the topic metadata in milliseconds.
    #
    # @option options [#call, nil] :partitioner
    #   Object which partitions messages based on key.
    #   Responds to #call(key, partition_count).
    #
    # @option options [Integer] :max_send_retries (3)
    #   Number of times to retry sending of messages to a leader.
    #
    # @option options [Integer] :retry_backoff_ms (100)
    #   The amount of time (in milliseconds) to wait before refreshing the metadata
    #   after we are unable to send messages.
    #
    # @option options [Integer] :required_acks (0)
    #   The number of acks required per request.
    #
    # @option options [Integer] :ack_timeout_ms (1500)
    #   How long the producer waits for acks.
    #
    # @option options [Integer] :socket_timeout_ms (10000)
    #   How long the producer socket waits for any reply from server.
    #
    # @raise [ArgumentError] if an unknown option is passed or brokers is not
    #   enumerable.
    #
    # @api public
    def initialize(brokers, client_id, options = {})
      options = options.dup
      validate_options(options)

      if !brokers.respond_to?(:each)
        raise ArgumentError, "brokers must respond to #each"
      end
      @brokers = brokers
      @client_id = client_id
      @producer = build_producer(options)
      @shutdown = false
    end

    # Send messages to the cluster. Raises an exception if the producer fails to send the messages.
    #
    # @param [Enumerable<MessageToSend>] messages
    #   Messages must have a +topic+ set and may have a +key+ set.
    #
    # @return [Boolean]
    #
    # @raise [Errors::ProducerShutdownError] if the producer has been closed.
    # @raise [ArgumentError] if messages is not enumerable.
    #
    # @api public
    def send_messages(messages)
      raise Errors::ProducerShutdownError if @shutdown
      if !messages.respond_to?(:each)
        raise ArgumentError, "messages must respond to #each"
      end

      @producer.send_messages(convert_to_messages_objects(messages))
    end

    # Closes all open connections to brokers
    def close
      @shutdown = true
      @producer.close
    end

    alias_method :shutdown, :close

    private
    # Rejects unknown option keys and extracts the producer type
    # (defaulting to :sync), leaving the remaining options untouched.
    def validate_options(options)
      unknown_keys = options.keys - VALID_OPTIONS
      if unknown_keys.any?
        raise ArgumentError, "Unknown options: #{unknown_keys.inspect}"
      end

      @type = options.delete(:type) || :sync
    end

    # Wraps user-facing MessageToSend objects in internal Message objects.
    def convert_to_messages_objects(messages)
      messages.map do |m|
        Message.new(:value => m.value, :topic => m.topic, :key => m.key)
      end
    end

    # Builds the underlying producer implementation for @type.
    # :async is not implemented yet.
    def build_producer(options)
      case @type
      when :sync
        SyncProducer.new(@client_id, @brokers, options)
      when :async
        raise "Not implemented yet"
      end
    end
  end
end
|
@@ -0,0 +1,37 @@
|
|
1
|
+
module Poseidon
  # Resolves which compression codec, if any, should be applied to the
  # messages of a given topic.
  #
  # @api private
  class ProducerCompressionConfig
    # Maps the user-facing codec symbols to codec implementations.
    # :none maps to nil so it is a valid, explicit "no compression" choice.
    COMPRESSION_CODEC_MAP = {
      :gzip   => Compression::GzipCodec,
      :snappy => Compression::SnappyCodec,
      :none   => nil
    }

    # @param [Symbol, nil] compression_codec key into COMPRESSION_CODEC_MAP
    # @param [Enumerable<String>, nil] compressed_topics topics to compress;
    #   nil means "compress every topic" (when a codec is set)
    # @raise [ArgumentError] if compression_codec is not a known codec symbol
    def initialize(compression_codec, compressed_topics)
      @compression_codec =
        if compression_codec
          unless COMPRESSION_CODEC_MAP.key?(compression_codec)
            raise ArgumentError, "Unknown compression codec: '#{compression_codec}' (accepted: #{COMPRESSION_CODEC_MAP.keys.inspect})"
          end
          COMPRESSION_CODEC_MAP[compression_codec]
        end

      @compressed_topics = compressed_topics ? Set.new(compressed_topics) : nil
    end

    # Returns the codec to use for +topic+, or false when the topic
    # should be sent uncompressed.
    def compression_codec_for_topic(topic)
      return false if @compression_codec.nil?
      return @compression_codec if @compressed_topics.nil?

      @compressed_topics.include?(topic) ? @compression_codec : false
    end
  end
end
|
@@ -0,0 +1,122 @@
|
|
1
|
+
module Poseidon
  # Declarative definitions of every wire-format structure used when
  # talking to Kafka: produce, fetch, offset and metadata requests and
  # their responses.
  #
  # @api private
  module Protocol
    require "poseidon/protocol/protocol_struct"
    require "poseidon/protocol/request_buffer"
    require "poseidon/protocol/response_buffer"

    # Kafka API key for each request type.
    API_KEYS = {
      :produce  => 0,
      :fetch    => 1,
      :offset   => 2,
      :metadata => 3
    }

    # Header shared by every request / response.
    RequestCommon = ProtocolStruct.new(:api_key        => :int16,
                                       :api_version    => :int16,
                                       :correlation_id => :int32,
                                       :client_id      => :string)
    ResponseCommon = ProtocolStruct.new(:correlation_id => :int32)

    # MessageSet common structure.
    MessageStruct = ProtocolStruct.new(:magic_type => :int8,
                                       :attributes => :int8,
                                       :key        => :bytes,
                                       :value      => :bytes).prepend_size.prepend_crc32.truncatable
    MessageWithOffsetStruct = ProtocolStruct.new(:offset  => :int64,
                                                 :message => MessageStruct)

    # When part of produce requests or fetch responses a MessageSet
    # has a prepended size. When a MessageSet is compressed and
    # nested in a Message size is not prepended.
    MessageSetStruct = ProtocolStruct.new(:messages => [Message]).
                                      size_bound_array(:messages)
    MessageSetStructWithSize = MessageSetStruct.dup.prepend_size

    # Produce request.
    MessagesForPartition = ProtocolStruct.new(:partition   => :int32,
                                              :message_set => MessageSet)
    MessagesForTopic = ProtocolStruct.new(:topic => :string,
                                          :messages_for_partitions =>
                                            [MessagesForPartition])
    ProduceRequest = ProtocolStruct.new(:common              => RequestCommon,
                                        :required_acks       => :int16,
                                        :timeout             => :int32,
                                        :messages_for_topics => [MessagesForTopic])

    # Produce response.
    ProducePartitionResponse = ProtocolStruct.new(:partition => :int32,
                                                  :error     => :int16,
                                                  :offset    => :int64)
    ProduceTopicResponse = ProtocolStruct.new(:topic      => :string,
                                              :partitions => [ProducePartitionResponse])
    ProduceResponse = ProtocolStruct.new(:common         => ResponseCommon,
                                         :topic_response => [ProduceTopicResponse])

    # Fetch request.
    PartitionFetch = ProtocolStruct.new(:partition    => :int32,
                                        :fetch_offset => :int64,
                                        :max_bytes    => :int32)
    TopicFetch = ProtocolStruct.new(:topic             => :string,
                                    :partition_fetches => [PartitionFetch])
    FetchRequest = ProtocolStruct.new(:common        => RequestCommon,
                                      :replica_id    => :int32,
                                      :max_wait_time => :int32,
                                      :min_bytes     => :int32,
                                      :topic_fetches => [TopicFetch])

    # Fetch response.
    PartitionFetchResponse = ProtocolStruct.new(:partition             => :int32,
                                                :error                 => :int16,
                                                :highwater_mark_offset => :int64,
                                                :message_set           => MessageSet)
    TopicFetchResponse = ProtocolStruct.new(:topic                     => :string,
                                            :partition_fetch_responses => [PartitionFetchResponse])
    FetchResponse = ProtocolStruct.new(
      :common                => ResponseCommon,
      :topic_fetch_responses => [TopicFetchResponse])

    # Offset request.
    PartitionOffsetRequest = ProtocolStruct.new(:partition             => :int32,
                                                :time                  => :int64,
                                                :max_number_of_offsets => :int32)
    TopicOffsetRequest = ProtocolStruct.new(
      :topic                     => :string,
      :partition_offset_requests => [PartitionOffsetRequest])
    OffsetRequest = ProtocolStruct.new(:common                => RequestCommon,
                                       :replica_id            => :int32,
                                       :topic_offset_requests => [TopicOffsetRequest])

    # Offset response.
    Offset = ProtocolStruct.new(:offset => :int64)
    PartitionOffset = ProtocolStruct.new(:partition => :int32,
                                         :error     => :int16,
                                         :offsets   => [Offset])
    TopicOffsetResponse = ProtocolStruct.new(:topic             => :string,
                                             :partition_offsets => [PartitionOffset])
    OffsetResponse = ProtocolStruct.new(
      :common                 => ResponseCommon,
      :topic_offset_responses => [TopicOffsetResponse])

    # Metadata request.
    MetadataRequest = ProtocolStruct.new( :common      => RequestCommon,
                                          :topic_names => [:string])

    # Metadata response.
    Broker = ProtocolStruct.new(:id   => :int32,
                                :host => :string,
                                :port => :int32)
    PartitionMetadata = ProtocolStruct.new(:error    => :int16,
                                           :id       => :int32,
                                           :leader   => :int32,
                                           :replicas => [:int32],
                                           :isr      => [:int32])
    TopicMetadataStruct = ProtocolStruct.new(:error      => :int16,
                                             :name       => :string,
                                             :partitions => [PartitionMetadata])
    MetadataResponse = ProtocolStruct.new(:common  => ResponseCommon,
                                          :brokers => [Broker],
                                          :topics  => [TopicMetadata])
  end
end
|
@@ -0,0 +1,256 @@
|
|
1
|
+
module Poseidon
  module Protocol
    # A Struct subclass that knows how to serialize itself to, and
    # deserialize itself from, Kafka's wire format.
    #
    # Created via `ProtocolStruct.new(:member => :type, ...)` which returns
    # a new struct class with the member-to-type map attached. Types are
    # either primitive symbols (methods on the buffer, e.g. :int32, :string),
    # another ProtocolStruct class, or a one-element array for repeated
    # fields. The class-level flags prepend_size / prepend_crc32 /
    # truncatable / size_bound_array tune the encoding.
    class ProtocolStruct < Struct
      # Raised when a member fails to serialize.
      class EncodingError < StandardError; end
      # Raised when a member fails to deserialize.
      class DecodingError < StandardError; end

      # Builds a struct class from a member => type hash.
      # (Generated struct classes get their own #new from Struct, so this
      # hash-taking override only applies to ProtocolStruct itself.)
      def self.new(hash)
        klass = super(*hash.keys)
        klass.type_map = hash
        klass
      end

      def self.type_map=(type_map)
        @type_map = type_map
      end

      def self.type_map
        @type_map
      end

      # Encode/decode a leading int32 byte count. Returns self for chaining.
      def self.prepend_size
        @prepend_size = true
        self
      end

      # Encode/decode a leading int32 CRC32 checksum. Returns self for chaining.
      def self.prepend_crc32
        @prepend_crc32 = true
        self
      end

      # Allow this struct to be cut off mid-stream (Kafka may truncate the
      # last message in a fetch response). Returns self for chaining.
      def self.truncatable
        @truncatable = true
        self
      end

      def self.prepend_size?
        @prepend_size
      end

      def self.prepend_crc32?
        @prepend_crc32
      end

      def self.truncatable?
        @truncatable
      end

      # Marks +member+ as an array bounded by the prepended size rather
      # than by an explicit element count. Returns self for chaining.
      def self.size_bound_array(member)
        @size_bound_members ||= []
        @size_bound_members << member
        self
      end

      def self.size_bound_array?(member)
        @size_bound_members ||= []
        @size_bound_members.include?(member)
      end

      # Recursively find all objects with errors
      def objects_with_errors
        children = []
        each_pair do |member, value|
          case value
          when Array
            value.each do |v|
              if v.respond_to?(:objects_with_errors)
                children << v
              end
            end
          else
            if value.respond_to?(:objects_with_errors)
              children << value
            end
          end
        end

        children_with_errors = children.map(&:objects_with_errors).flatten
        if members.include?(:error) && self[:error] != Errors::NO_ERROR_CODE
          children_with_errors + [self]
        else
          children_with_errors
        end
      end

      def raise_error
        raise error_class if error_class
      end

      def error_class
        Errors::ERROR_CODES[self[:error]]
      end

      def raise_error_if_one_exists
        objects_with_errors.each do |object|
          object.raise_error
        end
      end

      # Serializes every member into +buffer+, honoring the class-level
      # prepend_size / prepend_crc32 flags.
      def write(buffer)
        maybe_prepend_size(buffer) do
          maybe_prepend_crc32(buffer) do
            each_pair do |member, value|
              begin
                write_member(buffer, member, value)
              rescue
                # Fixed typo: was "Error writting"
                raise EncodingError, "Error writing #{member} in #{self.class} (#{$!.class}: #{$!.message})"
              end
            end
          end
        end
      end

      def maybe_prepend_size(buffer)
        if self.class.prepend_size?
          buffer.prepend_size do
            yield
          end
        else
          yield
        end
      end

      def maybe_prepend_crc32(buffer)
        if self.class.prepend_crc32?
          buffer.prepend_crc32 do
            yield
          end
        else
          yield
        end
      end

      def write_member(buffer, member, value)
        case type = type_map[member]
        when Array
          # Size-bound arrays are delimited by byte count, not element count.
          buffer.int32(value.size) unless self.class.size_bound_array?(member)
          value.each { |v| write_type(buffer, type.first, v) }
        else
          write_type(buffer, type, value)
        end
      end

      def write_type(buffer, type, value)
        case type
        when Symbol
          buffer.send(type, value)
        else
          value.write(buffer)
        end
      end

      # Populate struct from buffer based on members and their type definition.
      def self.read(buffer)
        s = new
        s.read(buffer)
        s
      end

      def read(buffer)
        if self.class.prepend_size?
          if !have_header?(buffer)
            @truncated = true
            return
          end

          @size = buffer.int32

          if self.class.prepend_crc32?
            @crc32 = buffer.int32
            # Re-pack to get a signed int32 matching the on-wire checksum.
            @computed_crc32 = [Zlib::crc32(buffer.peek(@size-4))].pack("l>").unpack("l>").first
            if @crc32 != @computed_crc32
              @checksum_failed = true
            end
            expected_bytes_remaining = @size - 4
          else
            expected_bytes_remaining = @size
          end

          if self.class.truncatable? && expected_bytes_remaining > buffer.bytes_remaining
            @truncated = true
            return
          end
        end

        members.each do |member|
          begin
            self[member] = read_member(buffer, member)
          rescue DecodingError
            # Just reraise instead of producing a crazy nested exception
            raise
          rescue
            # Fixed: message previously ended with a doubled closing paren.
            raise DecodingError, "Error while reading #{member} in #{self.class} (#{$!.class}: #{$!.message})"
          end
        end
      end

      def have_header?(buffer)
        if self.class.truncatable?
          if self.class.prepend_crc32?
            header_bytes = 8
          else
            header_bytes = 4
          end

          return buffer.bytes_remaining >= header_bytes
        else
          return true
        end
      end

      def read_member(buffer, member)
        case type = type_map[member]
        when Array
          if self.class.size_bound_array?(member)
            # Consume exactly @size bytes when this struct had a prepended size.
            if @size
              array_buffer = ResponseBuffer.new(buffer.read(@size))
            else
              array_buffer = buffer
            end

            array = []
            while !array_buffer.eof? && (v = read_type(array_buffer, type.first))
              array << v
            end
            array
          else
            buffer.int32.times.map { read_type(buffer, type.first) }
          end
        else
          read_type(buffer, type)
        end
      end

      def read_type(buffer, type)
        case type
        when Symbol
          buffer.send(type)
        else
          type.read(buffer)
        end
      end

      def type_map
        self.class.type_map
      end

      # True when a prepended CRC32 did not match the payload.
      def checksum_failed?
        @checksum_failed
      end

      # True when the buffer ended before this struct was fully read.
      def truncated?
        @truncated
      end
    end
  end
end
|