ruby-kafka 0.7.5 → 0.7.6.beta1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: af054c6ab2097ed1069b4875fab57e350a562467551a33f8ff46e6aeb2319dc3
- data.tar.gz: cb72f7416f0a587bf506465c8c52144d82ae0b98c698250b87f0c178b2768cfd
+ metadata.gz: e372f99c56ceb1e59bf7bc9d6cdc6d002637dc41c40b7c3f1d782aeea1e29c40
+ data.tar.gz: 70ec79b24ffb7105de1d01ebc2ee43da6d13778b2813feacbc30707da072d1c0
  SHA512:
- metadata.gz: 06ad099880c38bed87155117469f55cd2682d9b1e0fae9abc5b44aafa74acf55984db73a03861bb94e08585744139928d6859fb2a9ea93f21dcab6aa1f0ae9cb
- data.tar.gz: '087d565a6ae886f053323a0c1f2222eaf63f044664544c614d30e0914f145d83a0a58369f9ee343828224b13fa76f54def52fe8cfd94e9773d4089bea62ad8e8'
+ metadata.gz: c072eb8640de239ca77e3397933ed9d6575f42621aacff709d966b4213b0dbef8b6dd198c3d12298167cd07349c8d4438e3127677803f2630c2c405c3e9771eb
+ data.tar.gz: 6b265b87a57d488d36c8b91c223ebb781a6b2c602fae101023b412736bbf9a7ccda1456d66eab107c22409fb1a3e6f7a0552dd274f595ef243b227206350a445
data/.circleci/config.yml CHANGED
@@ -113,21 +113,53 @@ jobs:
  environment:
  LOG_LEVEL: DEBUG
  - image: wurstmeister/zookeeper
- - image: wurstmeister/kafka:2.11-2.0.0
+ - image: wurstmeister/kafka:2.11-2.0.1
  environment:
  KAFKA_ADVERTISED_HOST_NAME: localhost
  KAFKA_ADVERTISED_PORT: 9092
  KAFKA_PORT: 9092
  KAFKA_ZOOKEEPER_CONNECT: localhost:2181
  KAFKA_DELETE_TOPIC_ENABLE: true
- - image: wurstmeister/kafka:2.11-2.0.0
+ - image: wurstmeister/kafka:2.11-2.0.1
  environment:
  KAFKA_ADVERTISED_HOST_NAME: localhost
  KAFKA_ADVERTISED_PORT: 9093
  KAFKA_PORT: 9093
  KAFKA_ZOOKEEPER_CONNECT: localhost:2181
  KAFKA_DELETE_TOPIC_ENABLE: true
- - image: wurstmeister/kafka:2.11-2.0.0
+ - image: wurstmeister/kafka:2.11-2.0.1
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9094
+ KAFKA_PORT: 9094
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ steps:
+ - checkout
+ - run: bundle install --path vendor/bundle
+ - run: bundle exec rspec --profile --tag functional spec/functional
+
+ kafka-2.1:
+ docker:
+ - image: circleci/ruby:2.5.1-node
+ environment:
+ LOG_LEVEL: DEBUG
+ - image: wurstmeister/zookeeper
+ - image: wurstmeister/kafka:2.12-2.1.0
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9092
+ KAFKA_PORT: 9092
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.1.0
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: localhost
+ KAFKA_ADVERTISED_PORT: 9093
+ KAFKA_PORT: 9093
+ KAFKA_ZOOKEEPER_CONNECT: localhost:2181
+ KAFKA_DELETE_TOPIC_ENABLE: true
+ - image: wurstmeister/kafka:2.12-2.1.0
  environment:
  KAFKA_ADVERTISED_HOST_NAME: localhost
  KAFKA_ADVERTISED_PORT: 9094
@@ -148,3 +180,4 @@ workflows:
  - kafka-1.0.0
  - kafka-1.1
  - kafka-2.0
+ - kafka-2.1
data/CHANGELOG.md CHANGED
@@ -4,6 +4,11 @@ Changes and additions to the library will be listed here.

  ## Unreleased

+ ## 0.7.6
+ - Introduce regex matching in `Consumer#subscribe` (#700)
+ - Only rejoin group on error if we're not in shutdown mode (#711)
+ - Use `maxTimestamp` for `logAppendTime` timestamps (#706)
+
  ## 0.7.5
  - Distribute partitions across consumer groups when there are few partitions per topic (#681)
  - Fix an issue where a consumer would fail to fetch any messages (#689)
data/README.md CHANGED
@@ -98,6 +98,16 @@ Or install it yourself as:
  <td>Limited support</td>
  <td>Limited support</td>
  </tr>
+ <tr>
+ <th>Kafka 2.0</th>
+ <td>Limited support</td>
+ <td>Limited support</td>
+ </tr>
+ <tr>
+ <th>Kafka 2.1</th>
+ <td>Limited support</td>
+ <td>Limited support</td>
+ </tr>
  </table>

  This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with the v0.5.x series. There's limited support for Kafka 0.8, and things should work with Kafka 0.11, although there may be performance issues due to changes in the protocol.
@@ -107,6 +117,8 @@ This library is targeting Kafka 0.9 with the v0.4.x series and Kafka 0.10 with t
  - **Kafka 0.10:** Full support for the Producer and Consumer API in ruby-kafka v0.5.x. Note that you _must_ run version 0.10.1 or higher of Kafka due to limitations in 0.10.0.
  - **Kafka 0.11:** Full support for Producer API, limited support for Consumer API in ruby-kafka v0.7.x. New features in 0.11.x includes new Record Batch format, idempotent and transactional production. The missing feature is dirty reading of Consumer API.
  - **Kafka 1.0:** Everything that works with Kafka 0.11 should still work, but so far no features specific to Kafka 1.0 have been added.
+ - **Kafka 2.0:** Everything that works with Kafka 1.0 should still work, but so far no features specific to Kafka 2.0 have been added.
+ - **Kafka 2.1:** Everything that works with Kafka 2.0 should still work, but so far no features specific to Kafka 2.1 have been added.

  This library requires Ruby 2.1 or higher.
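Since this page compares 0.7.5 against a prerelease, note that Bundler only resolves 0.7.6.beta1 when the Gemfile names the prerelease explicitly; a constraint such as `~> 0.7.5` keeps resolving to the latest stable release. A minimal Gemfile sketch (everything apart from the gem line is assumed boilerplate):

```ruby
# Gemfile — opting in to the prerelease shown in this diff.
source "https://rubygems.org"

gem "ruby-kafka", "0.7.6.beta1"
```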
@@ -80,7 +80,7 @@ module Kafka
  @queue = Queue.new
  @max_queue_size = max_queue_size
  @instrumenter = instrumenter
- @logger = logger
+ @logger = TaggedLogger.new(logger)

  @worker = Worker.new(
  queue: @queue,
@@ -189,10 +189,11 @@ module Kafka
  @producer = producer
  @delivery_threshold = delivery_threshold
  @instrumenter = instrumenter
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  end

  def run
+ @logger.push_tags(@producer.to_s)
  @logger.info "Starting async producer in the background..."

  loop do
@@ -233,6 +234,7 @@ module Kafka
  @logger.error "Async producer crashed!"
  ensure
  @producer.shutdown
+ @logger.pop_tags
  end

  private
data/lib/kafka/broker.rb CHANGED
@@ -12,7 +12,7 @@ module Kafka
  @host = host
  @port = port
  @node_id = node_id
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  end

  def address_match?(host, port)
@@ -5,7 +5,7 @@ require "kafka/broker"
  module Kafka
  class BrokerPool
  def initialize(connection_builder:, logger:)
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @connection_builder = connection_builder
  @brokers = {}
  end
data/lib/kafka/client.rb CHANGED
@@ -14,6 +14,7 @@ require "kafka/fetch_operation"
  require "kafka/connection_builder"
  require "kafka/instrumenter"
  require "kafka/sasl_authenticator"
+ require "kafka/tagged_logger"

  module Kafka
  class Client
@@ -68,7 +69,7 @@ module Kafka
  sasl_gssapi_keytab: nil, sasl_plain_authzid: '', sasl_plain_username: nil, sasl_plain_password: nil,
  sasl_scram_username: nil, sasl_scram_password: nil, sasl_scram_mechanism: nil,
  sasl_over_ssl: true, ssl_ca_certs_from_system: false)
- @logger = logger || Logger.new(nil)
+ @logger = TaggedLogger.new(logger)
  @instrumenter = Instrumenter.new(client_id: client_id)
  @seed_brokers = normalize_seed_brokers(seed_brokers)

data/lib/kafka/cluster.rb CHANGED
@@ -23,7 +23,7 @@ module Kafka
  raise ArgumentError, "At least one seed broker must be configured"
  end

- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @seed_brokers = seed_brokers
  @broker_pool = broker_pool
  @cluster_info = nil
@@ -52,7 +52,7 @@ module Kafka
  # @return [Connection] a new connection.
  def initialize(host:, port:, client_id:, logger:, instrumenter:, connect_timeout: nil, socket_timeout: nil, ssl_context: nil)
  @host, @port, @client_id = host, port, client_id
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @instrumenter = instrumenter

  @connect_timeout = connect_timeout || CONNECT_TIMEOUT
@@ -93,6 +93,7 @@ module Kafka

  raise IdleConnection if idle?

+ @logger.push_tags(api_name)
  @instrumenter.instrument("request.connection", notification) do
  open unless open?

@@ -113,6 +114,8 @@ module Kafka
  close

  raise ConnectionError, "Connection error #{e.class}: #{e}"
+ ensure
+ @logger.pop_tags
  end

  private
@@ -4,7 +4,7 @@ module Kafka
  class ConnectionBuilder
  def initialize(client_id:, logger:, instrumenter:, connect_timeout:, socket_timeout:, ssl_context:, sasl_authenticator:)
  @client_id = client_id
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @instrumenter = instrumenter
  @connect_timeout = connect_timeout
  @socket_timeout = socket_timeout
@@ -46,7 +46,7 @@ module Kafka

  def initialize(cluster:, logger:, instrumenter:, group:, fetcher:, offset_manager:, session_timeout:, heartbeat:)
  @cluster = cluster
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @instrumenter = instrumenter
  @group = group
  @offset_manager = offset_manager
@@ -82,7 +82,8 @@ module Kafka
  # messages to be written. In the former case, set `start_from_beginning`
  # to true (the default); in the latter, set it to false.
  #
- # @param topic [String] the name of the topic to subscribe to.
+ # @param topic_or_regex [String, Regexp] subscribe to single topic with a string
+ # or multiple topics matching a regex.
  # @param default_offset [Symbol] whether to start from the beginning or the
  # end of the topic's partitions. Deprecated.
  # @param start_from_beginning [Boolean] whether to start from the beginning
@@ -93,12 +94,16 @@ module Kafka
  # @param max_bytes_per_partition [Integer] the maximum amount of data fetched
  # from a single partition at a time.
  # @return [nil]
- def subscribe(topic, default_offset: nil, start_from_beginning: true, max_bytes_per_partition: 1048576)
+ def subscribe(topic_or_regex, default_offset: nil, start_from_beginning: true, max_bytes_per_partition: 1048576)
  default_offset ||= start_from_beginning ? :earliest : :latest

- @group.subscribe(topic)
- @offset_manager.set_default_offset(topic, default_offset)
- @fetcher.subscribe(topic, max_bytes_per_partition: max_bytes_per_partition)
+ if topic_or_regex.is_a?(Regexp)
+ cluster_topics.select { |topic| topic =~ topic_or_regex }.each do |topic|
+ subscribe_to_topic(topic, default_offset, start_from_beginning, max_bytes_per_partition)
+ end
+ else
+ subscribe_to_topic(topic_or_regex, default_offset, start_from_beginning, max_bytes_per_partition)
+ end

  nil
  end
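For readers tracking the `Consumer#subscribe` change above (#700): a subscription can now be a `Regexp` as well as a single topic name. A minimal usage sketch, with an invented broker address, group id and topic naming scheme:

```ruby
require "kafka"

kafka = Kafka.new(["localhost:9092"], client_id: "example-app")
consumer = kafka.consumer(group_id: "example-group")

# Up to 0.7.5: one topic name (String) per call.
consumer.subscribe("events.signups")

# From 0.7.6: a Regexp subscribes to every existing topic whose name matches.
consumer.subscribe(/^events\./)

consumer.each_message do |message|
  puts "#{message.topic}/#{message.partition}@#{message.offset}: #{message.value}"
end
```

Note that, going by the `cluster_topics` helper added later in this diff, the regex is matched against the cluster's topic list once, at subscribe time, so topics created afterwards are not picked up automatically.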
@@ -241,7 +246,7 @@ module Kafka

  trigger_heartbeat

- return if !@running
+ return if shutting_down?
  end

  # We've successfully processed a batch from the partition, so we can clear
@@ -336,7 +341,7 @@ module Kafka

  trigger_heartbeat

- return if !@running
+ return if shutting_down?
  end

  # We may not have received any messages, but it's still a good idea to
@@ -386,22 +391,23 @@ module Kafka

  def consumer_loop
  @running = true
+ @logger.push_tags(@group.to_s)

  @fetcher.start

- while @running
+ while running?
  begin
  @instrumenter.instrument("loop.consumer") do
  yield
  end
  rescue HeartbeatError
  make_final_offsets_commit!
- join_group
+ join_group if running?
  rescue OffsetCommitError
- join_group
+ join_group if running?
  rescue RebalanceInProgress
  @logger.warn "Group rebalance in progress, re-joining..."
- join_group
+ join_group if running?
  rescue FetchError, NotLeaderForPartition, UnknownTopicOrPartition
  @cluster.mark_as_stale!
  rescue LeaderNotAvailable => e
@@ -424,6 +430,7 @@ module Kafka
  make_final_offsets_commit!
  @group.leave rescue nil
  @running = false
+ @logger.pop_tags
  end

  def make_final_offsets_commit!(attempts = 3)
@@ -505,7 +512,7 @@ module Kafka

  def fetch_batches
  # Return early if the consumer has been stopped.
- return [] if !@running
+ return [] if shutting_down?

  join_group unless @group.member?

@@ -545,6 +552,14 @@ module Kafka
  @pauses[topic][partition]
  end

+ def running?
+ @running
+ end
+
+ def shutting_down?
+ !running?
+ end
+
  def clear_current_offsets(excluding: {})
  @current_offsets.each do |topic, partitions|
  partitions.keep_if do |partition, _|
@@ -552,5 +567,23 @@ module Kafka
  end
  end
  end
+
+ def subscribe_to_topic(topic, default_offset, start_from_beginning, max_bytes_per_partition)
+ @group.subscribe(topic)
+ @offset_manager.set_default_offset(topic, default_offset)
+ @fetcher.subscribe(topic, max_bytes_per_partition: max_bytes_per_partition)
+ end
+
+ def cluster_topics
+ attempts = 0
+ begin
+ attempts += 1
+ @cluster.list_topics
+ rescue Kafka::ConnectionError
+ @cluster.mark_as_stale!
+ retry unless attempts > 1
+ raise
+ end
+ end
  end
  end
@@ -9,7 +9,7 @@ module Kafka

  def initialize(cluster:, logger:, group_id:, session_timeout:, retention_time:, instrumenter:)
  @cluster = cluster
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @group_id = group_id
  @session_timeout = session_timeout
  @instrumenter = instrumenter
@@ -122,6 +122,15 @@ module Kafka
  retry
  end

+ def to_s
+ "[#{@group_id}] {" + assigned_partitions.map { |topic, partitions|
+ partition_str = partitions.size > 5 ?
+ "#{partitions[0..4].join(', ')}..." :
+ partitions.join(', ')
+ "#{topic}: #{partition_str}"
+ }.join('; ') + '}:'
+ end
+
  private

  def join_group
@@ -23,7 +23,7 @@ module Kafka
  class FetchOperation
  def initialize(cluster:, logger:, min_bytes: 1, max_bytes: 10485760, max_wait_time: 5)
  @cluster = cluster
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @min_bytes = min_bytes
  @max_bytes = max_bytes
  @max_wait_time = max_wait_time
@@ -10,7 +10,7 @@ module Kafka
  def initialize(topic, fetched_partition, offset, logger:)
  @topic = topic
  @fetched_partition = fetched_partition
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @offset = offset
  end

@@ -3,7 +3,7 @@
  module Kafka
  class FetchedOffsetResolver
  def initialize(logger:)
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  end

  def resolve!(broker, topics)
data/lib/kafka/fetcher.rb CHANGED
@@ -8,7 +8,7 @@ module Kafka

  def initialize(cluster:, logger:, instrumenter:, max_queue_size:, group:)
  @cluster = cluster
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @instrumenter = instrumenter
  @max_queue_size = max_queue_size
  @group = group
@@ -55,7 +55,7 @@ module Kafka
  while @running
  loop
  end
- @logger.info "Fetcher thread exited."
+ @logger.info "#{@group} Fetcher thread exited."
  end
  @thread.abort_on_exception = true
  end
@@ -94,6 +94,7 @@ module Kafka
  attr_reader :current_reset_counter

  def loop
+ @logger.push_tags(@group.to_s)
  @instrumenter.instrument("loop.fetcher", {
  queue_size: @queue.size,
  })
@@ -112,6 +113,8 @@ module Kafka
  @logger.warn "Reached max fetcher queue size (#{@max_queue_size}), sleeping 1s"
  sleep 1
  end
+ ensure
+ @logger.pop_tags
  end

  def handle_configure(min_bytes, max_bytes, max_wait_time)
@@ -13,7 +13,7 @@ module Kafka
  @cluster = cluster
  @group = group
  @fetcher = fetcher
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @commit_interval = commit_interval
  @commit_threshold = commit_threshold

@@ -37,7 +37,7 @@ module Kafka
  @required_acks = required_acks
  @ack_timeout = ack_timeout
  @compressor = compressor
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @instrumenter = instrumenter
  end

@@ -130,7 +130,7 @@ module Kafka
  def initialize(cluster:, transaction_manager:, logger:, instrumenter:, compressor:, ack_timeout:, required_acks:, max_retries:, retry_backoff:, max_buffer_size:, max_buffer_bytesize:)
  @cluster = cluster
  @transaction_manager = transaction_manager
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @instrumenter = instrumenter
  @required_acks = required_acks == :all ? -1 : required_acks
  @ack_timeout = ack_timeout
@@ -150,6 +150,10 @@ module Kafka
  @pending_message_queue = PendingMessageQueue.new
  end

+ def to_s
+ "Producer #{@target_topics.to_a.join(', ')}"
+ end
+
  # Produces a message to the specified topic. Note that messages are buffered in
  # the producer until {#deliver_messages} is called.
  #
@@ -205,7 +209,7 @@ module Kafka
  # If the producer is in transactional mode, all the message production
  # must be used when the producer is currently in transaction
  if @transaction_manager.transactional? && !@transaction_manager.in_transaction?
- raise 'You must trigger begin_transaction before producing messages'
+ raise "Cannot produce to #{topic}: You must trigger begin_transaction before producing messages"
  end

  @target_topics.add(topic)
@@ -391,11 +395,11 @@ module Kafka
  if buffer_size.zero?
  break
  elsif attempt <= @max_retries
- @logger.warn "Failed to send all messages; attempting retry #{attempt} of #{@max_retries} after #{@retry_backoff}s"
+ @logger.warn "Failed to send all messages to #{pretty_partitions}; attempting retry #{attempt} of #{@max_retries} after #{@retry_backoff}s"

  sleep @retry_backoff
  else
- @logger.error "Failed to send all messages; keeping remaining messages in buffer"
+ @logger.error "Failed to send all messages to #{pretty_partitions}; keeping remaining messages in buffer"
  break
  end
  end
@@ -407,12 +411,14 @@ module Kafka
  end

  unless @buffer.empty?
- partitions = @buffer.map {|topic, partition, _| "#{topic}/#{partition}" }.join(", ")
-
- raise DeliveryFailed.new("Failed to send messages to #{partitions}", buffer_messages)
+ raise DeliveryFailed.new("Failed to send messages to #{pretty_partitions}", buffer_messages)
  end
  end

+ def pretty_partitions
+ @buffer.map {|topic, partition, _| "#{topic}/#{partition}" }.join(", ")
+ end
+
  def assign_partitions!
  failed_messages = []
  topics_with_failures = Set.new
@@ -11,6 +11,7 @@ module Kafka
  CODEC_ID_MASK = 0b00000111
  IN_TRANSACTION_MASK = 0b00010000
  IS_CONTROL_BATCH_MASK = 0b00100000
+ TIMESTAMP_TYPE_MASK = 0b001000

  attr_reader :records, :first_offset, :first_timestamp, :partition_leader_epoch, :in_transaction, :is_control_batch, :last_offset_delta, :max_timestamp, :producer_id, :producer_epoch, :first_sequence

@@ -163,6 +164,7 @@ module Kafka
  codec_id = attributes & CODEC_ID_MASK
  in_transaction = (attributes & IN_TRANSACTION_MASK) > 0
  is_control_batch = (attributes & IS_CONTROL_BATCH_MASK) > 0
+ log_append_time = (attributes & TIMESTAMP_TYPE_MASK) != 0

  last_offset_delta = record_batch_decoder.int32
  first_timestamp = Time.at(record_batch_decoder.int64 / 1000)
@@ -186,7 +188,7 @@ module Kafka
  until records_array_decoder.eof?
  record = Record.decode(records_array_decoder)
  record.offset = first_offset + record.offset_delta
- record.create_time = first_timestamp + record.timestamp_delta
+ record.create_time = log_append_time && max_timestamp ? max_timestamp : first_timestamp + record.timestamp_delta
  records_array << record
  end
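For context on the `maxTimestamp`/`logAppendTime` change above (#706): bit 3 of the record batch `attributes` field is the timestamp-type flag, and when the broker stamps batches with log-append time the per-record deltas no longer describe the producer's create time, so the decoder now falls back to the batch's `max_timestamp`. A standalone sketch of the same bit arithmetic (the sample `attributes` value is invented):

```ruby
CODEC_ID_MASK       = 0b00000111  # bits 0-2: compression codec id
TIMESTAMP_TYPE_MASK = 0b001000    # bit 3: 0 = create time, 1 = log append time

attributes = 0b0001010            # hypothetical value: snappy codec, log-append-time batch

codec_id        = attributes & CODEC_ID_MASK               # => 2
log_append_time = (attributes & TIMESTAMP_TYPE_MASK) != 0  # => true
```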
@@ -7,7 +7,7 @@ module Kafka
  GSSAPI_CONFIDENTIALITY = false

  def initialize(logger:, principal:, keytab:)
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @principal = principal
  @keytab = keytab
  end
@@ -6,7 +6,7 @@ module Kafka
  PLAIN_IDENT = "PLAIN"

  def initialize(logger:, authzid:, username:, password:)
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  @authzid = authzid
  @username = username
  @password = password
@@ -14,7 +14,7 @@ module Kafka
  def initialize(username:, password:, mechanism: 'sha256', logger:)
  @username = username
  @password = password
- @logger = logger
+ @logger = TaggedLogger.new(logger)

  if mechanism
  @mechanism = MECHANISMS.fetch(mechanism) do
@@ -9,7 +9,7 @@ module Kafka
  def initialize(logger:, sasl_gssapi_principal:, sasl_gssapi_keytab:,
  sasl_plain_authzid:, sasl_plain_username:, sasl_plain_password:,
  sasl_scram_username:, sasl_scram_password:, sasl_scram_mechanism:)
- @logger = logger
+ @logger = TaggedLogger.new(logger)

  @plain = Sasl::Plain.new(
  authzid: sasl_plain_authzid,
@@ -0,0 +1,72 @@
+ require 'forwardable'
+
+ # Basic implementation of a tagged logger that matches the API of
+ # ActiveSupport::TaggedLogging.
+
+ module Kafka
+ module TaggedFormatter
+
+ def call(severity, timestamp, progname, msg)
+ super(severity, timestamp, progname, "#{tags_text}#{msg}")
+ end
+
+ def tagged(*tags)
+ new_tags = push_tags(*tags)
+ yield self
+ ensure
+ pop_tags(new_tags.size)
+ end
+
+ def push_tags(*tags)
+ tags.flatten.reject { |t| t.nil? || t.empty? }.tap do |new_tags|
+ current_tags.concat new_tags
+ end
+ end
+
+ def pop_tags(size = 1)
+ current_tags.pop size
+ end
+
+ def clear_tags!
+ current_tags.clear
+ end
+
+ def current_tags
+ # We use our object ID here to avoid conflicting with other instances
+ thread_key = @thread_key ||= "kafka_tagged_logging_tags:#{object_id}".freeze
+ Thread.current[thread_key] ||= []
+ end
+
+ def tags_text
+ tags = current_tags
+ if tags.any?
+ tags.collect { |tag| "[#{tag}] " }.join
+ end
+ end
+
+ end
+
+ module TaggedLogger
+ extend Forwardable
+ delegate [:push_tags, :pop_tags, :clear_tags!] => :formatter
+
+ def self.new(logger)
+ logger ||= Logger.new(nil)
+ return logger if logger.respond_to?(:push_tags) # already included
+ # Ensure we set a default formatter so we aren't extending nil!
+ logger.formatter ||= Logger::Formatter.new
+ logger.formatter.extend TaggedFormatter
+ logger.extend(self)
+ end
+
+ def tagged(*tags)
+ formatter.tagged(*tags) { yield self }
+ end
+
+ def flush
+ clear_tags!
+ super if defined?(super)
+ end
+ end
+
+ end
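Since `lib/kafka/tagged_logger.rb` is new in this release, here is a short sketch of how it behaves when wrapping a plain stdlib `Logger`; the tag values and the output shown in comments are illustrative, not taken from the gem's tests:

```ruby
require "logger"
require "kafka/tagged_logger"

logger = Kafka::TaggedLogger.new(Logger.new($stdout))

# Tags pushed onto the logger are prepended to every following line.
logger.push_tags("example-group")
logger.info "Joining group"   # => "... INFO -- : [example-group] Joining group"
logger.pop_tags

# The block form pushes the tag only for the duration of the block.
logger.tagged("Fetch") { logger.debug "Sending fetch request" }

# Passing nil yields a null logger, mirroring the old `logger || Logger.new(nil)` default.
null_logger = Kafka::TaggedLogger.new(nil)
```

This is what the `@logger = TaggedLogger.new(logger)` lines throughout this diff rely on: the consumer, fetcher and async producer can prefix their log lines with a group or producer description without pulling in ActiveSupport::TaggedLogging.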
@@ -19,7 +19,7 @@ module Kafka
  transactional_timeout: DEFAULT_TRANSACTION_TIMEOUT
  )
  @cluster = cluster
- @logger = logger
+ @logger = TaggedLogger.new(logger)

  @transactional = transactional
  @transactional_id = transactional_id
@@ -27,7 +27,7 @@ module Kafka
  def initialize(logger:)
  @state = UNINITIALIZED
  @mutex = Mutex.new
- @logger = logger
+ @logger = TaggedLogger.new(logger)
  end

  def transition_to!(next_state)
data/lib/kafka/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Kafka
- VERSION = "0.7.5"
+ VERSION = "0.7.6.beta1"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-kafka
  version: !ruby/object:Gem::Version
- version: 0.7.5
+ version: 0.7.6.beta1
  platform: ruby
  authors:
  - Daniel Schierbeck
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2019-01-15 00:00:00.000000000 Z
+ date: 2019-02-20 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: digest-crc
@@ -426,6 +426,7 @@ files:
  - lib/kafka/ssl_context.rb
  - lib/kafka/ssl_socket_with_timeout.rb
  - lib/kafka/statsd.rb
+ - lib/kafka/tagged_logger.rb
  - lib/kafka/transaction_manager.rb
  - lib/kafka/transaction_state_machine.rb
  - lib/kafka/version.rb
@@ -446,9 +447,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
  version: 2.1.0
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
- - - ">="
+ - - ">"
  - !ruby/object:Gem::Version
- version: '0'
+ version: 1.3.1
  requirements: []
  rubyforge_project:
  rubygems_version: 2.7.6