rdkafka 0.5.0 → 0.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.semaphore/semaphore.yml +23 -0
- data/CHANGELOG.md +23 -0
- data/README.md +9 -9
- data/docker-compose.yml +17 -11
- data/ext/README.md +3 -15
- data/ext/Rakefile +23 -3
- data/lib/rdkafka.rb +8 -0
- data/lib/rdkafka/abstract_handle.rb +82 -0
- data/lib/rdkafka/admin.rb +144 -0
- data/lib/rdkafka/admin/create_topic_handle.rb +27 -0
- data/lib/rdkafka/admin/create_topic_report.rb +22 -0
- data/lib/rdkafka/admin/delete_topic_handle.rb +27 -0
- data/lib/rdkafka/admin/delete_topic_report.rb +22 -0
- data/lib/rdkafka/bindings.rb +63 -17
- data/lib/rdkafka/callbacks.rb +106 -0
- data/lib/rdkafka/config.rb +18 -7
- data/lib/rdkafka/consumer.rb +162 -46
- data/lib/rdkafka/consumer/headers.rb +7 -5
- data/lib/rdkafka/consumer/partition.rb +1 -1
- data/lib/rdkafka/consumer/topic_partition_list.rb +6 -16
- data/lib/rdkafka/error.rb +35 -4
- data/lib/rdkafka/metadata.rb +92 -0
- data/lib/rdkafka/producer.rb +43 -15
- data/lib/rdkafka/producer/delivery_handle.rb +7 -49
- data/lib/rdkafka/producer/delivery_report.rb +7 -2
- data/lib/rdkafka/version.rb +3 -3
- data/rdkafka.gemspec +3 -3
- data/spec/rdkafka/abstract_handle_spec.rb +114 -0
- data/spec/rdkafka/admin/create_topic_handle_spec.rb +52 -0
- data/spec/rdkafka/admin/create_topic_report_spec.rb +16 -0
- data/spec/rdkafka/admin/delete_topic_handle_spec.rb +52 -0
- data/spec/rdkafka/admin/delete_topic_report_spec.rb +16 -0
- data/spec/rdkafka/admin_spec.rb +192 -0
- data/spec/rdkafka/bindings_spec.rb +20 -2
- data/spec/rdkafka/callbacks_spec.rb +20 -0
- data/spec/rdkafka/config_spec.rb +17 -2
- data/spec/rdkafka/consumer/message_spec.rb +6 -1
- data/spec/rdkafka/consumer_spec.rb +145 -19
- data/spec/rdkafka/error_spec.rb +7 -3
- data/spec/rdkafka/metadata_spec.rb +78 -0
- data/spec/rdkafka/producer/delivery_handle_spec.rb +3 -43
- data/spec/rdkafka/producer/delivery_report_spec.rb +5 -1
- data/spec/rdkafka/producer_spec.rb +147 -72
- data/spec/spec_helper.rb +34 -6
- metadata +34 -10
- data/.travis.yml +0 -34
@@ -19,7 +19,7 @@ module Rdkafka
           raise Rdkafka::RdkafkaError.new(err, "Error reading message headers")
         end
 
-        headers_ptr = headers_ptrptr.
+        headers_ptr = headers_ptrptr.read_pointer
 
         name_ptrptr = FFI::MemoryPointer.new(:pointer)
         value_ptrptr = FFI::MemoryPointer.new(:pointer)
@@ -42,12 +42,14 @@ module Rdkafka
             raise Rdkafka::RdkafkaError.new(err, "Error reading a message header at index #{idx}")
           end
 
-
-          name =
+          name_ptr = name_ptrptr.read_pointer
+          name = name_ptr.respond_to?(:read_string_to_null) ? name_ptr.read_string_to_null : name_ptr.read_string
 
           size = size_ptr[:value]
-
-
+
+          value_ptr = value_ptrptr.read_pointer
+
+          value = value_ptr.read_string(size)
 
           headers[name.to_sym] = value
 
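The header values are read through double indirection: the librdkafka call writes a char* into a pointer-sized out-parameter, and the Ruby side follows it with read_pointer before reading the bytes. A minimal, self-contained sketch of that pattern (not gem code; the buffer below just stands in for what the C call would fill):

require "ffi"

# Simulate the out-parameter a librdkafka call would fill with a char*.
value_ptrptr = FFI::MemoryPointer.new(:pointer)
backing      = FFI::MemoryPointer.from_string("header-value")
value_ptrptr.write_pointer(backing)

# Follow the double pointer, then read the string it points at,
# mirroring value_ptrptr.read_pointer / read_string in the diff above.
value_ptr = value_ptrptr.read_pointer
puts value_ptr.read_string # => "header-value"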
data/lib/rdkafka/consumer/topic_partition_list.rb
CHANGED
@@ -4,7 +4,7 @@ module Rdkafka
     class TopicPartitionList
       # Create a topic partition list.
       #
-      # @param data [Hash
+      # @param data [Hash{String => nil,Partition}] The topic and partition data or nil to create an empty list
      #
      # @return [TopicPartitionList]
      def initialize(data=nil)
@@ -71,7 +71,7 @@ module Rdkafka
 
      # Return a `Hash` with the topics as keys and and an array of partition information as the value if present.
      #
-      # @return [Hash
+      # @return [Hash{String => Array<Partition>,nil}]
      def to_h
        @data
      end
@@ -106,7 +106,7 @@ module Rdkafka
            data[elem[:topic]] = nil
          else
            partitions = data[elem[:topic]] || []
-            offset = if elem[:offset] ==
+            offset = if elem[:offset] == Rdkafka::Bindings::RD_KAFKA_OFFSET_INVALID
              nil
            else
              elem[:offset]
@@ -125,10 +125,10 @@ module Rdkafka
      #
      # The pointer will be cleaned by `rd_kafka_topic_partition_list_destroy` when GC releases it.
      #
-      # @return [FFI::
+      # @return [FFI::Pointer]
      # @private
      def to_native_tpl
-        tpl =
+        tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
 
        @data.each do |topic, partitions|
          if partitions
@@ -138,6 +138,7 @@ module Rdkafka
              topic,
              p.partition
            )
+
            if p.offset
              Rdkafka::Bindings.rd_kafka_topic_partition_list_set_offset(
                tpl,
@@ -158,17 +159,6 @@ module Rdkafka
 
        tpl
      end
-
-      # Creates a new native tpl and wraps it into FFI::AutoPointer which in turn calls
-      # `rd_kafka_topic_partition_list_destroy` when a pointer will be cleaned by GC
-      #
-      # @param count [Integer] an initial capacity of partitions list
-      # @return [FFI::AutoPointer]
-      # @private
-      def self.new_native_tpl(count)
-        tpl = Rdkafka::Bindings.rd_kafka_topic_partition_list_new(count)
-        FFI::AutoPointer.new(tpl, Rdkafka::Bindings.method(:rd_kafka_topic_partition_list_destroy))
-      end
    end
  end
 end
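The removed helper shows the cleanup pattern the gem had been using here: wrapping the native list in an FFI::AutoPointer so rd_kafka_topic_partition_list_destroy runs when the wrapper is garbage collected. A generic, self-contained sketch of that pattern, with libc's malloc/free standing in for the librdkafka allocate/destroy pair (names and sizes are illustrative only):

require "ffi"

module LibC
  extend FFI::Library
  ffi_lib FFI::Library::LIBC
  attach_function :malloc, [:size_t], :pointer
  attach_function :free,   [:pointer], :void
end

raw     = LibC.malloc(128)
managed = FFI::AutoPointer.new(raw, LibC.method(:free))

# `managed` behaves like a normal pointer; LibC.free(raw) runs automatically
# once the AutoPointer is garbage collected, or immediately via #free.
managed.free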
data/lib/rdkafka/error.rb
CHANGED
@@ -1,15 +1,27 @@
 module Rdkafka
+  # Base error class.
+  class BaseError < RuntimeError; end
+
   # Error returned by the underlying rdkafka library.
-  class RdkafkaError <
+  class RdkafkaError < BaseError
     # The underlying raw error response
     # @return [Integer]
-    attr_reader :rdkafka_response
+    attr_reader :rdkafka_response
+
+    # Prefix to be used for human readable representation
+    # @return [String]
+    attr_reader :message_prefix
+
+    # Error message sent by the broker
+    # @return [String]
+    attr_reader :broker_message
 
     # @private
-    def initialize(response, message_prefix=nil)
+    def initialize(response, message_prefix=nil, broker_message: nil)
       raise TypeError.new("Response has to be an integer") unless response.is_a? Integer
       @rdkafka_response = response
       @message_prefix = message_prefix
+      @broker_message = broker_message
     end
 
     # This error's code, for example `:partition_eof`, `:msg_size_too_large`.
@@ -39,9 +51,14 @@ module Rdkafka
     def is_partition_eof?
       code == :partition_eof
     end
+
+    # Error comparison
+    def ==(another_error)
+      another_error.is_a?(self.class) && (self.to_s == another_error.to_s)
+    end
   end
 
-  # Error with
+  # Error with topic partition list returned by the underlying rdkafka library.
   class RdkafkaTopicPartitionListError < RdkafkaError
     # @return [TopicPartitionList]
     attr_reader :topic_partition_list
@@ -52,4 +69,18 @@ module Rdkafka
       @topic_partition_list = topic_partition_list
     end
   end
+
+  # Error class for public consumer method calls on a closed consumer.
+  class ClosedConsumerError < BaseError
+    def initialize(method)
+      super("Illegal call to #{method.to_s} on a closed consumer")
+    end
+  end
+
+  # Error class for public producer method calls on a closed producer.
+  class ClosedProducerError < BaseError
+    def initialize(method)
+      super("Illegal call to #{method.to_s} on a closed producer")
+    end
+  end
 end
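Taken together, the error changes give every rdkafka exception a common ancestor: callers can rescue Rdkafka::BaseError to catch both library-level failures and the new closed-client errors, and the new #== compares errors by class and rendered message. A hedged usage sketch, assuming a `producer` built elsewhere via Rdkafka::Config (topic name and handling are illustrative):

begin
  producer.produce(topic: "example-topic", payload: "hello").wait
rescue Rdkafka::RdkafkaError => e
  # Library/broker failure: inspect the symbolic code and the optional broker message.
  puts "rdkafka error #{e.code}: #{e.broker_message.inspect}"
rescue Rdkafka::ClosedProducerError, Rdkafka::ClosedConsumerError => e
  puts "client already closed: #{e.message}"
rescue Rdkafka::BaseError => e
  # Everything above inherits from BaseError, so this also works as a catch-all.
  puts "other rdkafka error: #{e.message}"
end

# The new #== compares class and message text:
Rdkafka::RdkafkaError.new(10) == Rdkafka::RdkafkaError.new(10) # => true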
data/lib/rdkafka/metadata.rb
ADDED
@@ -0,0 +1,92 @@
+module Rdkafka
+  class Metadata
+    attr_reader :brokers, :topics
+
+    def initialize(native_client, topic_name = nil)
+      native_topic = if topic_name
+        Rdkafka::Bindings.rd_kafka_topic_new(native_client, topic_name, nil)
+      end
+
+      ptr = FFI::MemoryPointer.new(:pointer)
+
+      # If topic_flag is 1, we request info about *all* topics in the cluster. If topic_flag is 0,
+      # we only request info about locally known topics (or a single topic if one is passed in).
+      topic_flag = topic_name.nil? ? 1 : 0
+
+      # Retrieve the Metadata
+      result = Rdkafka::Bindings.rd_kafka_metadata(native_client, topic_flag, native_topic, ptr, 250)
+
+      # Error Handling
+      raise Rdkafka::RdkafkaError.new(result) unless result.zero?
+
+      metadata_from_native(ptr.read_pointer)
+    ensure
+      Rdkafka::Bindings.rd_kafka_topic_destroy(native_topic) if topic_name
+      Rdkafka::Bindings.rd_kafka_metadata_destroy(ptr.read_pointer)
+    end
+
+    private
+
+    def metadata_from_native(ptr)
+      metadata = Metadata.new(ptr)
+      @brokers = Array.new(metadata[:brokers_count]) do |i|
+        BrokerMetadata.new(metadata[:brokers_metadata] + (i * BrokerMetadata.size)).to_h
+      end
+
+      @topics = Array.new(metadata[:topics_count]) do |i|
+        topic = TopicMetadata.new(metadata[:topics_metadata] + (i * TopicMetadata.size))
+        raise Rdkafka::RdkafkaError.new(topic[:rd_kafka_resp_err]) unless topic[:rd_kafka_resp_err].zero?
+
+        partitions = Array.new(topic[:partition_count]) do |j|
+          partition = PartitionMetadata.new(topic[:partitions_metadata] + (j * PartitionMetadata.size))
+          raise Rdkafka::RdkafkaError.new(partition[:rd_kafka_resp_err]) unless partition[:rd_kafka_resp_err].zero?
+          partition.to_h
+        end
+        topic.to_h.merge!(partitions: partitions)
+      end
+    end
+
+    class CustomFFIStruct < FFI::Struct
+      def to_h
+        members.each_with_object({}) do |mem, hsh|
+          val = self.[](mem)
+          next if val.is_a?(FFI::Pointer) || mem == :rd_kafka_resp_err
+
+          hsh[mem] = self.[](mem)
+        end
+      end
+    end
+
+    class Metadata < CustomFFIStruct
+      layout :brokers_count, :int,
+             :brokers_metadata, :pointer,
+             :topics_count, :int,
+             :topics_metadata, :pointer,
+             :broker_id, :int32,
+             :broker_name, :string
+    end
+
+    class BrokerMetadata < CustomFFIStruct
+      layout :broker_id, :int32,
+             :broker_name, :string,
+             :broker_port, :int
+    end
+
+    class TopicMetadata < CustomFFIStruct
+      layout :topic_name, :string,
+             :partition_count, :int,
+             :partitions_metadata, :pointer,
+             :rd_kafka_resp_err, :int
+    end
+
+    class PartitionMetadata < CustomFFIStruct
+      layout :partition_id, :int32,
+             :rd_kafka_resp_err, :int,
+             :leader, :int32,
+             :replica_count, :int,
+             :replicas, :pointer,
+             :in_sync_replica_brokers, :int,
+             :isrs, :pointer
+    end
+  end
+end
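metadata_from_native walks the C arrays librdkafka returns using plain pointer arithmetic: element i of an array of structs starts at base + i * Struct.size. A self-contained sketch of that pattern (not gem code; the Pair struct and its values are made up for illustration):

require "ffi"

class Pair < FFI::Struct
  layout :id,   :int32,
         :port, :int32
end

count  = 3
buffer = FFI::MemoryPointer.new(Pair.size * count)

# Fill the native array the way the C side would.
count.times do |i|
  pair = Pair.new(buffer + i * Pair.size)
  pair[:id]   = i
  pair[:port] = 9092 + i
end

# Read it back with the same base + i * size arithmetic used above for
# :brokers_metadata, :topics_metadata and :partitions_metadata.
pairs = Array.new(count) do |i|
  pair = Pair.new(buffer + i * Pair.size)
  { id: pair[:id], port: pair[:port] }
end
p pairs # => [{:id=>0, :port=>9092}, {:id=>1, :port=>9093}, {:id=>2, :port=>9094}]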
data/lib/rdkafka/producer.rb
CHANGED
@@ -2,12 +2,19 @@ module Rdkafka
   # A producer for Kafka messages. To create a producer set up a {Config} and call {Config#producer producer} on that.
   class Producer
     # @private
-
+    # Returns the current delivery callback, by default this is nil.
+    #
+    # @return [Proc, nil]
+    attr_reader :delivery_callback
 
     # @private
     def initialize(native_kafka)
       @closing = false
       @native_kafka = native_kafka
+
+      # Makes sure, that the producer gets closed before it gets GCed by Ruby
+      ObjectSpace.define_finalizer(self, proc { close })
+
       # Start thread to poll client for delivery callbacks
       @polling_thread = Thread.new do
         loop do
@@ -32,25 +39,34 @@ module Rdkafka
       @delivery_callback = callback
     end
 
-    # Returns the current delivery callback, by default this is nil.
-    #
-    # @return [Proc, nil]
-    def delivery_callback
-      @delivery_callback
-    end
-
     # Close this producer and wait for the internal poll queue to empty.
     def close
+      return unless @native_kafka
+
       # Indicate to polling thread that we're closing
       @closing = true
       # Wait for the polling thread to finish up
       @polling_thread.join
+      Rdkafka::Bindings.rd_kafka_destroy(@native_kafka)
+      @native_kafka = nil
+    end
+
+    # Partition count for a given topic.
+    # NOTE: If 'allow.auto.create.topics' is set to true in the broker, the topic will be auto-created after returning nil.
+    #
+    # @param topic [String] The topic name.
+    #
+    # @return partition count [Integer,nil]
+    #
+    def partition_count(topic)
+      closed_producer_check(__method__)
+      Rdkafka::Metadata.new(@native_kafka, topic).topics&.first[:partition_count]
     end
 
     # Produces a message to a Kafka topic. The message is added to rdkafka's queue, call {DeliveryHandle#wait wait} on the returned delivery handle to make sure it is delivered.
     #
     # When no partition is specified the underlying Kafka library picks a partition based on the key. If no key is specified, a random partition will be used.
-    # When a timestamp is provided this is used instead of the
+    # When a timestamp is provided this is used instead of the auto-generated timestamp.
     #
     # @param topic [String] The topic to produce to
     # @param payload [String,nil] The message's payload
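The new lifecycle pieces fit together as follows: partition_count fetches per-topic metadata via Rdkafka::Metadata, close now destroys the native client handle, and the public calls shown in this diff first run closed_producer_check (defined later in this file). A hedged usage sketch; the broker address and topic name are placeholders, and Config#producer is the gem's existing factory:

producer = Rdkafka::Config.new("bootstrap.servers" => "localhost:9092").producer

# Backed by Rdkafka::Metadata; per its doc tag this may be nil when the topic
# is unknown (and, with allow.auto.create.topics enabled, the broker may then create it).
puts producer.partition_count("example-topic")

producer.close

# After close the native client is gone, so public calls raise:
producer.partition_count("example-topic") # raises Rdkafka::ClosedProducerError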
@@ -62,7 +78,9 @@ module Rdkafka
     # @raise [RdkafkaError] When adding the message to rdkafka's queue failed
     #
     # @return [DeliveryHandle] Delivery handle that can be used to wait for the result of producing this message
-    def produce(topic:, payload: nil, key: nil, partition: nil, timestamp: nil, headers: nil)
+    def produce(topic:, payload: nil, key: nil, partition: nil, partition_key: nil, timestamp: nil, headers: nil)
+      closed_producer_check(__method__)
+
       # Start by checking and converting the input
 
       # Get payload length
@@ -79,9 +97,15 @@ module Rdkafka
         key.bytesize
       end
 
-
-
-
+      if partition_key
+        partition_count = partition_count(topic)
+        # If the topic is not present, set to -1
+        partition = Rdkafka::Bindings.partitioner(partition_key, partition_count) if partition_count
+      end
+
+      # If partition is nil, use -1 to let librdafka set the partition randomly or
+      # based on the key when present.
+      partition ||= -1
 
       # If timestamp is nil use 0 and let Kafka set one. If an integer or time
       # use it.
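With partition_key, messages sharing a key are hashed onto the same partition via librdkafka's partitioner, independent of the message key used for compaction. A hedged sketch, assuming a `producer` built via Rdkafka::Config as in the earlier example (topic and values are placeholders):

# All payloads produced with the same partition_key land on the same partition.
handle = producer.produce(
  topic:         "example-topic",
  payload:       "some payload",
  partition_key: "user-42"
)

# Wait for the acknowledgement; the report carries partition and offset.
report = handle.wait
puts "delivered to partition #{report.partition} at offset #{report.offset}"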
@@ -100,7 +124,7 @@ module Rdkafka
       delivery_handle[:response] = -1
       delivery_handle[:partition] = -1
       delivery_handle[:offset] = -1
-      DeliveryHandle.register(delivery_handle
+      DeliveryHandle.register(delivery_handle)
 
       args = [
         :int, Rdkafka::Bindings::RD_KAFKA_VTYPE_TOPIC, :string, topic,
@@ -133,7 +157,7 @@ module Rdkafka
         *args
       )
 
-      # Raise error if the produce call was not
+      # Raise error if the produce call was not successful
       if response != 0
         DeliveryHandle.remove(delivery_handle.to_ptr.address)
         raise RdkafkaError.new(response)
@@ -146,5 +170,9 @@ module Rdkafka
     def call_delivery_callback(delivery_handle)
       @delivery_callback.call(delivery_handle) if @delivery_callback
     end
+
+    def closed_producer_check(method)
+      raise Rdkafka::ClosedProducerError.new(method) if @native_kafka.nil?
+    end
   end
 end
data/lib/rdkafka/producer/delivery_handle.rb
CHANGED
@@ -2,63 +2,21 @@ module Rdkafka
   class Producer
     # Handle to wait for a delivery report which is returned when
     # producing a message.
-    class DeliveryHandle <
+    class DeliveryHandle < Rdkafka::AbstractHandle
       layout :pending, :bool,
              :response, :int,
              :partition, :int,
             :offset, :int64
 
-
-
-
-        REGISTRY[address] = handle
-      end
-
-      def self.remove(address)
-        REGISTRY.delete(address)
+      # @return [String] the name of the operation (e.g. "delivery")
+      def operation_name
+        "delivery"
       end
 
-      #
-
-
-      def pending?
-        self[:pending]
+      # @return [DeliveryReport] a report on the delivery of the message
+      def create_result
+        DeliveryReport.new(self[:partition], self[:offset])
       end
-
-      # Wait for the delivery report or raise an error if this takes longer than the timeout.
-      # If there is a timeout this does not mean the message is not delivered, rdkafka might still be working on delivering the message.
-      # In this case it is possible to call wait again.
-      #
-      # @param timeout_in_seconds [Integer, nil] Number of seconds to wait before timing out. If this is nil it does not time out.
-      #
-      # @raise [RdkafkaError] When delivering the message failed
-      # @raise [WaitTimeoutError] When the timeout has been reached and the handle is still pending
-      #
-      # @return [DeliveryReport]
-      def wait(timeout_in_seconds=60)
-        timeout = if timeout_in_seconds
-          Time.now.to_i + timeout_in_seconds
-        else
-          nil
-        end
-        loop do
-          if pending?
-            if timeout && timeout <= Time.now.to_i
-              raise WaitTimeoutError.new("Waiting for delivery timed out after #{timeout_in_seconds} seconds")
-            end
-            sleep 0.1
-            next
-          elsif self[:response] != 0
-            raise RdkafkaError.new(self[:response])
-          else
-            return DeliveryReport.new(self[:partition], self[:offset])
-          end
-        end
-      end
-
-      # Error that is raised when waiting for a delivery handle to complete
-      # takes longer than the specified timeout.
-      class WaitTimeoutError < RuntimeError; end
     end
   end
 end
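This rewrite turns DeliveryHandle into a thin subclass: the registry, polling loop, and timeout handling now live in Rdkafka::AbstractHandle (added in data/lib/rdkafka/abstract_handle.rb, not shown in this section), while each concrete handle only names its operation and builds its result. A schematic, self-contained sketch of that template-method split in plain Ruby (not the gem's actual classes; names and fields are invented for illustration):

# Base class owns the generic waiting machinery.
class SketchHandle
  attr_accessor :pending, :response, :result_fields

  def initialize
    @pending = true
    @response = 0
    @result_fields = {}
  end

  def operation_name
    raise NotImplementedError, "subclasses name their operation"
  end

  def create_result
    raise NotImplementedError, "subclasses build their result object"
  end

  # Simplified stand-in for the shared wait loop: poll until done or timed out.
  def wait(max_wait_seconds: 5, poll_interval: 0.1)
    deadline = Time.now + max_wait_seconds
    while pending
      raise "#{operation_name} timed out" if Time.now >= deadline
      sleep poll_interval
    end
    raise "#{operation_name} failed with code #{response}" unless response.zero?
    create_result
  end
end

# Concrete handle supplies only the two template methods, like DeliveryHandle above.
class SketchDeliveryHandle < SketchHandle
  def operation_name
    "delivery"
  end

  def create_result
    { partition: result_fields[:partition], offset: result_fields[:offset] }
  end
end

handle = SketchDeliveryHandle.new
handle.result_fields = { partition: 0, offset: 42 }
handle.pending = false # in the gem, the delivery callback flips this flag
p handle.wait          # => {:partition=>0, :offset=>42}

The new admin handles in this release (create_topic_handle.rb, delete_topic_handle.rb) presumably slot into the same base class in the same two-method way.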