dalli 3.2.8 → 4.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +169 -1
- data/Gemfile +15 -2
- data/README.md +92 -0
- data/lib/dalli/client.rb +246 -11
- data/lib/dalli/instrumentation.rb +141 -0
- data/lib/dalli/key_manager.rb +23 -8
- data/lib/dalli/pipelined_deleter.rb +82 -0
- data/lib/dalli/pipelined_getter.rb +46 -20
- data/lib/dalli/pipelined_setter.rb +87 -0
- data/lib/dalli/protocol/base.rb +82 -10
- data/lib/dalli/protocol/binary/response_processor.rb +5 -15
- data/lib/dalli/protocol/binary.rb +27 -0
- data/lib/dalli/protocol/connection_manager.rb +16 -11
- data/lib/dalli/protocol/meta/key_regularizer.rb +1 -1
- data/lib/dalli/protocol/meta/request_formatter.rb +42 -10
- data/lib/dalli/protocol/meta/response_processor.rb +72 -26
- data/lib/dalli/protocol/meta.rb +96 -5
- data/lib/dalli/protocol/response_buffer.rb +36 -12
- data/lib/dalli/protocol/server_config_parser.rb +1 -1
- data/lib/dalli/protocol/string_marshaller.rb +65 -0
- data/lib/dalli/protocol/ttl_sanitizer.rb +1 -1
- data/lib/dalli/protocol/value_compressor.rb +2 -11
- data/lib/dalli/protocol/value_marshaller.rb +1 -1
- data/lib/dalli/protocol/value_serializer.rb +59 -40
- data/lib/dalli/protocol.rb +10 -0
- data/lib/dalli/protocol_deprecations.rb +45 -0
- data/lib/dalli/socket.rb +70 -14
- data/lib/dalli/version.rb +1 -1
- data/lib/dalli.rb +11 -2
- data/lib/rack/session/dalli.rb +43 -8
- metadata +25 -10
- data/lib/dalli/server.rb +0 -6
data/lib/dalli/instrumentation.rb ADDED

@@ -0,0 +1,141 @@
+# frozen_string_literal: true
+
+module Dalli
+  ##
+  # Instrumentation support for Dalli. Provides hooks for distributed tracing
+  # via OpenTelemetry when the SDK is available.
+  #
+  # When OpenTelemetry is loaded, Dalli automatically creates spans for cache operations.
+  # When OpenTelemetry is not available, all tracing methods are no-ops with zero overhead.
+  #
+  # Dalli 4.3.2 uses the stable OTel semantic conventions for database spans.
+  #
+  # == Span Attributes
+  #
+  # All spans include the following default attributes:
+  # - +db.system.name+ - Always "memcached"
+  #
+  # Single-key operations (+get+, +set+, +delete+, +incr+, +decr+, etc.) add:
+  # - +db.operation.name+ - The operation name (e.g., "get", "set")
+  # - +server.address+ - The server hostname (e.g., "localhost")
+  # - +server.port+ - The server port as an integer (e.g., 11211); omitted for Unix sockets
+  #
+  # Multi-key operations (+get_multi+) add:
+  # - +db.operation.name+ - "get_multi"
+  # - +db.memcached.key_count+ - Number of keys requested
+  # - +db.memcached.hit_count+ - Number of keys found in cache
+  # - +db.memcached.miss_count+ - Number of keys not found
+  #
+  # Bulk write operations (+set_multi+, +delete_multi+) add:
+  # - +db.operation.name+ - The operation name
+  # - +db.memcached.key_count+ - Number of keys in the operation
+  #
+  # == Optional Attributes
+  #
+  # - +db.query.text+ - The operation and key(s), controlled by the +:otel_db_statement+ client option:
+  #   - +:include+ - Full text (e.g., "get mykey")
+  #   - +:obfuscate+ - Obfuscated (e.g., "get ?")
+  #   - +nil+ (default) - Attribute omitted
+  # - +peer.service+ - Logical service name, set via the +:otel_peer_service+ client option
+  #
+  # == Error Handling
+  #
+  # When an exception occurs during a traced operation:
+  # - The exception is recorded on the span via +record_exception+
+  # - The span status is set to error with the exception message
+  # - The exception is re-raised to the caller
+  #
+  # @example Checking if tracing is enabled
+  #   Dalli::Instrumentation.enabled? # => true if OpenTelemetry is loaded
+  #
+  ##
+  module Instrumentation
+    # Default attributes included on all memcached spans.
+    # @return [Hash] frozen hash with 'db.system.name' => 'memcached'
+    DEFAULT_ATTRIBUTES = { 'db.system.name' => 'memcached' }.freeze
+
+    class << self
+      # Returns the OpenTelemetry tracer if available, nil otherwise.
+      #
+      # The tracer is cached after first lookup for performance.
+      # Uses the library name 'dalli' and current Dalli::VERSION.
+      #
+      # @return [OpenTelemetry::Trace::Tracer, nil] the tracer or nil if OTel unavailable
+      def tracer
+        return @tracer if defined?(@tracer)
+
+        @tracer = (OpenTelemetry.tracer_provider.tracer('dalli', Dalli::VERSION) if defined?(OpenTelemetry))
+      end
+
+      # Returns true if instrumentation is enabled (OpenTelemetry SDK is available).
+      #
+      # @return [Boolean] true if tracing is active, false otherwise
+      def enabled?
+        !tracer.nil?
+      end
+
+      # Wraps a block with a span if instrumentation is enabled.
+      #
+      # Creates a client span with the given name and attributes merged with
+      # DEFAULT_ATTRIBUTES. The block is executed within the span context.
+      # If an exception occurs, it is recorded on the span before re-raising.
+      #
+      # When tracing is disabled (OpenTelemetry not loaded), this method
+      # simply yields directly with zero overhead.
+      #
+      # @param name [String] the span name (e.g., 'get', 'set', 'delete')
+      # @param attributes [Hash] span attributes to merge with defaults.
+      #   Common attributes include:
+      #   - 'db.operation.name' - the operation name
+      #   - 'server.address' - the server hostname
+      #   - 'server.port' - the server port (integer)
+      #   - 'db.memcached.key_count' - number of keys (for multi operations)
+      # @yield the cache operation to trace
+      # @return [Object] the result of the block
+      # @raise [StandardError] re-raises any exception from the block
+      #
+      # @example Tracing a set operation
+      #   trace('set', { 'db.operation.name' => 'set', 'server.address' => 'localhost', 'server.port' => 11211 }) do
+      #     server.set(key, value, ttl)
+      #   end
+      #
+      def trace(name, attributes = {})
+        return yield unless enabled?
+
+        tracer.in_span(name, attributes: DEFAULT_ATTRIBUTES.merge(attributes), kind: :client) do |_span|
+          yield
+        end
+      end
+
+      # Like trace, but yields the span to allow adding attributes after execution.
+      #
+      # This is useful for operations where metrics are only known after the
+      # operation completes, such as get_multi where hit/miss counts depend
+      # on the cache response.
+      #
+      # When tracing is disabled, yields nil as the span argument.
+      #
+      # @param name [String] the span name (e.g., 'get_multi')
+      # @param attributes [Hash] initial span attributes to merge with defaults
+      # @yield [OpenTelemetry::Trace::Span, nil] the span object, or nil if disabled
+      # @return [Object] the result of the block
+      # @raise [StandardError] re-raises any exception from the block
+      #
+      # @example Recording hit/miss metrics after get_multi
+      #   trace_with_result('get_multi', { 'db.operation.name' => 'get_multi' }) do |span|
+      #     results = fetch_from_cache(keys)
+      #     if span
+      #       span.set_attribute('db.memcached.hit_count', results.size)
+      #       span.set_attribute('db.memcached.miss_count', keys.size - results.size)
+      #     end
+      #     results
+      #   end
+      #
+      def trace_with_result(name, attributes = {}, &)
+        return yield(nil) unless enabled?
+
+        tracer.in_span(name, attributes: DEFAULT_ATTRIBUTES.merge(attributes), kind: :client, &)
+      end
+    end
+  end
+end
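The new instrumentation is driven by client options. A minimal usage sketch, assuming the OpenTelemetry SDK gem is installed and that :otel_db_statement and :otel_peer_service are passed to Dalli::Client.new as the comments above describe (the option plumbing is an assumption; the Instrumentation API itself is from this file):

    # Sketch only: OTel setup plus a traced Dalli client.
    require 'opentelemetry/sdk'
    require 'dalli'

    OpenTelemetry::SDK.configure # spans go to whatever exporter is configured

    cache = Dalli::Client.new(
      'localhost:11211',
      otel_db_statement: :obfuscate,     # adds db.query.text as e.g. "get ?"
      otel_peer_service: 'session-cache' # adds peer.service
    )

    Dalli::Instrumentation.enabled? # => true once OpenTelemetry is loaded
    cache.set('greeting', 'hello')  # emits a client span named "set"
    cache.get('greeting')           # emits a client span named "get"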
data/lib/dalli/key_manager.rb CHANGED

@@ -12,7 +12,7 @@ module Dalli
   class KeyManager
     MAX_KEY_LENGTH = 250
 
-
+    DEFAULT_NAMESPACE_SEPARATOR = ':'
 
     # This is a hard coded md5 for historical reasons
     TRUNCATED_KEY_SEPARATOR = ':md5:'

@@ -21,19 +21,26 @@ module Dalli
     TRUNCATED_KEY_TARGET_SIZE = 249
 
     DEFAULTS = {
-      digest_class: ::Digest::MD5
+      digest_class: ::Digest::MD5,
+      namespace_separator: DEFAULT_NAMESPACE_SEPARATOR
     }.freeze
 
-    OPTIONS = %i[digest_class namespace].freeze
+    OPTIONS = %i[digest_class namespace namespace_separator].freeze
 
-    attr_reader :namespace
+    attr_reader :namespace, :namespace_separator
+
+    # Valid separators: non-alphanumeric, single printable ASCII characters
+    # Excludes: alphanumerics, whitespace, control characters
+    VALID_NAMESPACE_SEPARATORS = /\A[^a-zA-Z0-9 \x00-\x1F\x7F]\z/
 
     def initialize(client_options)
       @key_options =
-        DEFAULTS.merge(client_options.
+        DEFAULTS.merge(client_options.slice(*OPTIONS))
       validate_digest_class_option(@key_options)
+      validate_namespace_separator_option(@key_options)
 
       @namespace = namespace_from_options
+      @namespace_separator = @key_options[:namespace_separator]
     end
 
     ##

@@ -61,7 +68,7 @@ module Dalli
     def key_with_namespace(key)
       return key if namespace.nil?
 
-      "#{evaluate_namespace}#{
+      "#{evaluate_namespace}#{namespace_separator}#{key}"
     end
 
     def key_without_namespace(key)

@@ -75,9 +82,9 @@ module Dalli
     end
 
     def namespace_regexp
-      return /\A#{Regexp.escape(evaluate_namespace)}
+      return /\A#{Regexp.escape(evaluate_namespace)}#{Regexp.escape(namespace_separator)}/ if namespace.is_a?(Proc)
 
-      @namespace_regexp ||= /\A#{Regexp.escape(namespace)}
+      @namespace_regexp ||= /\A#{Regexp.escape(namespace)}#{Regexp.escape(namespace_separator)}/ unless namespace.nil?
     end
 
     def validate_digest_class_option(opts)

@@ -86,6 +93,14 @@ module Dalli
       raise ArgumentError, 'The digest_class object must respond to the hexdigest method'
     end
 
+    def validate_namespace_separator_option(opts)
+      sep = opts[:namespace_separator]
+      return if VALID_NAMESPACE_SEPARATORS.match?(sep)
+
+      raise ArgumentError,
+            'namespace_separator must be a single non-alphanumeric character (e.g., ":", "/", "|")'
+    end
+
     def namespace_from_options
       raw_namespace = @key_options[:namespace]
       return nil unless raw_namespace
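The net effect of the namespace_separator change, as a short sketch (option names are taken from OPTIONS above, and the stored key shapes follow key_with_namespace; the Client.new call is the usual Dalli constructor):

    require 'dalli'

    # Default separator ':' -> keys are stored as "myapp:user_1"
    Dalli::Client.new('localhost:11211', namespace: 'myapp')

    # Custom separator must be a single non-alphanumeric printable character,
    # e.g. '|' -> keys are stored as "myapp|user_1"
    Dalli::Client.new('localhost:11211', namespace: 'myapp', namespace_separator: '|')

    # Multi-character, alphanumeric, or whitespace separators fail validation
    # with ArgumentError (see validate_namespace_separator_option above).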
data/lib/dalli/pipelined_deleter.rb ADDED

@@ -0,0 +1,82 @@
+# frozen_string_literal: true
+
+module Dalli
+  ##
+  # Contains logic for the pipelined delete operations implemented by the client.
+  # Efficiently deletes multiple keys by grouping requests by server
+  # and using quiet mode to minimize round trips.
+  ##
+  class PipelinedDeleter
+    def initialize(ring, key_manager)
+      @ring = ring
+      @key_manager = key_manager
+    end
+
+    ##
+    # Deletes multiple keys from memcached.
+    #
+    # @param keys [Array<String>] keys to delete
+    # @return [void]
+    ##
+    def process(keys)
+      return if keys.empty?
+
+      @ring.lock do
+        servers = setup_requests(keys)
+        finish_requests(servers)
+      end
+    rescue NetworkError => e
+      Dalli.logger.debug { e.inspect }
+      Dalli.logger.debug { 'retrying pipelined deletes because of network error' }
+      retry
+    end
+
+    private
+
+    def setup_requests(keys)
+      groups = groups_for_keys(keys)
+      make_delete_requests(groups)
+      groups.keys
+    end
+
+    ##
+    # Loop through the server-grouped sets of keys, writing
+    # the corresponding quiet delete requests to the appropriate servers
+    ##
+    def make_delete_requests(groups)
+      groups.each do |server, keys_for_server|
+        keys_for_server.each do |key|
+          server.request(:pipelined_delete, key)
+        rescue DalliError, NetworkError => e
+          Dalli.logger.debug { e.inspect }
+          Dalli.logger.debug { "unable to delete key #{key} for server #{server.name}" }
+        end
+      end
+    end
+
+    ##
+    # Sends noop to each server to flush responses and ensure all deletes complete.
+    ##
+    def finish_requests(servers)
+      servers.each do |server|
+        server.request(:noop)
+      rescue DalliError, NetworkError => e
+        Dalli.logger.debug { e.inspect }
+        Dalli.logger.debug { "unable to complete pipelined delete on server #{server.name}" }
+      end
+    end
+
+    def groups_for_keys(keys)
+      validated_keys = keys.map { |k| @key_manager.validate_key(k.to_s) }
+      groups = @ring.keys_grouped_by_server(validated_keys)
+
+      if (unfound_keys = groups.delete(nil))
+        Dalli.logger.debug do
+          "unable to delete #{unfound_keys.length} keys because no matching server was found"
+        end
+      end
+
+      groups
+    end
+  end
+end
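For orientation, a hedged sketch of the client-level entry point this class backs; the instrumentation comments earlier in this diff call it delete_multi, and the exact signature is assumed here:

    require 'dalli'

    cache = Dalli::Client.new('localhost:11211')
    cache.set('a', 1)
    cache.set('b', 2)

    # Keys are grouped per server, deleted with quiet requests, and each
    # server is flushed with a trailing noop.
    cache.delete_multi(%w[a b missing-key]) # misses are silently ignored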
data/lib/dalli/pipelined_getter.rb CHANGED

@@ -1,10 +1,19 @@
 # frozen_string_literal: true
 
+require 'set'
+
 module Dalli
   ##
   # Contains logic for the pipelined gets implemented by the client.
   ##
   class PipelinedGetter
+    # For large batches, interleave sends with response draining to prevent
+    # socket buffer deadlock. Only kicks in above this threshold.
+    INTERLEAVE_THRESHOLD = 10_000
+
+    # Number of keys to send before draining responses during interleaved mode
+    CHUNK_SIZE = 10_000
+
     def initialize(ring, key_manager)
       @ring = ring
       @key_manager = key_manager

@@ -17,16 +26,31 @@ module Dalli
       return {} if keys.empty?
 
       @ring.lock do
+        # Stores partial results collected during interleaved send phase
+        @partial_results = {}
         servers = setup_requests(keys)
-        start_time =
+        start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+
+        # First yield any partial results collected during interleaved send
+        yield_partial_results(&block)
+
         servers = fetch_responses(servers, start_time, @ring.socket_timeout, &block) until servers.empty?
       end
-    rescue
+    rescue Dalli::RetryableNetworkError => e
       Dalli.logger.debug { e.inspect }
       Dalli.logger.debug { 'retrying pipelined gets because of timeout' }
       retry
     end
 
+    private
+
+    def yield_partial_results
+      @partial_results.each_pair do |key, value_list|
+        yield @key_manager.key_without_namespace(key), value_list
+      end
+      @partial_results.clear
+    end
+
     def setup_requests(keys)
       groups = groups_for_keys(keys)
       make_getkq_requests(groups)

@@ -45,7 +69,14 @@ module Dalli
     ##
     def make_getkq_requests(groups)
       groups.each do |server, keys_for_server|
-
+        if keys_for_server.size <= INTERLEAVE_THRESHOLD
+          # Small batch - send all at once (existing behavior)
+          server.request(:pipelined_get, keys_for_server)
+        else
+          # Large batch - interleave sends with response draining
+          # Pass @partial_results directly to avoid hash allocation/merge overhead
+          server.request(:pipelined_get_interleaved, keys_for_server, CHUNK_SIZE, @partial_results)
+        end
       rescue DalliError, NetworkError => e
         Dalli.logger.debug { e.inspect }
         Dalli.logger.debug { "unable to get keys for server #{server.name}" }

@@ -57,7 +88,7 @@ module Dalli
     # our set, sending the noop to terminate the set of queries.
     ##
     def finish_queries(servers)
-      deleted =
+      deleted = Set.new
 
       servers.each do |server|
         next unless server.connected?

@@ -67,7 +98,7 @@ module Dalli
       rescue Dalli::NetworkError
         raise
       rescue Dalli::DalliError
-        deleted
+        deleted << server
       end
     end
 

@@ -94,7 +125,7 @@ module Dalli
 
     def fetch_responses(servers, start_time, timeout, &block)
       # Remove any servers which are not connected
-      servers.
+      servers.select!(&:connected?)
       return [] if servers.empty?
 
       time_left = remaining_time(start_time, timeout)

@@ -112,13 +143,13 @@ module Dalli
       servers
     rescue NetworkError
       # Abort and raise if we encountered a network error. This triggers
-      # a retry at the top level.
+      # a retry at the top level on RetryableNetworkError.
      abort_without_timeout(servers)
       raise
     end
 
     def remaining_time(start, timeout)
-      elapsed =
+      elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
       return 0 if elapsed > timeout
 
       timeout - elapsed

@@ -137,8 +168,8 @@ module Dalli
     # Processes responses from a server. Returns true if there are no
     # additional responses from this server.
     def process_server(server)
-      server.pipeline_next_responses
-      yield @key_manager.key_without_namespace(key),
+      server.pipeline_next_responses do |key, value, cas|
+        yield @key_manager.key_without_namespace(key), [value, cas]
       end
 
       server.pipeline_complete?

@@ -147,18 +178,13 @@ module Dalli
     def servers_with_response(servers, timeout)
       return [] if servers.empty?
 
-
-
-      # My suspicion is that we may want to try and push this down into the
-      # individual servers, but I'm not sure. For now, we keep the
-      # mapping between the alerted object (the socket) and the
-      # corrresponding server here.
-      server_map = servers.each_with_object({}) { |s, h| h[s.sock] = s }
-
-      readable, = IO.select(server_map.keys, nil, nil, timeout)
+      sockets = servers.map(&:sock)
+      readable, = IO.select(sockets, nil, nil, timeout)
       return [] if readable.nil?
 
-
+      # For typical server counts (1-5), linear scan is faster than
+      # building and looking up a hash map
+      readable.filter_map { |sock| servers.find { |s| s.sock == sock } }
     end
 
     def groups_for_keys(*keys)
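At the client level the interleaving is transparent; a rough sketch (key names and counts are illustrative, get_multi is the existing public API):

    require 'dalli'

    cache = Dalli::Client.new('localhost:11211')

    small = (1..500).map { |i| "user_#{i}" }
    cache.get_multi(small)    # one pipelined_get per server, as before

    huge = (1..50_000).map { |i| "user_#{i}" }
    cache.get_multi(huge)     # pipelined_get_interleaved: requests written in
                              # 10_000-key chunks, with available responses
                              # drained between chunks so neither socket
                              # buffer fills up and deadlocks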
data/lib/dalli/pipelined_setter.rb ADDED

@@ -0,0 +1,87 @@
+# frozen_string_literal: true
+
+module Dalli
+  ##
+  # Contains logic for the pipelined set operations implemented by the client.
+  # Efficiently writes multiple key-value pairs by grouping requests by server
+  # and using quiet mode to minimize round trips.
+  ##
+  class PipelinedSetter
+    def initialize(ring, key_manager)
+      @ring = ring
+      @key_manager = key_manager
+    end
+
+    ##
+    # Writes multiple key-value pairs to memcached.
+    # Raises an error if any server is unavailable.
+    #
+    # @param hash [Hash] key-value pairs to set
+    # @param ttl [Integer] time-to-live in seconds
+    # @param req_options [Hash] options passed to each set operation
+    # @return [void]
+    ##
+    def process(hash, ttl, req_options)
+      return if hash.empty?
+
+      @ring.lock do
+        servers = setup_requests(hash, ttl, req_options)
+        finish_requests(servers)
+      end
+    rescue Dalli::RetryableNetworkError => e
+      Dalli.logger.debug { e.inspect }
+      Dalli.logger.debug { 'retrying pipelined sets because of network error' }
+      retry
+    end
+
+    private
+
+    def setup_requests(hash, ttl, req_options)
+      groups = groups_for_keys(hash.keys)
+      make_set_requests(groups, hash, ttl, req_options)
+      groups.keys
+    end
+
+    ##
+    # Loop through the server-grouped sets of keys, writing
+    # the corresponding quiet set requests to the appropriate servers
+    ##
+    def make_set_requests(groups, hash, ttl, req_options)
+      groups.each do |server, keys_for_server|
+        keys_for_server.each do |key|
+          original_key = @key_manager.key_without_namespace(key)
+          value = hash[original_key]
+          server.request(:pipelined_set, key, value, ttl, req_options)
+        rescue DalliError, NetworkError => e
+          Dalli.logger.debug { e.inspect }
+          Dalli.logger.debug { "unable to set key #{key} for server #{server.name}" }
+        end
+      end
+    end
+
+    ##
+    # Sends noop to each server to flush responses and ensure all writes complete.
+    ##
+    def finish_requests(servers)
+      servers.each do |server|
+        server.request(:noop)
+      rescue DalliError, NetworkError => e
+        Dalli.logger.debug { e.inspect }
+        Dalli.logger.debug { "unable to complete pipelined set on server #{server.name}" }
+      end
+    end
+
+    def groups_for_keys(keys)
+      validated_keys = keys.map { |k| @key_manager.validate_key(k.to_s) }
+      groups = @ring.keys_grouped_by_server(validated_keys)
+
+      if (unfound_keys = groups.delete(nil))
+        Dalli.logger.debug do
+          "unable to set #{unfound_keys.length} keys because no matching server was found"
+        end
+      end
+
+      groups
+    end
+  end
+end
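A usage sketch of the bulk write path; the instrumentation comments above name the client method set_multi, and the argument shape here mirrors PipelinedSetter#process (hash, ttl), so treat the exact signature as an assumption:

    require 'dalli'

    cache = Dalli::Client.new('localhost:11211')

    pairs = { 'user_1' => 'alice', 'user_2' => 'bob' }
    cache.set_multi(pairs, 300) # quiet sets grouped by server, 300s ttl,
                                # each server flushed with a trailing noop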
data/lib/dalli/protocol/base.rb CHANGED

@@ -23,10 +23,17 @@ module Dalli
       def initialize(attribs, client_options = {})
         hostname, port, socket_type, @weight, user_creds = ServerConfigParser.parse(attribs)
         @options = client_options.merge(user_creds)
-        @
+        @raw_mode = client_options[:raw]
+        @value_marshaller = @raw_mode ? StringMarshaller.new(@options) : ValueMarshaller.new(@options)
         @connection_manager = ConnectionManager.new(hostname, port, socket_type, @options)
       end
 
+      # Returns true if client is in raw mode (no serialization/compression).
+      # In raw mode, we can skip requesting bitflags from the server.
+      def raw_mode?
+        @raw_mode
+      end
+
       # Chokepoint method for error handling and ensuring liveness
       def request(opkey, *args)
         verify_state(opkey)

@@ -35,8 +42,8 @@ module Dalli
         @connection_manager.start_request!
         response = send(opkey, *args)
 
-        # pipelined_get emit query but
-        @connection_manager.finish_request! unless opkey
+        # pipelined_get/pipelined_get_interleaved emit query but don't read the response(s)
+        @connection_manager.finish_request! unless %i[pipelined_get pipelined_get_interleaved].include?(opkey)
 
         response
       rescue Dalli::MarshalError => e

@@ -74,7 +81,9 @@ module Dalli
       def pipeline_response_setup
         verify_pipelined_state(:getkq)
         write_noop
-
+        # Use ensure_ready instead of reset to preserve any data already buffered
+        # during interleaved pipelined get draining
+        response_buffer.ensure_ready
       end
 
       # Attempt to receive and parse as many key/value pairs as possible

@@ -82,10 +91,13 @@ module Dalli
      # repeatedly whenever this server's socket is readable until
      # #pipeline_complete?.
      #
-      #
-
+      # When a block is given, yields (key, value, cas) for each response,
+      # avoiding intermediate Hash allocation. Returns nil.
+      # Without a block, returns a Hash of { key => [value, cas] }.
+      # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
+      def pipeline_next_responses(&block)
         reconnect_on_pipeline_complete!
-        values =
+        values = nil
 
         response_buffer.read
 

@@ -99,16 +111,24 @@ module Dalli
 
           # If the status is ok and the key is not nil, then this is a
           # getkq response with a value that we want to set in the response hash
-
+          unless key.nil?
+            if block
+              yield key, value, cas
+            else
+              values ||= {}
+              values[key] = [value, cas]
+            end
+          end
 
           # Get the next response from the buffer
           status, cas, key, value = response_buffer.process_single_getk_response
         end
 
-        values
-      rescue SystemCallError, *TIMEOUT_ERRORS, EOFError => e
+        values || {}
+      rescue SystemCallError, *TIMEOUT_ERRORS, *SSL_ERRORS, EOFError => e
         @connection_manager.error_on_request!(e)
       end
+      # rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity
 
      # Abort current pipelined get. Generally used to signal an external
      # timeout during pipelined get. The underlying socket is

@@ -154,6 +174,8 @@ module Dalli
       private
 
       ALLOWED_QUIET_OPS = %i[add replace set delete incr decr append prepend flush noop].freeze
+      private_constant :ALLOWED_QUIET_OPS
+
       def verify_allowed_quiet!(opkey)
         return if ALLOWED_QUIET_OPS.include?(opkey)
 

@@ -211,6 +233,11 @@ module Dalli
       end
 
       def pipelined_get(keys)
+        # Clear buffer to remove any stale data from interrupted operations.
+        # Use clear (not reset) to keep pipeline_complete? = true, which is
+        # the expected state before pipeline_response_setup is called.
+        response_buffer.clear
+
         req = +''
         keys.each do |key|
           req << quiet_get_request(key)

@@ -219,6 +246,51 @@ module Dalli
         write(req)
       end
 
+      # For large batches, interleave writing requests with draining responses.
+      # This prevents socket buffer deadlock when sending many keys.
+      # Populates the provided results hash with any responses drained during send.
+      def pipelined_get_interleaved(keys, chunk_size, results)
+        # Initialize the response buffer for draining during send phase
+        response_buffer.ensure_ready
+
+        keys.each_slice(chunk_size) do |chunk|
+          # Build and write this chunk of requests
+          req = +''
+          chunk.each do |key|
+            req << quiet_get_request(key)
+          end
+          write(req)
+          @connection_manager.flush
+
+          # Drain any available responses directly into results hash
+          drain_pipeline_responses(results)
+        end
+      end
+
+      # Non-blocking read and processing of any available pipeline responses.
+      # Used during interleaved pipelined gets to prevent buffer deadlock.
+      # Populates the provided results hash directly to avoid allocation overhead.
+      def drain_pipeline_responses(results)
+        return unless connected?
+
+        # Non-blocking check if socket has data available
+        return unless sock.wait_readable(0)
+
+        # Read available data without blocking
+        response_buffer.read
+
+        # Process any complete responses in the buffer
+        loop do
+          status, cas, key, value = response_buffer.process_single_getk_response
+          break if status.nil? # No complete response available
+
+          results[key] = [value, cas] unless key.nil?
+        end
+      rescue SystemCallError, Dalli::NetworkError
+        # Ignore errors during drain - they'll be handled in fetch_responses
+        nil
+      end
+
       def response_buffer
         @response_buffer ||= ResponseBuffer.new(@connection_manager, response_processor)
       end