dalli 2.0.1 → 3.2.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +671 -0
  3. data/Gemfile +15 -3
  4. data/LICENSE +1 -1
  5. data/README.md +33 -148
  6. data/lib/dalli/cas/client.rb +3 -0
  7. data/lib/dalli/client.rb +293 -131
  8. data/lib/dalli/compressor.rb +40 -0
  9. data/lib/dalli/key_manager.rb +121 -0
  10. data/lib/dalli/options.rb +22 -4
  11. data/lib/dalli/pid_cache.rb +40 -0
  12. data/lib/dalli/pipelined_getter.rb +177 -0
  13. data/lib/dalli/protocol/base.rb +250 -0
  14. data/lib/dalli/protocol/binary/request_formatter.rb +117 -0
  15. data/lib/dalli/protocol/binary/response_header.rb +36 -0
  16. data/lib/dalli/protocol/binary/response_processor.rb +239 -0
  17. data/lib/dalli/protocol/binary/sasl_authentication.rb +60 -0
  18. data/lib/dalli/protocol/binary.rb +173 -0
  19. data/lib/dalli/protocol/connection_manager.rb +255 -0
  20. data/lib/dalli/protocol/meta/key_regularizer.rb +31 -0
  21. data/lib/dalli/protocol/meta/request_formatter.rb +121 -0
  22. data/lib/dalli/protocol/meta/response_processor.rb +211 -0
  23. data/lib/dalli/protocol/meta.rb +178 -0
  24. data/lib/dalli/protocol/response_buffer.rb +54 -0
  25. data/lib/dalli/protocol/server_config_parser.rb +86 -0
  26. data/lib/dalli/protocol/ttl_sanitizer.rb +45 -0
  27. data/lib/dalli/protocol/value_compressor.rb +85 -0
  28. data/lib/dalli/protocol/value_marshaller.rb +59 -0
  29. data/lib/dalli/protocol/value_serializer.rb +91 -0
  30. data/lib/dalli/protocol.rb +19 -0
  31. data/lib/dalli/ring.rb +98 -50
  32. data/lib/dalli/server.rb +4 -524
  33. data/lib/dalli/servers_arg_normalizer.rb +54 -0
  34. data/lib/dalli/socket.rb +154 -53
  35. data/lib/dalli/version.rb +5 -1
  36. data/lib/dalli.rb +49 -13
  37. data/lib/rack/session/dalli.rb +169 -26
  38. metadata +53 -88
  39. data/History.md +0 -262
  40. data/Performance.md +0 -42
  41. data/Rakefile +0 -39
  42. data/dalli.gemspec +0 -28
  43. data/lib/action_dispatch/middleware/session/dalli_store.rb +0 -76
  44. data/lib/active_support/cache/dalli_store.rb +0 -203
  45. data/test/abstract_unit.rb +0 -281
  46. data/test/benchmark_test.rb +0 -187
  47. data/test/helper.rb +0 -41
  48. data/test/memcached_mock.rb +0 -113
  49. data/test/test_active_support.rb +0 -163
  50. data/test/test_dalli.rb +0 -461
  51. data/test/test_encoding.rb +0 -43
  52. data/test/test_failover.rb +0 -107
  53. data/test/test_network.rb +0 -54
  54. data/test/test_ring.rb +0 -85
  55. data/test/test_sasl.rb +0 -83
  56. data/test/test_session_store.rb +0 -224
@@ -0,0 +1,121 @@
1
# frozen_string_literal: true

require 'digest/md5'

module Dalli
  ##
  # Manages and validates keys sent to Memcached, ensuring that they meet
  # Memcached key length requirements, and supporting the implementation of
  # optional namespaces on a per-Dalli-client basis.
  ##
  class KeyManager
    MAX_KEY_LENGTH = 250

    NAMESPACE_SEPARATOR = ':'

    # This is a hard coded md5 for historical reasons
    TRUNCATED_KEY_SEPARATOR = ':md5:'

    # This is 249 for historical reasons
    TRUNCATED_KEY_TARGET_SIZE = 249

    DEFAULTS = {
      digest_class: ::Digest::MD5
    }.freeze

    OPTIONS = %i[digest_class namespace].freeze

    attr_reader :namespace

    # Extracts the key-related options from the client options, applies
    # defaults, and validates the digest class before resolving the namespace.
    def initialize(client_options)
      relevant_options = client_options.select { |opt, _| OPTIONS.include?(opt) }
      @key_options = DEFAULTS.merge(relevant_options)
      validate_digest_class_option(@key_options)

      @namespace = namespace_from_options
    end

    ##
    # Validates the key, and transforms as needed.
    #
    # If the key is nil or empty, raises ArgumentError. Whitespace
    # characters are allowed for historical reasons, but likely shouldn't
    # be used.
    # If the key (with namespace) is shorter than the memcached maximum
    # allowed key length, just returns the argument key
    # Otherwise computes a "truncated" key that uses a truncated prefix
    # combined with a 32-byte hex digest of the whole key.
    ##
    def validate_key(key)
      raise ArgumentError, 'key cannot be blank' if key.nil? || key.empty?

      namespaced = key_with_namespace(key)
      return namespaced unless namespaced.length > MAX_KEY_LENGTH

      truncated_key(namespaced)
    end

    ##
    # Returns the key with the namespace prefixed, if a namespace is
    # defined. Otherwise just returns the key
    ##
    def key_with_namespace(key)
      namespace.nil? ? key : "#{evaluate_namespace}#{NAMESPACE_SEPARATOR}#{key}"
    end

    # Strips a leading namespace prefix (if any) from the key.
    def key_without_namespace(key)
      namespace.nil? ? key : key.sub(namespace_regexp, '')
    end

    # The digest implementation used when truncating over-long keys.
    def digest_class
      @digest_class ||= @key_options[:digest_class]
    end

    # Anchored pattern matching the namespace prefix. Rebuilt on every call
    # for Proc namespaces (the value can change); memoized otherwise.
    def namespace_regexp
      return /\A#{Regexp.escape(evaluate_namespace)}:/ if namespace.is_a?(Proc)
      return if namespace.nil?

      @namespace_regexp ||= /\A#{Regexp.escape(namespace)}:/.freeze
    end

    # The digest object only needs to implement hexdigest; duck typing
    # allows custom digests (e.g. for FIPS environments).
    def validate_digest_class_option(opts)
      return if opts[:digest_class].respond_to?(:hexdigest)

      raise ArgumentError, 'The digest_class object must respond to the hexdigest method'
    end

    # Procs are stored as-is (evaluated lazily); everything else is
    # stringified once up front.
    def namespace_from_options
      raw = @key_options[:namespace]
      return nil unless raw

      raw.is_a?(Proc) ? raw : raw.to_s
    end

    # Resolves the namespace to a String, invoking it if it is a Proc.
    def evaluate_namespace
      namespace.is_a?(Proc) ? namespace.call.to_s : namespace
    end

    ##
    # Produces a truncated key, if the raw key is longer than the maximum allowed
    # length. The truncated key is produced by generating a hex digest
    # of the key, and appending that to a truncated section of the key.
    ##
    def truncated_key(key)
      digest = digest_class.hexdigest(key)
      prefix = key[0, prefix_length(digest)]
      "#{prefix}#{TRUNCATED_KEY_SEPARATOR}#{digest}"
    end

    # Length of the raw-key prefix retained ahead of the separator + digest.
    def prefix_length(digest)
      # For historical reasons, truncated keys with namespaces had a length
      # of 250 rather than 249
      target = namespace.nil? ? TRUNCATED_KEY_TARGET_SIZE : TRUNCATED_KEY_TARGET_SIZE + 1
      target - (TRUNCATED_KEY_SEPARATOR.length + digest.length)
    end
  end
end
data/lib/dalli/options.rb CHANGED
@@ -1,19 +1,19 @@
1
- require 'thread'
1
+ # frozen_string_literal: true
2
+
2
3
  require 'monitor'
3
4
 
4
5
  module Dalli
5
-
6
6
  # Make Dalli threadsafe by using a lock around all
7
7
  # public server methods.
8
8
  #
9
- # Dalli::Server.extend(Dalli::Threadsafe)
9
+ # Dalli::Protocol::Binary.extend(Dalli::Threadsafe)
10
10
  #
11
11
  module Threadsafe
12
12
  def self.extended(obj)
13
13
  obj.init_threadsafe
14
14
  end
15
15
 
16
- def request(op, *args)
16
+ def request(opcode, *args)
17
17
  @lock.synchronize do
18
18
  super
19
19
  end
@@ -31,6 +31,24 @@ module Dalli
31
31
  end
32
32
  end
33
33
 
34
+ def pipeline_response_setup
35
+ @lock.synchronize do
36
+ super
37
+ end
38
+ end
39
+
40
+ def pipeline_next_responses
41
+ @lock.synchronize do
42
+ super
43
+ end
44
+ end
45
+
46
+ def pipeline_abort
47
+ @lock.synchronize do
48
+ super
49
+ end
50
+ end
51
+
34
52
  def lock!
35
53
  @lock.mon_enter
36
54
  end
@@ -0,0 +1,40 @@
1
# frozen_string_literal: true

module Dalli
  ##
  # Dalli::PIDCache is a wrapper class for PID checking to avoid system calls
  # when checking the PID.
  ##
  module PIDCache
    if !Process.respond_to?(:fork) # JRuby or TruffleRuby
      # fork is unavailable, so the PID can never change: cache it once.
      @pid = Process.pid
      class << self
        attr_reader :pid
      end
    elsif Process.respond_to?(:_fork) # Ruby 3.1+
      class << self
        attr_reader :pid

        # Re-read the real PID; called at load time and after fork.
        def update!
          @pid = Process.pid
        end
      end
      update!

      ##
      # Dalli::PIDCache::CoreExt hooks into Process to be able to reset the
      # PID cache after fork
      ##
      module CoreExt
        def _fork
          pid = super
          # _fork returns 0 in the child, where the cache must be refreshed.
          PIDCache.update! if pid.zero?
          pid
        end
      end
      Process.singleton_class.prepend(CoreExt)
    else # Ruby 3.0 or older
      # No fork hook available: fall back to the real system call each time.
      class << self
        def pid
          Process.pid
        end
      end
    end
  end
end
@@ -0,0 +1,177 @@
1
# frozen_string_literal: true

module Dalli
  ##
  # Contains logic for the pipelined gets implemented by the client.
  ##
  class PipelinedGetter
    # ring - the Dalli::Ring of servers to query
    # key_manager - Dalli::KeyManager used to validate/namespace outgoing
    #               keys and to strip namespaces from returned keys
    def initialize(ring, key_manager)
      @ring = ring
      @key_manager = key_manager
    end

    ##
    # Yields, one at a time, keys and their values+attributes.
    #
    # Returns {} immediately when there are no keys to fetch.
    ##
    def process(keys, &block)
      return {} if keys.empty?

      @ring.lock do
        servers = setup_requests(keys)
        start_time = Time.now
        servers = fetch_responses(servers, start_time, @ring.socket_timeout, &block) until servers.empty?
      end
    rescue NetworkError => e
      # Retry the whole pipelined get on network errors; lower layers mark
      # failing servers down, so repeated retries converge.
      Dalli.logger.debug { e.inspect }
      Dalli.logger.debug { 'retrying pipelined gets because of timeout' }
      retry
    end

    # Writes the quiet get requests for all keys and terminates each
    # server's query set. Returns the servers that accepted requests.
    def setup_requests(keys)
      groups = groups_for_keys(keys)
      make_getkq_requests(groups)

      # TODO: How does this exit on a NetworkError
      finish_queries(groups.keys)
    end

    ##
    # Loop through the server-grouped sets of keys, writing
    # the corresponding getkq requests to the appropriate servers
    #
    # It's worth noting that we could potentially reduce bytes
    # on the wire by switching from getkq to getq, and using
    # the opaque value to match requests to responses.
    ##
    def make_getkq_requests(groups)
      groups.each do |server, keys_for_server|
        server.request(:pipelined_get, keys_for_server)
      rescue DalliError, NetworkError => e
        # A failed server just means its keys will be missing from the
        # results; the remaining servers are still queried.
        Dalli.logger.debug { e.inspect }
        Dalli.logger.debug { "unable to get keys for server #{server.name}" }
      end
    end

    ##
    # This loops through the servers that have keys in
    # our set, sending the noop to terminate the set of queries.
    ##
    def finish_queries(servers)
      deleted = []

      servers.each do |server|
        next unless server.connected?

        begin
          finish_query_for_server(server)
        rescue Dalli::NetworkError
          raise
        rescue Dalli::DalliError
          deleted.append(server)
        end
      end

      servers.delete_if { |server| deleted.include?(server) }
    rescue Dalli::NetworkError
      abort_without_timeout(servers)
      raise
    end

    # Sends the terminating noop to a single server, logging (and
    # re-raising) protocol errors so the caller can drop the server.
    def finish_query_for_server(server)
      server.pipeline_response_setup
    rescue Dalli::NetworkError
      raise
    rescue Dalli::DalliError => e
      Dalli.logger.debug { e.inspect }
      Dalli.logger.debug { "Results from server: #{server.name} will be missing from the results" }
      raise
    end

    # Swallows Dalli::NetworkError
    def abort_without_timeout(servers)
      servers.each(&:pipeline_abort)
    end

    # Reads available responses, yielding kv pairs to the block. Returns
    # the servers that still have outstanding responses (empty on timeout).
    def fetch_responses(servers, start_time, timeout, &block)
      # Remove any servers which are not connected
      servers.delete_if { |s| !s.connected? }
      return [] if servers.empty?

      time_left = remaining_time(start_time, timeout)
      readable_servers = servers_with_response(servers, time_left)
      if readable_servers.empty?
        abort_with_timeout(servers)
        return []
      end

      # Loop through the servers with responses, and
      # delete any from our list that are finished
      readable_servers.each do |server|
        servers.delete(server) if process_server(server, &block)
      end
      servers
    rescue NetworkError
      # Abort and raise if we encountered a network error. This triggers
      # a retry at the top level.
      abort_without_timeout(servers)
      raise
    end

    # Seconds remaining before the deadline; 0 once the deadline passed.
    def remaining_time(start, timeout)
      elapsed = Time.now - start
      return 0 if elapsed > timeout

      timeout - elapsed
    end

    # Swallows Dalli::NetworkError
    def abort_with_timeout(servers)
      abort_without_timeout(servers)
      servers.each do |server|
        Dalli.logger.debug { "memcached at #{server.name} did not respond within timeout" }
      end

      true # Required to simplify caller
    end

    # Processes responses from a server. Returns true if there are no
    # additional responses from this server.
    def process_server(server)
      server.pipeline_next_responses.each_pair do |key, value_list|
        yield @key_manager.key_without_namespace(key), value_list
      end

      server.pipeline_complete?
    end

    # Blocks (up to timeout) until at least one server socket is readable,
    # returning the servers whose sockets have data available.
    def servers_with_response(servers, timeout)
      return [] if servers.empty?

      # TODO: - This is a bit challenging. Essentially the PipelinedGetter
      # is a reactor, but without the benefit of a Fiber or separate thread.
      # My suspicion is that we may want to try and push this down into the
      # individual servers, but I'm not sure. For now, we keep the
      # mapping between the alerted object (the socket) and the
      # corresponding server here.
      server_map = servers.each_with_object({}) { |s, h| h[s.sock] = s }

      readable, = IO.select(server_map.keys, nil, nil, timeout)
      return [] if readable.nil?

      readable.map { |sock| server_map[sock] }
    end

    # Validates/namespaces the keys and groups them by destination server.
    # Keys that map to no server are logged and dropped from the result.
    def groups_for_keys(*keys)
      keys.flatten!
      keys.map! { |a| @key_manager.validate_key(a.to_s) }
      groups = @ring.keys_grouped_by_server(keys)
      if (unfound_keys = groups.delete(nil))
        Dalli.logger.debug do
          "unable to get keys for #{unfound_keys.length} keys " \
            'because no matching server was found'
        end
      end
      groups
    end
  end
end
@@ -0,0 +1,250 @@
1
# frozen_string_literal: true

require 'forwardable'
require 'socket'
require 'timeout'

module Dalli
  module Protocol
    ##
    # Base class for a single Memcached server, containing logic common to all
    # protocols. Contains logic for managing connection state to the server and value
    # handling.
    ##
    class Base
      extend Forwardable

      attr_accessor :weight, :options

      # Value handling (serialization/compression) is delegated to the
      # marshaller; socket and liveness state to the connection manager.
      def_delegators :@value_marshaller, :serializer, :compressor, :compression_min_size, :compress_by_default?
      def_delegators :@connection_manager, :name, :sock, :hostname, :port, :close, :connected?, :socket_timeout,
                     :socket_type, :up!, :down!, :write, :reconnect_down_server?, :raise_down_error

      # attribs - server config string handed to ServerConfigParser, which
      #           yields host/port/socket type/weight and any user credentials
      # client_options - client-wide options; credentials parsed from attribs
      #                  are merged over these
      def initialize(attribs, client_options = {})
        hostname, port, socket_type, @weight, user_creds = ServerConfigParser.parse(attribs)
        @options = client_options.merge(user_creds)
        @value_marshaller = ValueMarshaller.new(@options)
        @connection_manager = ConnectionManager.new(hostname, port, socket_type, @options)
      end

      # Chokepoint method for error handling and ensuring liveness
      def request(opkey, *args)
        verify_state(opkey)

        begin
          @connection_manager.start_request!
          response = send(opkey, *args)

          # pipelined_get emit query but doesn't read the response(s)
          @connection_manager.finish_request! unless opkey == :pipelined_get

          response
        rescue Dalli::MarshalError => e
          log_marshal_err(args.first, e)
          raise
        rescue Dalli::DalliError
          raise
        rescue StandardError => e
          # An unexpected error leaves the connection in an unknown state,
          # so close the socket before re-raising.
          log_unexpected_err(e)
          close
          raise
        end
      end

      ##
      # Boolean method used by clients of this class to determine if this
      # particular memcached instance is available for use.
      def alive?
        ensure_connected!
      rescue Dalli::NetworkError
        # ensure_connected! raises a NetworkError if connection fails. We
        # want to capture that error and convert it to a boolean value here.
        false
      end

      # No-op locking hooks; overridden by Dalli::Threadsafe when extended.
      def lock!; end

      def unlock!; end

      # Start reading key/value pairs from this connection. This is usually called
      # after a series of GETKQ commands. A NOOP is sent, and the server begins
      # flushing responses for kv pairs that were found.
      #
      # Returns nothing.
      def pipeline_response_setup
        verify_pipelined_state(:getkq)
        write_noop
        response_buffer.reset
      end

      # Attempt to receive and parse as many key/value pairs as possible
      # from this server. After #pipeline_response_setup, this should be invoked
      # repeatedly whenever this server's socket is readable until
      # #pipeline_complete?.
      #
      # Returns a Hash of kv pairs received.
      def pipeline_next_responses
        reconnect_on_pipeline_complete!
        values = {}

        response_buffer.read

        status, cas, key, value = response_buffer.process_single_getk_response
        # status is not nil only if we have a full response to parse
        # in the buffer
        until status.nil?
          # If the status is ok and key is nil, then this is the response
          # to the noop at the end of the pipeline
          finish_pipeline && break if status && key.nil?

          # If the status is ok and the key is not nil, then this is a
          # getkq response with a value that we want to set in the response hash
          values[key] = [value, cas] unless key.nil?

          # Get the next response from the buffer
          status, cas, key, value = response_buffer.process_single_getk_response
        end

        values
      rescue SystemCallError, *TIMEOUT_ERRORS, EOFError => e
        # Socket-level failures mark the request as errored so the connection
        # manager can tear down and (eventually) reconnect.
        @connection_manager.error_on_request!(e)
      end

      # Abort current pipelined get. Generally used to signal an external
      # timeout during pipelined get. The underlying socket is
      # disconnected, and the exception is swallowed.
      #
      # Returns nothing.
      def pipeline_abort
        response_buffer.clear
        @connection_manager.abort_request!
        return true unless connected?

        # Closes the connection, which ensures that our connection
        # is in a clean state for future requests
        @connection_manager.error_on_request!('External timeout')
      rescue NetworkError
        true
      end

      # Did the last call to #pipeline_response_setup complete successfully?
      def pipeline_complete?
        !response_buffer.in_progress?
      end

      # Username for SASL auth; explicit option wins over the environment.
      def username
        @options[:username] || ENV.fetch('MEMCACHE_USERNAME', nil)
      end

      # Password for SASL auth; explicit option wins over the environment.
      def password
        @options[:password] || ENV.fetch('MEMCACHE_PASSWORD', nil)
      end

      # Authentication is attempted whenever a username is configured.
      def require_auth?
        !username.nil?
      end

      # True inside a Dalli::Client#quiet (multi) block for this thread.
      def quiet?
        Thread.current[::Dalli::QUIET]
      end
      alias multi? quiet?

      # NOTE: Additional public methods should be overridden in Dalli::Threadsafe

      private

      # Operations that have quiet (no-response) variants in the protocol.
      ALLOWED_QUIET_OPS = %i[add replace set delete incr decr append prepend flush noop].freeze
      def verify_allowed_quiet!(opkey)
        return if ALLOWED_QUIET_OPS.include?(opkey)

        raise Dalli::NotPermittedMultiOpError, "The operation #{opkey} is not allowed in a quiet block."
      end

      ##
      # Checks to see if we can execute the specified operation. Checks
      # whether the connection is in use, and whether the command is allowed
      ##
      def verify_state(opkey)
        @connection_manager.confirm_ready!
        verify_allowed_quiet!(opkey) if quiet?

        # The ensure_connected call has the side effect of connecting the
        # underlying socket if it is not connected, or there's been a disconnect
        # because of timeout or other error. Method raises an error
        # if it can't connect
        raise_down_error unless ensure_connected!
      end

      # Like verify_state, but for the response phase of a pipelined get,
      # where a request is expected to already be in progress.
      def verify_pipelined_state(_opkey)
        @connection_manager.confirm_in_progress!
        raise_down_error unless connected?
      end

      # The socket connection to the underlying server is initialized as a side
      # effect of this call. In fact, this is the ONLY place where that
      # socket connection is initialized.
      #
      # Both this method and connect need to be in this class so we can do auth
      # as required
      #
      # Since this is invoked exclusively in verify_state!, we don't need to worry about
      # thread safety. Using it elsewhere may require revisiting that assumption.
      def ensure_connected!
        return true if connected?
        return false unless reconnect_down_server?

        connect # This call needs to be in this class so we can do auth
        connected?
      end

      # Coerces the :cache_nils option to a strict boolean; non-Hash
      # arguments are treated as false.
      def cache_nils?(opts)
        return false unless opts.is_a?(Hash)

        opts[:cache_nils] ? true : false
      end

      # Establishes the socket, authenticates if credentials are configured,
      # and marks the server up on success.
      def connect
        @connection_manager.establish_connection
        authenticate_connection if require_auth?
        @version = version # Connect socket if not authed
        up!
      end

      # Writes a batch of quiet get requests for the given keys in a single
      # socket write; responses are read later via the pipeline_* methods.
      def pipelined_get(keys)
        req = +''
        keys.each do |key|
          req << quiet_get_request(key)
        end
        # Could send noop here instead of in pipeline_response_setup
        write(req)
      end

      # Lazily-built buffer that accumulates and parses pipelined responses.
      def response_buffer
        @response_buffer ||= ResponseBuffer.new(@connection_manager, response_processor)
      end

      # Called after the noop response is received at the end of a set
      # of pipelined gets
      def finish_pipeline
        response_buffer.clear
        @connection_manager.finish_request!

        true # to simplify response
      end

      # A completed pipeline whose socket was torn down must reconnect
      # before further responses can be read.
      def reconnect_on_pipeline_complete!
        @connection_manager.reconnect! 'pipelined get has completed' if pipeline_complete?
      end

      def log_marshal_err(key, err)
        Dalli.logger.error "Marshalling error for key '#{key}': #{err.message}"
        Dalli.logger.error 'You are trying to cache a Ruby object which cannot be serialized to memcached.'
      end

      def log_unexpected_err(err)
        Dalli.logger.error "Unexpected exception during Dalli request: #{err.class.name}: #{err.message}"
        Dalli.logger.error err.backtrace.join("\n\t")
      end
    end
  end
end