dalli 2.7.8 → 3.2.1

Potentially problematic release.

Files changed (39)
  1. checksums.yaml +4 -4
  2. data/Gemfile +12 -1
  3. data/History.md +151 -0
  4. data/README.md +27 -223
  5. data/lib/dalli/cas/client.rb +1 -57
  6. data/lib/dalli/client.rb +227 -254
  7. data/lib/dalli/compressor.rb +12 -2
  8. data/lib/dalli/key_manager.rb +113 -0
  9. data/lib/dalli/options.rb +6 -7
  10. data/lib/dalli/pipelined_getter.rb +177 -0
  11. data/lib/dalli/protocol/base.rb +241 -0
  12. data/lib/dalli/protocol/binary/request_formatter.rb +117 -0
  13. data/lib/dalli/protocol/binary/response_header.rb +36 -0
  14. data/lib/dalli/protocol/binary/response_processor.rb +239 -0
  15. data/lib/dalli/protocol/binary/sasl_authentication.rb +60 -0
  16. data/lib/dalli/protocol/binary.rb +173 -0
  17. data/lib/dalli/protocol/connection_manager.rb +252 -0
  18. data/lib/dalli/protocol/meta/key_regularizer.rb +31 -0
  19. data/lib/dalli/protocol/meta/request_formatter.rb +108 -0
  20. data/lib/dalli/protocol/meta/response_processor.rb +211 -0
  21. data/lib/dalli/protocol/meta.rb +177 -0
  22. data/lib/dalli/protocol/response_buffer.rb +54 -0
  23. data/lib/dalli/protocol/server_config_parser.rb +84 -0
  24. data/lib/dalli/protocol/ttl_sanitizer.rb +45 -0
  25. data/lib/dalli/protocol/value_compressor.rb +85 -0
  26. data/lib/dalli/protocol/value_marshaller.rb +59 -0
  27. data/lib/dalli/protocol/value_serializer.rb +91 -0
  28. data/lib/dalli/protocol.rb +8 -0
  29. data/lib/dalli/ring.rb +94 -83
  30. data/lib/dalli/server.rb +3 -746
  31. data/lib/dalli/servers_arg_normalizer.rb +54 -0
  32. data/lib/dalli/socket.rb +117 -137
  33. data/lib/dalli/version.rb +4 -1
  34. data/lib/dalli.rb +43 -15
  35. data/lib/rack/session/dalli.rb +95 -95
  36. metadata +43 -48
  37. data/lib/action_dispatch/middleware/session/dalli_store.rb +0 -82
  38. data/lib/active_support/cache/dalli_store.rb +0 -429
  39. data/lib/dalli/railtie.rb +0 -8
data/lib/dalli/key_manager.rb ADDED
@@ -0,0 +1,113 @@
+ # frozen_string_literal: true
+
+ require 'digest/md5'
+
+ module Dalli
+   ##
+   # This class manages and validates keys sent to Memcached, ensuring
+   # that they meet Memcached key length requirements, and supporting
+   # the implementation of optional namespaces on a per-Dalli client
+   # basis.
+   ##
+   class KeyManager
+     MAX_KEY_LENGTH = 250
+
+     NAMESPACE_SEPARATOR = ':'
+
+     # This is a hard coded md5 for historical reasons
+     TRUNCATED_KEY_SEPARATOR = ':md5:'
+
+     # This is 249 for historical reasons
+     TRUNCATED_KEY_TARGET_SIZE = 249
+
+     DEFAULTS = {
+       digest_class: ::Digest::MD5
+     }.freeze
+
+     OPTIONS = %i[digest_class namespace].freeze
+
+     attr_reader :namespace
+
+     def initialize(client_options)
+       @key_options =
+         DEFAULTS.merge(client_options.select { |k, _| OPTIONS.include?(k) })
+       validate_digest_class_option(@key_options)
+
+       @namespace = namespace_from_options
+     end
+
+     ##
+     # Validates the key, and transforms as needed.
+     #
+     # If the key is nil or empty, raises ArgumentError. Whitespace
+     # characters are allowed for historical reasons, but likely shouldn't
+     # be used.
+     # If the key (with namespace) is shorter than the memcached maximum
+     # allowed key length, just returns the argument key
+     # Otherwise computes a "truncated" key that uses a truncated prefix
+     # combined with a 32-byte hex digest of the whole key.
+     ##
+     def validate_key(key)
+       raise ArgumentError, 'key cannot be blank' unless key&.length&.positive?
+
+       key = key_with_namespace(key)
+       key.length > MAX_KEY_LENGTH ? truncated_key(key) : key
+     end
+
+     ##
+     # Returns the key with the namespace prefixed, if a namespace is
+     # defined. Otherwise just returns the key
+     ##
+     def key_with_namespace(key)
+       return key if namespace.nil?
+
+       "#{namespace}#{NAMESPACE_SEPARATOR}#{key}"
+     end
+
+     def key_without_namespace(key)
+       return key if namespace.nil?
+
+       key.sub(namespace_regexp, '')
+     end
+
+     def digest_class
+       @digest_class ||= @key_options[:digest_class]
+     end
+
+     def namespace_regexp
+       @namespace_regexp ||= /\A#{Regexp.escape(namespace)}:/.freeze unless namespace.nil?
+     end
+
+     def validate_digest_class_option(opts)
+       return if opts[:digest_class].respond_to?(:hexdigest)
+
+       raise ArgumentError, 'The digest_class object must respond to the hexdigest method'
+     end
+
+     def namespace_from_options
+       raw_namespace = @key_options[:namespace]
+       return nil unless raw_namespace
+       return raw_namespace.call.to_s if raw_namespace.is_a?(Proc)
+
+       raw_namespace.to_s
+     end
+
+     ##
+     # Produces a truncated key, if the raw key is longer than the maximum allowed
+     # length. The truncated key is produced by generating a hex digest
+     # of the key, and appending that to a truncated section of the key.
+     ##
+     def truncated_key(key)
+       digest = digest_class.hexdigest(key)
+       "#{key[0, prefix_length(digest)]}#{TRUNCATED_KEY_SEPARATOR}#{digest}"
+     end
+
+     def prefix_length(digest)
+       return TRUNCATED_KEY_TARGET_SIZE - (TRUNCATED_KEY_SEPARATOR.length + digest.length) if namespace.nil?
+
+       # For historical reasons, truncated keys with namespaces had a length of 250 rather
+       # than 249
+       TRUNCATED_KEY_TARGET_SIZE + 1 - (TRUNCATED_KEY_SEPARATOR.length + digest.length)
+     end
+   end
+ end
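
The KeyManager added above is easy to exercise on its own. The following sketch is not part of the diff; it assumes dalli 3.x is installed, and the namespace value and key names are invented for illustration.

```ruby
require 'dalli'

# Hypothetical usage of the Dalli::KeyManager class shown in the hunk above.
manager = Dalli::KeyManager.new(namespace: 'app')

# Short keys are returned with the namespace and ':' separator prefixed.
manager.validate_key('user:42')   # => "app:user:42"

# Keys whose namespaced form exceeds MAX_KEY_LENGTH (250) are truncated:
# a prefix of the key, then ':md5:', then the 32-character hex digest.
long_key = 'k' * 300
truncated = manager.validate_key(long_key)
truncated.length                  # => 250 (249 when no namespace is set)
truncated.include?(':md5:')       # => true

# Blank keys are rejected outright.
begin
  manager.validate_key(nil)
rescue ArgumentError => e
  e.message                       # => "key cannot be blank"
end
```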
data/lib/dalli/options.rb CHANGED
@@ -1,20 +1,19 @@
  # frozen_string_literal: true
- require 'thread'
+
  require 'monitor'

  module Dalli
-
    # Make Dalli threadsafe by using a lock around all
    # public server methods.
    #
-   # Dalli::Server.extend(Dalli::Threadsafe)
+   # Dalli::Protocol::Binary.extend(Dalli::Threadsafe)
    #
    module Threadsafe
      def self.extended(obj)
        obj.init_threadsafe
      end

-     def request(op, *args)
+     def request(opcode, *args)
        @lock.synchronize do
          super
        end
@@ -32,19 +31,19 @@ module Dalli
        end
      end

-     def multi_response_start
+     def pipeline_response_setup
        @lock.synchronize do
          super
        end
      end

-     def multi_response_nonblock
+     def pipeline_next_responses
        @lock.synchronize do
          super
        end
      end

-     def multi_response_abort
+     def pipeline_abort
        @lock.synchronize do
          super
        end
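
The Threadsafe diff above renames the lock-wrapped pipeline methods to match the new protocol classes, but the pattern is unchanged: the module is extend-ed onto a single object, so its synchronized wrappers sit in front of the class's own methods and super reaches the unsynchronized implementation. Below is a self-contained sketch of that pattern; the Counter class and bump method are invented for illustration, and only the extend/Monitor/super structure mirrors Dalli::Threadsafe.

```ruby
require 'monitor'

# Stand-in for a protocol object (Dalli::Protocol::Binary in the real gem).
class Counter
  def initialize
    @value = 0
  end

  def bump
    @value += 1 # not thread-safe on its own
  end
end

# Same shape as Dalli::Threadsafe: wrap public methods in a Monitor lock.
module LockedCounter
  def self.extended(obj)
    obj.init_threadsafe
  end

  def bump
    @lock.synchronize { super }
  end

  def init_threadsafe
    @lock = Monitor.new
  end
end

counter = Counter.new
counter.extend(LockedCounter)

10.times.map { Thread.new { 1_000.times { counter.bump } } }.each(&:join)
counter.bump # => 10_001, every increment ran under the lock
```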
data/lib/dalli/pipelined_getter.rb ADDED
@@ -0,0 +1,177 @@
+ # frozen_string_literal: true
+
+ module Dalli
+   ##
+   # Contains logic for the pipelined gets implemented by the client.
+   ##
+   class PipelinedGetter
+     def initialize(ring, key_manager)
+       @ring = ring
+       @key_manager = key_manager
+     end
+
+     ##
+     # Yields, one at a time, keys and their values+attributes.
+     #
+     def process(keys, &block)
+       return {} if keys.empty?
+
+       @ring.lock do
+         servers = setup_requests(keys)
+         start_time = Time.now
+         servers = fetch_responses(servers, start_time, @ring.socket_timeout, &block) until servers.empty?
+       end
+     rescue NetworkError => e
+       Dalli.logger.debug { e.inspect }
+       Dalli.logger.debug { 'retrying pipelined gets because of timeout' }
+       retry
+     end
+
+     def setup_requests(keys)
+       groups = groups_for_keys(keys)
+       make_getkq_requests(groups)
+
+       # TODO: How does this exit on a NetworkError
+       finish_queries(groups.keys)
+     end
+
+     ##
+     # Loop through the server-grouped sets of keys, writing
+     # the corresponding getkq requests to the appropriate servers
+     #
+     # It's worth noting that we could potentially reduce bytes
+     # on the wire by switching from getkq to getq, and using
+     # the opaque value to match requests to responses.
+     ##
+     def make_getkq_requests(groups)
+       groups.each do |server, keys_for_server|
+         server.request(:pipelined_get, keys_for_server)
+       rescue DalliError, NetworkError => e
+         Dalli.logger.debug { e.inspect }
+         Dalli.logger.debug { "unable to get keys for server #{server.name}" }
+       end
+     end
+
+     ##
+     # This loops through the servers that have keys in
+     # our set, sending the noop to terminate the set of queries.
+     ##
+     def finish_queries(servers)
+       deleted = []
+
+       servers.each do |server|
+         next unless server.alive?
+
+         begin
+           finish_query_for_server(server)
+         rescue Dalli::NetworkError
+           raise
+         rescue Dalli::DalliError
+           deleted.append(server)
+         end
+       end
+
+       servers.delete_if { |server| deleted.include?(server) }
+     rescue Dalli::NetworkError
+       abort_without_timeout(servers)
+       raise
+     end
+
+     def finish_query_for_server(server)
+       server.pipeline_response_setup
+     rescue Dalli::NetworkError
+       raise
+     rescue Dalli::DalliError => e
+       Dalli.logger.debug { e.inspect }
+       Dalli.logger.debug { "Results from server: #{server.name} will be missing from the results" }
+       raise
+     end
+
+     # Swallows Dalli::NetworkError
+     def abort_without_timeout(servers)
+       servers.each(&:pipeline_abort)
+     end
+
+     def fetch_responses(servers, start_time, timeout, &block)
+       # Remove any servers which are not connected
+       servers.delete_if { |s| !s.connected? }
+       return [] if servers.empty?
+
+       time_left = remaining_time(start_time, timeout)
+       readable_servers = servers_with_response(servers, time_left)
+       if readable_servers.empty?
+         abort_with_timeout(servers)
+         return []
+       end
+
+       # Loop through the servers with responses, and
+       # delete any from our list that are finished
+       readable_servers.each do |server|
+         servers.delete(server) if process_server(server, &block)
+       end
+       servers
+     rescue NetworkError
+       # Abort and raise if we encountered a network error. This triggers
+       # a retry at the top level.
+       abort_without_timeout(servers)
+       raise
+     end
+
+     def remaining_time(start, timeout)
+       elapsed = Time.now - start
+       return 0 if elapsed > timeout
+
+       timeout - elapsed
+     end
+
+     # Swallows Dalli::NetworkError
+     def abort_with_timeout(servers)
+       abort_without_timeout(servers)
+       servers.each do |server|
+         Dalli.logger.debug { "memcached at #{server.name} did not response within timeout" }
+       end
+
+       true # Required to simplify caller
+     end
+
+     # Processes responses from a server. Returns true if there are no
+     # additional responses from this server.
+     def process_server(server)
+       server.pipeline_next_responses.each_pair do |key, value_list|
+         yield @key_manager.key_without_namespace(key), value_list
+       end
+
+       server.pipeline_complete?
+     end
+
+     def servers_with_response(servers, timeout)
+       return [] if servers.empty?
+
+       # TODO: - This is a bit challenging. Essentially the PipelinedGetter
+       # is a reactor, but without the benefit of a Fiber or separate thread.
+       # My suspicion is that we may want to try and push this down into the
+       # individual servers, but I'm not sure. For now, we keep the
+       # mapping between the alerted object (the socket) and the
+       # corrresponding server here.
+       server_map = servers.each_with_object({}) { |s, h| h[s.sock] = s }
+
+       readable, = IO.select(server_map.keys, nil, nil, timeout)
+       return [] if readable.nil?
+
+       readable.map { |sock| server_map[sock] }
+     end
+
+     def groups_for_keys(*keys)
+       keys.flatten!
+       keys.map! { |a| @key_manager.validate_key(a.to_s) }
+       groups = @ring.keys_grouped_by_server(keys)
+       if (unfound_keys = groups.delete(nil))
+         Dalli.logger.debug do
+           "unable to get keys for #{unfound_keys.length} keys "\
+             'because no matching server was found'
+         end
+       end
+       groups
+     end
+   end
+ end
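
From application code this machinery is reached through the client's batch read API rather than directly: PipelinedGetter groups keys by server, fires the quiet getkq requests, and streams responses back as sockets become readable. A minimal usage sketch, assuming a memcached instance on localhost:11211 (the namespace and keys are illustrative):

```ruby
require 'dalli'

client = Dalli::Client.new('localhost:11211', namespace: 'app')

client.set('a', 1)
client.set('b', 2)

# get_multi is the entry point for the pipelined flow shown above: keys
# are grouped per server, written as quiet getkq requests followed by a
# noop, and responses are read back via IO.select as they arrive.
# Keys that miss are simply absent from the result hash.
client.get_multi('a', 'b', 'missing')
# => {"a"=>1, "b"=>2}
```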
data/lib/dalli/protocol/base.rb ADDED
@@ -0,0 +1,241 @@
+ # frozen_string_literal: true
+
+ require 'forwardable'
+ require 'socket'
+ require 'timeout'
+
+ module Dalli
+   module Protocol
+     ##
+     # Base class for a single Memcached server, containing logic common to all
+     # protocols. Contains logic for managing connection state to the server and value
+     # handling.
+     ##
+     class Base
+       extend Forwardable
+
+       attr_accessor :weight, :options
+
+       def_delegators :@value_marshaller, :serializer, :compressor, :compression_min_size, :compress_by_default?
+       def_delegators :@connection_manager, :name, :sock, :hostname, :port, :close, :connected?, :socket_timeout,
+                      :socket_type, :up!, :down!, :write, :reconnect_down_server?, :raise_down_error
+
+       def initialize(attribs, client_options = {})
+         hostname, port, socket_type, @weight, user_creds = ServerConfigParser.parse(attribs)
+         @options = client_options.merge(user_creds)
+         @value_marshaller = ValueMarshaller.new(@options)
+         @connection_manager = ConnectionManager.new(hostname, port, socket_type, @options)
+       end
+
+       # Chokepoint method for error handling and ensuring liveness
+       def request(opkey, *args)
+         verify_state(opkey)
+
+         begin
+           send(opkey, *args)
+         rescue Dalli::MarshalError => e
+           log_marshal_err(args.first, e)
+           raise
+         rescue Dalli::DalliError
+           raise
+         rescue StandardError => e
+           log_unexpected_err(e)
+           down!
+         end
+       end
+
+       ##
+       # Boolean method used by clients of this class to determine if this
+       # particular memcached instance is available for use.
+       def alive?
+         ensure_connected!
+       rescue Dalli::NetworkError
+         # ensure_connected! raises a NetworkError if connection fails. We
+         # want to capture that error and convert it to a boolean value here.
+         false
+       end
+
+       def lock!; end
+
+       def unlock!; end
+
+       # Start reading key/value pairs from this connection. This is usually called
+       # after a series of GETKQ commands. A NOOP is sent, and the server begins
+       # flushing responses for kv pairs that were found.
+       #
+       # Returns nothing.
+       def pipeline_response_setup
+         verify_state(:getkq)
+         write_noop
+         response_buffer.reset
+         @connection_manager.start_request!
+       end
+
+       # Attempt to receive and parse as many key/value pairs as possible
+       # from this server. After #pipeline_response_setup, this should be invoked
+       # repeatedly whenever this server's socket is readable until
+       # #pipeline_complete?.
+       #
+       # Returns a Hash of kv pairs received.
+       def pipeline_next_responses
+         reconnect_on_pipeline_complete!
+         values = {}
+
+         response_buffer.read
+
+         status, cas, key, value = response_buffer.process_single_getk_response
+         # status is not nil only if we have a full response to parse
+         # in the buffer
+         until status.nil?
+           # If the status is ok and key is nil, then this is the response
+           # to the noop at the end of the pipeline
+           finish_pipeline && break if status && key.nil?
+
+           # If the status is ok and the key is not nil, then this is a
+           # getkq response with a value that we want to set in the response hash
+           values[key] = [value, cas] unless key.nil?
+
+           # Get the next response from the buffer
+           status, cas, key, value = response_buffer.process_single_getk_response
+         end
+
+         values
+       rescue SystemCallError, Timeout::Error, EOFError => e
+         @connection_manager.error_on_request!(e)
+       end
+
+       # Abort current pipelined get. Generally used to signal an external
+       # timeout during pipelined get. The underlying socket is
+       # disconnected, and the exception is swallowed.
+       #
+       # Returns nothing.
+       def pipeline_abort
+         response_buffer.clear
+         @connection_manager.abort_request!
+         return true unless connected?
+
+         # Closes the connection, which ensures that our connection
+         # is in a clean state for future requests
+         @connection_manager.error_on_request!('External timeout')
+       rescue NetworkError
+         true
+       end
+
+       # Did the last call to #pipeline_response_setup complete successfully?
+       def pipeline_complete?
+         !response_buffer.in_progress?
+       end
+
+       def username
+         @options[:username] || ENV['MEMCACHE_USERNAME']
+       end
+
+       def password
+         @options[:password] || ENV['MEMCACHE_PASSWORD']
+       end
+
+       def require_auth?
+         !username.nil?
+       end
+
+       def quiet?
+         Thread.current[::Dalli::QUIET]
+       end
+       alias multi? quiet?
+
+       # NOTE: Additional public methods should be overridden in Dalli::Threadsafe
+
+       private
+
+       ALLOWED_QUIET_OPS = %i[add replace set delete incr decr append prepend flush noop].freeze
+       def verify_allowed_quiet!(opkey)
+         return if ALLOWED_QUIET_OPS.include?(opkey)
+
+         raise Dalli::NotPermittedMultiOpError, "The operation #{opkey} is not allowed in a quiet block."
+       end
+
+       ##
+       # Checks to see if we can execute the specified operation. Checks
+       # whether the connection is in use, and whether the command is allowed
+       ##
+       def verify_state(opkey)
+         @connection_manager.confirm_ready!
+         verify_allowed_quiet!(opkey) if quiet?
+
+         # The ensure_connected call has the side effect of connecting the
+         # underlying socket if it is not connected, or there's been a disconnect
+         # because of timeout or other error. Method raises an error
+         # if it can't connect
+         raise_down_error unless ensure_connected!
+       end
+
+       # The socket connection to the underlying server is initialized as a side
+       # effect of this call. In fact, this is the ONLY place where that
+       # socket connection is initialized.
+       #
+       # Both this method and connect need to be in this class so we can do auth
+       # as required
+       #
+       # Since this is invoked exclusively in verify_state!, we don't need to worry about
+       # thread safety. Using it elsewhere may require revisiting that assumption.
+       def ensure_connected!
+         return true if connected?
+         return false unless reconnect_down_server?
+
+         connect # This call needs to be in this class so we can do auth
+         connected?
+       end
+
+       def cache_nils?(opts)
+         return false unless opts.is_a?(Hash)
+
+         opts[:cache_nils] ? true : false
+       end
+
+       def connect
+         @connection_manager.establish_connection
+         authenticate_connection if require_auth?
+         @version = version # Connect socket if not authed
+         up!
+       rescue Dalli::DalliError
+         raise
+       end
+
+       def pipelined_get(keys)
+         req = +''
+         keys.each do |key|
+           req << quiet_get_request(key)
+         end
+         # Could send noop here instead of in pipeline_response_setup
+         write(req)
+       end
+
+       def response_buffer
+         @response_buffer ||= ResponseBuffer.new(@connection_manager, response_processor)
+       end
+
+       # Called after the noop response is received at the end of a set
+       # of pipelined gets
+       def finish_pipeline
+         response_buffer.clear
+         @connection_manager.finish_request!
+
+         true # to simplify response
+       end
+
+       def reconnect_on_pipeline_complete!
+         @connection_manager.reconnect! 'pipelined get has completed' if pipeline_complete?
+       end
+
+       def log_marshal_err(key, err)
+         Dalli.logger.error "Marshalling error for key '#{key}': #{err.message}"
+         Dalli.logger.error 'You are trying to cache a Ruby object which cannot be serialized to memcached.'
+       end
+
+       def log_unexpected_err(err)
+         Dalli.logger.error "Unexpected exception during Dalli request: #{err.class.name}: #{err.message}"
+         Dalli.logger.error err.backtrace.join("\n\t")
+       end
+     end
+   end
+ end
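
One consequence of the request chokepoint above is visible at the client level: while the thread-local QUIET flag is set, verify_state rejects any operation not listed in ALLOWED_QUIET_OPS. A sketch of that behavior, assuming a local memcached and the client-level #quiet block wrapper (which sets the QUIET flag but is not part of this hunk):

```ruby
require 'dalli'

client = Dalli::Client.new('localhost:11211')

client.quiet do
  client.set('counter', 0)   # allowed: :set is in ALLOWED_QUIET_OPS
  client.delete('counter')   # allowed: :delete is in ALLOWED_QUIET_OPS
  client.get('counter')      # raises Dalli::NotPermittedMultiOpError
end
```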
data/lib/dalli/protocol/binary/request_formatter.rb ADDED
@@ -0,0 +1,117 @@
+ # frozen_string_literal: true
+
+ module Dalli
+   module Protocol
+     class Binary
+       ##
+       # Class that encapsulates logic for formatting binary protocol requests
+       # to memcached.
+       ##
+       class RequestFormatter
+         REQUEST = 0x80
+
+         OPCODES = {
+           get: 0x00,
+           set: 0x01,
+           add: 0x02,
+           replace: 0x03,
+           delete: 0x04,
+           incr: 0x05,
+           decr: 0x06,
+           flush: 0x08,
+           noop: 0x0A,
+           version: 0x0B,
+           getkq: 0x0D,
+           append: 0x0E,
+           prepend: 0x0F,
+           stat: 0x10,
+           setq: 0x11,
+           addq: 0x12,
+           replaceq: 0x13,
+           deleteq: 0x14,
+           incrq: 0x15,
+           decrq: 0x16,
+           flushq: 0x18,
+           appendq: 0x19,
+           prependq: 0x1A,
+           touch: 0x1C,
+           gat: 0x1D,
+           auth_negotiation: 0x20,
+           auth_request: 0x21,
+           auth_continue: 0x22
+         }.freeze
+
+         REQ_HEADER_FORMAT = 'CCnCCnNNQ'
+
+         KEY_ONLY = 'a*'
+         TTL_AND_KEY = 'Na*'
+         KEY_AND_VALUE = 'a*a*'
+         INCR_DECR = 'NNNNNa*'
+         TTL_ONLY = 'N'
+         NO_BODY = ''
+
+         BODY_FORMATS = {
+           get: KEY_ONLY,
+           getkq: KEY_ONLY,
+           delete: KEY_ONLY,
+           deleteq: KEY_ONLY,
+           stat: KEY_ONLY,
+
+           append: KEY_AND_VALUE,
+           prepend: KEY_AND_VALUE,
+           appendq: KEY_AND_VALUE,
+           prependq: KEY_AND_VALUE,
+           auth_request: KEY_AND_VALUE,
+           auth_continue: KEY_AND_VALUE,
+
+           set: 'NNa*a*',
+           setq: 'NNa*a*',
+           add: 'NNa*a*',
+           addq: 'NNa*a*',
+           replace: 'NNa*a*',
+           replaceq: 'NNa*a*',
+
+           incr: INCR_DECR,
+           decr: INCR_DECR,
+           incrq: INCR_DECR,
+           decrq: INCR_DECR,
+
+           flush: TTL_ONLY,
+           flushq: TTL_ONLY,
+
+           noop: NO_BODY,
+           auth_negotiation: NO_BODY,
+           version: NO_BODY,
+
+           touch: TTL_AND_KEY,
+           gat: TTL_AND_KEY
+         }.freeze
+         FORMAT = BODY_FORMATS.transform_values { |v| REQ_HEADER_FORMAT + v; }
+
+         # rubocop:disable Metrics/ParameterLists
+         def self.standard_request(opkey:, key: nil, value: nil, opaque: 0, cas: 0, bitflags: nil, ttl: nil)
+           extra_len = (bitflags.nil? ? 0 : 4) + (ttl.nil? ? 0 : 4)
+           key_len = key.nil? ? 0 : key.bytesize
+           value_len = value.nil? ? 0 : value.bytesize
+           header = [REQUEST, OPCODES[opkey], key_len, extra_len, 0, 0, extra_len + key_len + value_len, opaque, cas]
+           body = [bitflags, ttl, key, value].compact
+           (header + body).pack(FORMAT[opkey])
+         end
+         # rubocop:enable Metrics/ParameterLists
+
+         def self.decr_incr_request(opkey:, key: nil, count: nil, initial: nil, expiry: nil)
+           extra_len = 20
+           (h, l) = as_8byte_uint(count)
+           (dh, dl) = as_8byte_uint(initial)
+           header = [REQUEST, OPCODES[opkey], key.bytesize, extra_len, 0, 0, key.bytesize + extra_len, 0, 0]
+           body = [h, l, dh, dl, expiry, key]
+           (header + body).pack(FORMAT[opkey])
+         end
+
+         def self.as_8byte_uint(val)
+           [val >> 32, 0xFFFFFFFF & val]
+         end
+       end
+     end
+   end
+ end
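
Since RequestFormatter is a thin wrapper around Array#pack, the wire layout can be checked in a few lines of plain Ruby. A worked sketch using the same format strings as the class above (standalone, no memcached needed):

```ruby
# 24-byte binary-protocol request header: magic, opcode, key length,
# extras length, data type, vbucket, total body length, opaque, cas.
REQ_HEADER_FORMAT = 'CCnCCnNNQ'

# A getkq (0x0D) request for "foo": no extras and no value, so the total
# body length equals the key length.
key = 'foo'
header = [0x80, 0x0D, key.bytesize, 0, 0, 0, key.bytesize, 0, 0]
packed = (header + [key]).pack(REQ_HEADER_FORMAT + 'a*')

packed.bytesize               # => 27 (24-byte header + 3-byte key)
packed.unpack('CCnCCnNNQa*')  # => [128, 13, 3, 0, 0, 0, 3, 0, 0, "foo"]

# Incr/decr bodies carry 64-bit delta and initial values as pairs of
# 32-bit words, which is exactly what as_8byte_uint produces:
val = (2**33) + 5
[val >> 32, 0xFFFFFFFF & val] # => [2, 5]
```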