dalli 2.7.11 → 3.0.6
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of dalli might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/Gemfile +7 -6
- data/History.md +69 -0
- data/README.md +26 -200
- data/lib/dalli/cas/client.rb +1 -57
- data/lib/dalli/client.rb +272 -209
- data/lib/dalli/compressor.rb +12 -2
- data/lib/dalli/key_manager.rb +113 -0
- data/lib/dalli/options.rb +3 -4
- data/lib/dalli/protocol/binary/request_formatter.rb +109 -0
- data/lib/dalli/protocol/binary/response_processor.rb +149 -0
- data/lib/dalli/protocol/binary/sasl_authentication.rb +60 -0
- data/lib/dalli/protocol/binary.rb +544 -0
- data/lib/dalli/protocol/server_config_parser.rb +84 -0
- data/lib/dalli/protocol/ttl_sanitizer.rb +45 -0
- data/lib/dalli/protocol/value_compressor.rb +85 -0
- data/lib/dalli/protocol/value_marshaller.rb +59 -0
- data/lib/dalli/protocol/value_serializer.rb +91 -0
- data/lib/dalli/protocol.rb +8 -0
- data/lib/dalli/ring.rb +86 -81
- data/lib/dalli/server.rb +3 -749
- data/lib/dalli/servers_arg_normalizer.rb +54 -0
- data/lib/dalli/socket.rb +115 -137
- data/lib/dalli/version.rb +4 -1
- data/lib/dalli.rb +32 -14
- data/lib/rack/session/dalli.rb +46 -55
- metadata +103 -10
- data/lib/action_dispatch/middleware/session/dalli_store.rb +0 -82
- data/lib/active_support/cache/dalli_store.rb +0 -441
- data/lib/dalli/railtie.rb +0 -8
@@ -0,0 +1,544 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require 'English'
|
4
|
+
require 'forwardable'
|
5
|
+
require 'socket'
|
6
|
+
require 'timeout'
|
7
|
+
|
8
|
+
require_relative 'binary/request_formatter'
|
9
|
+
require_relative 'binary/response_processor'
|
10
|
+
require_relative 'binary/sasl_authentication'
|
11
|
+
|
12
|
+
module Dalli
  module Protocol
    ##
    # Access point for a single Memcached server, accessed via Memcached's binary
    # protocol. Contains logic for managing connection state to the server (retries, etc),
    # formatting requests to the server, and unpacking responses.
    ##
    class Binary
      extend Forwardable

      attr_accessor :hostname, :port, :weight, :options
      attr_reader :sock, :socket_type

      def_delegators :@value_marshaller, :serializer, :compressor, :compression_min_size, :compress_by_default?

      DEFAULTS = {
        # seconds between trying to contact a remote server
        down_retry_delay: 30,
        # connect/read/write timeout for socket operations
        socket_timeout: 1,
        # times a socket operation may fail before considering the server dead
        socket_max_failures: 2,
        # amount of time to sleep between retries when a failure occurs
        socket_failure_delay: 0.1,
        username: nil,
        password: nil
      }.freeze

      # attribs - a server config string (host, host:port, host:port:weight,
      #           a UNIX socket path, or a memcached:// URI), parsed by
      #           ServerConfigParser.
      # options - client options; merged over DEFAULTS. Note the parser may
      #           inject :username/:password extracted from a URI.
      def initialize(attribs, options = {})
        @hostname, @port, @weight, @socket_type, options = ServerConfigParser.parse(attribs, options)
        @options = DEFAULTS.merge(options)
        @value_marshaller = ValueMarshaller.new(@options)
        @response_processor = ResponseProcessor.new(self, @value_marshaller)

        reset_down_info
        @sock = nil
        @pid = nil
        @request_in_progress = false
      end

      # Human-readable identifier for this server: the socket path for UNIX
      # sockets, "host:port" otherwise. Used in log and error messages.
      def name
        if socket_type == :unix
          hostname
        else
          "#{hostname}:#{port}"
        end
      end

      # Chokepoint method for error handling and ensuring liveness
      def request(opcode, *args)
        verify_state
        # The alive? call has the side effect of connecting the underlying
        # socket if it is not connected, or there's been a disconnect
        # because of timeout or other error. Method raises an error
        # if it can't connect
        raise_memcached_down_err unless alive?

        begin
          # opcode is a symbol naming one of the private operation methods
          # below (:get, :set, :delete, ...) - dispatched via send.
          send(opcode, *args)
        rescue Dalli::MarshalError => e
          log_marshall_err(args.first, e)
          raise
        rescue Dalli::DalliError, Dalli::NetworkError, Dalli::ValueOverMaxSize, Timeout::Error
          raise
        rescue StandardError => e
          # Anything unexpected marks the server down (down! raises
          # Dalli::NetworkError after logging).
          log_unexpected_err(e)
          down!
        end
      end

      # Raises Dalli::NetworkError with the last recorded error/message.
      def raise_memcached_down_err
        raise Dalli::NetworkError,
              "#{name} is down: #{@error} #{@msg}. If you are sure it is running, "\
              "ensure memcached version is > #{::Dalli::MIN_SUPPORTED_MEMCACHED_VERSION}."
      end

      def log_marshall_err(key, err)
        Dalli.logger.error "Marshalling error for key '#{key}': #{err.message}"
        Dalli.logger.error 'You are trying to cache a Ruby object which cannot be serialized to memcached.'
      end

      def log_unexpected_err(err)
        Dalli.logger.error "Unexpected exception during Dalli request: #{err.class.name}: #{err.message}"
        Dalli.logger.error err.backtrace.join("\n\t")
      end

      # The socket connection to the underlying server is initialized as a side
      # effect of this call. In fact, this is the ONLY place where that
      # socket connection is initialized.
      def alive?
        return true if @sock
        return false unless reconnect_down_server?

        connect
        !!@sock
      rescue Dalli::NetworkError
        false
      end

      # True when the server has never been down, or when the configured
      # down_retry_delay has elapsed since the last failure.
      def reconnect_down_server?
        return true unless @last_down_at

        time_to_next_reconnect = @last_down_at + options[:down_retry_delay] - Time.now
        return true unless time_to_next_reconnect.positive?

        Dalli.logger.debug do
          format('down_retry_delay not reached for %<name>s (%<time>.3f seconds left)', name: name,
                                                                                        time: time_to_next_reconnect)
        end
        false
      end

      # Closes the underlying socket and cleans up
      # socket state.
      def close
        return unless @sock

        begin
          @sock.close
        rescue StandardError
          nil
        end
        @sock = nil
        @pid = nil
        abort_request!
      end

      # No-op placeholders; overridden with real locking in Dalli::Threadsafe.
      def lock!; end

      def unlock!; end

      # Start reading key/value pairs from this connection. This is usually called
      # after a series of GETKQ commands. A NOOP is sent, and the server begins
      # flushing responses for kv pairs that were found.
      #
      # Returns nothing.
      def multi_response_start
        verify_state
        write_noop
        @multi_buffer = +''
        @position = 0
        start_request!
      end

      # Did the last call to #multi_response_start complete successfully?
      def multi_response_completed?
        @multi_buffer.nil?
      end

      # Attempt to receive and parse as many key/value pairs as possible
      # from this server. After #multi_response_start, this should be invoked
      # repeatedly whenever this server's socket is readable until
      # #multi_response_completed?.
      #
      # Returns a Hash of kv pairs received.
      def multi_response_nonblock
        reconnect! 'multi_response has completed' if @multi_buffer.nil?

        @multi_buffer << @sock.read_available
        buf = @multi_buffer
        pos = @position
        values = {}

        while buf.bytesize - pos >= ResponseProcessor::RESP_HEADER_SIZE
          header = buf.slice(pos, ResponseProcessor::RESP_HEADER_SIZE)
          _, extra_len, key_len, body_len, cas = @response_processor.unpack_header(header)

          # We've reached the noop at the end of the pipeline
          if key_len.zero?
            finish_multi_response
            break
          end

          # Break and read more unless we already have the entire response for this header
          resp_size = ResponseProcessor::RESP_HEADER_SIZE + body_len
          break unless buf.bytesize - pos >= resp_size

          body = buf.slice(pos + ResponseProcessor::RESP_HEADER_SIZE, body_len)
          begin
            key, value = @response_processor.unpack_response_body(extra_len, key_len, body, true)
            values[key] = [value, cas]
          rescue DalliError
            # TODO: Determine if we should be swallowing
            # this error
          end

          pos = pos + ResponseProcessor::RESP_HEADER_SIZE + body_len
        end
        # TODO: We should be discarding the already processed buffer at this point
        @position = pos

        values
      rescue SystemCallError, Timeout::Error, EOFError => e
        failure!(e)
      end

      # Clears pipeline buffer state after the terminating NOOP is seen.
      def finish_multi_response
        @multi_buffer = nil
        @position = nil
        finish_request!
      end

      # Abort an earlier #multi_response_start. Used to signal an external
      # timeout. The underlying socket is disconnected, and the exception is
      # swallowed.
      #
      # Returns nothing.
      def multi_response_abort
        @multi_buffer = nil
        @position = nil
        abort_request!
        return true unless @sock

        failure!(RuntimeError.new('External timeout'))
      rescue NetworkError
        true
      end

      # Reads count bytes from the socket, tracking request-in-progress state.
      # Socket errors are routed through failure! (retry or mark down).
      def read(count)
        start_request!
        data = @sock.readfull(count)
        finish_request!
        data
      rescue SystemCallError, Timeout::Error, EOFError => e
        failure!(e)
      end

      # Writes bytes to the socket, tracking request-in-progress state.
      def write(bytes)
        start_request!
        result = @sock.write(bytes)
        finish_request!
        result
      rescue SystemCallError, Timeout::Error => e
        failure!(e)
      end

      def socket_timeout
        @socket_timeout ||= @options[:socket_timeout]
      end

      # NOTE: Additional public methods should be overridden in Dalli::Threadsafe

      private

      def request_in_progress?
        @request_in_progress
      end

      def start_request!
        @request_in_progress = true
      end

      def finish_request!
        @request_in_progress = false
      end

      def abort_request!
        @request_in_progress = false
      end

      # Guards against overlapping requests on one socket and against using a
      # socket inherited across a fork.
      def verify_state
        failure!(RuntimeError.new('Already writing to socket')) if request_in_progress?
        reconnect_on_fork if fork_detected?
      end

      # @pid is captured at connect time; a different current pid means the
      # process forked and the socket must not be shared with the parent.
      def fork_detected?
        @pid && @pid != Process.pid
      end

      def reconnect_on_fork
        message = 'Fork detected, re-connecting child process...'
        Dalli.logger.info { message }
        reconnect! message
      end

      # Marks the server instance as needing reconnect. Raises a
      # Dalli::NetworkError with the specified message. Calls close
      # to clean up socket state
      def reconnect!(message)
        close
        sleep(options[:socket_failure_delay]) if options[:socket_failure_delay]
        raise Dalli::NetworkError, message
      end

      # Raises Dalli::NetworkError
      def failure!(exception)
        message = "#{name} failed (count: #{@fail_count}) #{exception.class}: #{exception.message}"
        Dalli.logger.warn { message }

        @fail_count += 1
        if @fail_count >= options[:socket_max_failures]
          down!
        else
          reconnect! 'Socket operation failed, retrying...'
        end
      end

      # Marks the server instance as down. Updates the down_at state
      # and raises an Dalli::NetworkError that includes the underlying
      # error in the message. Calls close to clean up socket state
      def down!
        close
        log_down_detected

        # $ERROR_INFO (require 'English') is the exception being handled, if any.
        @error = $ERROR_INFO&.class&.name
        @msg ||= $ERROR_INFO&.message
        raise Dalli::NetworkError, "#{name} is down: #{@error} #{@msg}"
      end

      def log_down_detected
        @last_down_at = Time.now

        if @down_at
          time = Time.now - @down_at
          Dalli.logger.debug { format('%<name>s is still down (for %<time>.3f seconds now)', name: name, time: time) }
        else
          @down_at = @last_down_at
          Dalli.logger.warn("#{name} is down")
        end
      end

      def log_up_detected
        return unless @down_at

        time = Time.now - @down_at
        Dalli.logger.warn { format('%<name>s is back (downtime was %<time>.3f seconds)', name: name, time: time) }
      end

      def up!
        log_up_detected
        reset_down_info
      end

      def reset_down_info
        @fail_count = 0
        @down_at = nil
        @last_down_at = nil
        @msg = nil
        @error = nil
      end

      # True while inside a Dalli::Client#multi block; quiet (q) opcodes are
      # used so the server suppresses per-op responses.
      def multi?
        Thread.current[:dalli_multi]
      end

      def cache_nils?(opts)
        return false unless opts.is_a?(Hash)

        opts[:cache_nils] ? true : false
      end

      def get(key, options = nil)
        req = RequestFormatter.standard_request(opkey: :get, key: key)
        write(req)
        @response_processor.generic_response(unpack: true, cache_nils: cache_nils?(options))
      end

      # Pipelines one GETKQ per key; responses are drained later via
      # multi_response_start / multi_response_nonblock.
      def send_multiget(keys)
        req = +''
        keys.each do |key|
          req << RequestFormatter.standard_request(opkey: :getkq, key: key)
        end
        # Could send noop here instead of in multi_response_start
        write(req)
      end

      def set(key, value, ttl, cas, options)
        opkey = multi? ? :setq : :set
        process_value_req(opkey, key, value, ttl, cas, options)
      end

      def add(key, value, ttl, options)
        opkey = multi? ? :addq : :add
        cas = 0
        process_value_req(opkey, key, value, ttl, cas, options)
      end

      def replace(key, value, ttl, cas, options)
        opkey = multi? ? :replaceq : :replace
        process_value_req(opkey, key, value, ttl, cas, options)
      end

      # Shared serialize/compress + write path for set/add/replace.
      # rubocop:disable Metrics/ParameterLists
      def process_value_req(opkey, key, value, ttl, cas, options)
        (value, bitflags) = @value_marshaller.store(key, value, options)
        ttl = TtlSanitizer.sanitize(ttl)

        req = RequestFormatter.standard_request(opkey: opkey, key: key,
                                                value: value, bitflags: bitflags,
                                                ttl: ttl, cas: cas)
        write(req)
        @response_processor.cas_response unless multi?
      end
      # rubocop:enable Metrics/ParameterLists

      def delete(key, cas)
        opkey = multi? ? :deleteq : :delete
        req = RequestFormatter.standard_request(opkey: opkey, key: key, cas: cas)
        write(req)
        @response_processor.generic_response unless multi?
      end

      def flush(ttl = 0)
        req = RequestFormatter.standard_request(opkey: :flush, ttl: ttl)
        write(req)
        @response_processor.generic_response
      end

      # This allows us to special case a nil initial value, and
      # handle it differently than a zero. This special value
      # for expiry causes memcached to return a not found
      # if the key doesn't already exist, rather than
      # setting the initial value
      NOT_FOUND_EXPIRY = 0xFFFFFFFF

      def decr_incr(opkey, key, count, ttl, initial)
        expiry = initial ? TtlSanitizer.sanitize(ttl) : NOT_FOUND_EXPIRY
        initial ||= 0
        write(RequestFormatter.decr_incr_request(opkey: opkey, key: key,
                                                 count: count, initial: initial, expiry: expiry))
        @response_processor.decr_incr_response
      end

      def decr(key, count, ttl, initial)
        decr_incr :decr, key, count, ttl, initial
      end

      def incr(key, count, ttl, initial)
        decr_incr :incr, key, count, ttl, initial
      end

      def write_append_prepend(opkey, key, value)
        write_generic RequestFormatter.standard_request(opkey: opkey, key: key, value: value)
      end

      # Writes the request and reads a single generic response.
      def write_generic(bytes)
        write(bytes)
        @response_processor.generic_response
      end

      def write_noop
        req = RequestFormatter.standard_request(opkey: :noop)
        write(req)
      end

      # Noop is a keepalive operation but also used to demarcate the end of a set of pipelined commands.
      # We need to read all the responses at once.
      def noop
        write_noop
        @response_processor.multi_with_keys_response
      end

      def append(key, value)
        write_append_prepend :append, key, value
      end

      def prepend(key, value)
        write_append_prepend :prepend, key, value
      end

      def stats(info = '')
        req = RequestFormatter.standard_request(opkey: :stat, key: info)
        write(req)
        @response_processor.multi_with_keys_response
      end

      def reset_stats
        write_generic RequestFormatter.standard_request(opkey: :stat, key: 'reset')
      end

      # GET that also returns the CAS token for the key.
      def cas(key)
        req = RequestFormatter.standard_request(opkey: :get, key: key)
        write(req)
        @response_processor.data_cas_response
      end

      def version
        write_generic RequestFormatter.standard_request(opkey: :version)
      end

      def touch(key, ttl)
        ttl = TtlSanitizer.sanitize(ttl)
        write_generic RequestFormatter.standard_request(opkey: :touch, key: key, ttl: ttl)
      end

      # Get-and-touch: fetches the value and updates its expiration in one op.
      def gat(key, ttl, options = nil)
        ttl = TtlSanitizer.sanitize(ttl)
        req = RequestFormatter.standard_request(opkey: :gat, key: key, ttl: ttl)
        write(req)
        @response_processor.generic_response(unpack: true, cache_nils: cache_nils?(options))
      end

      def connect
        Dalli.logger.debug { "Dalli::Server#connect #{name}" }

        begin
          @pid = Process.pid
          @sock = memcached_socket
          authenticate_connection if require_auth?
          @version = version # Connect socket if not authed
          up!
        rescue Dalli::DalliError # SASL auth failure
          raise
        rescue SystemCallError, Timeout::Error, EOFError, SocketError => e
          # SocketError = DNS resolution failure
          failure!(e)
        end
      end

      def memcached_socket
        if socket_type == :unix
          Dalli::Socket::UNIX.open(hostname, self, options)
        else
          Dalli::Socket::TCP.open(hostname, port, self, options)
        end
      end

      def require_auth?
        !username.nil?
      end

      def username
        @options[:username] || ENV['MEMCACHE_USERNAME']
      end

      def password
        @options[:password] || ENV['MEMCACHE_PASSWORD']
      end

      include SaslAuthentication
    end
  end
end
|
@@ -0,0 +1,84 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Dalli
  module Protocol
    ##
    # Dalli::Protocol::ServerConfigParser parses a server string passed to
    # a Dalli::Protocol::Binary instance into the hostname, port, weight, and
    # socket_type.
    ##
    class ServerConfigParser
      MEMCACHED_URI_PROTOCOL = 'memcached://'

      # TODO: Revisit this, especially the IP/domain part. Likely
      # can limit character set to LDH + '.'. Hex digit section
      # is there to support IPv6 addresses, which need to be specified with
      # a bounding []
      SERVER_CONFIG_REGEXP = /\A(\[([\h:]+)\]|[^:]+)(?::(\d+))?(?::(\d+))?\z/.freeze

      DEFAULT_PORT = 11_211
      DEFAULT_WEIGHT = 1

      # Entry point. Accepts either a memcached:// URI or a plain
      # host[:port[:weight]] / UNIX-socket-path string, and returns
      # [hostname, port, weight, socket_type, options].
      def self.parse(str, client_options)
        if str.start_with?(MEMCACHED_URI_PROTOCOL)
          parse_uri(str, client_options)
        else
          parse_non_uri(str, client_options)
        end
      end

      # Extracts host/port plus any embedded credentials from a
      # memcached:// URI. Credentials are merged into the client options.
      def self.parse_uri(str, client_options)
        parsed = URI.parse(str)
        merged_options = client_options.merge(username: parsed.user, password: parsed.password)
        [parsed.host, normalize_port(parsed.port), DEFAULT_WEIGHT, :tcp, merged_options]
      end

      # Handles the non-URI forms: "host", "host:port", "host:port:weight",
      # "[ipv6]:port:weight", or a filesystem path for a UNIX socket.
      def self.parse_non_uri(str, client_options)
        match_data = deconstruct_string(str)

        host = normalize_host_from_match(str, match_data)
        socket_type, port, weight =
          if host.start_with?('/')
            [:unix, *attributes_for_unix_socket(match_data)]
          else
            [:tcp, *attributes_for_tcp_socket(match_data)]
          end
        [host, port, weight, socket_type, client_options]
      end

      # Matches str against SERVER_CONFIG_REGEXP, raising on failure or on
      # an empty IPv6 bracket pair.
      def self.deconstruct_string(str)
        match_data = str.match(SERVER_CONFIG_REGEXP)
        raise Dalli::DalliError, "Could not parse hostname #{str}" if match_data.nil? || match_data[1] == '[]'

        match_data
      end

      def self.attributes_for_unix_socket(res)
        # in case of unix socket, allow only setting of weight, not port
        raise Dalli::DalliError, "Could not parse hostname #{res[0]}" if res[4]

        # The first trailing numeric segment is the weight; there is no port.
        [nil, normalize_weight(res[3])]
      end

      def self.attributes_for_tcp_socket(res)
        [normalize_port(res[3]), normalize_weight(res[4])]
      end

      # Capture 2 holds the bracket-stripped IPv6 address when present;
      # otherwise capture 1 is the hostname/path.
      def self.normalize_host_from_match(str, res)
        raise Dalli::DalliError, "Could not parse hostname #{str}" if res.nil? || res[1] == '[]'

        res[2] || res[1]
      end

      def self.normalize_port(port)
        Integer(port || DEFAULT_PORT)
      end

      def self.normalize_weight(weight)
        Integer(weight || DEFAULT_WEIGHT)
      end
    end
  end
end
|
@@ -0,0 +1,45 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Dalli
  module Protocol
    ##
    # Utility class for sanitizing TTL arguments based on Memcached rules.
    # TTLs are either expirations times in seconds (with a maximum value of
    # 30 days) or expiration timestamps. This class sanitizes TTLs to ensure
    # they meet those restrictions.
    ##
    class TtlSanitizer
      # https://github.com/memcached/memcached/blob/master/doc/protocol.txt#L79
      # > An expiration time, in seconds. Can be up to 30 days. After 30 days, is
      # treated as a unix timestamp of an exact date.
      MAX_ACCEPTABLE_EXPIRATION_INTERVAL = 30 * 24 * 60 * 60 # 30 days

      # Ensures the TTL passed to Memcached is a valid TTL in the expected format.
      def self.sanitize(ttl)
        seconds = ttl.to_i
        less_than_max_expiration_interval?(seconds) ? seconds : as_timestamp(seconds)
      end

      # True when the value fits in the 30-day relative-seconds window.
      def self.less_than_max_expiration_interval?(ttl_as_i)
        MAX_ACCEPTABLE_EXPIRATION_INTERVAL >= ttl_as_i
      end

      # Values beyond 30 days are either already unix timestamps (left alone)
      # or over-long intervals, which are converted to a future timestamp.
      def self.as_timestamp(ttl_as_i)
        reference = current_timestamp
        if ttl_as_i > reference
          ttl_as_i # Caller already passed an absolute unix timestamp
        else
          Dalli.logger.debug "Expiration interval (#{ttl_as_i}) too long for Memcached " \
                             'and too short to be a future timestamp,' \
                             'converting to an expiration timestamp'
          reference + ttl_as_i
        end
      end

      # Pulled out into a method so it's easy to stub time
      def self.current_timestamp
        Time.now.to_i
      end
    end
  end
end
|