iopromise 0.1.2 → 0.1.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 3fc60525e45ec8cfa8a6cd5ba34bf396b77e4d42f3fd566137f14367a2ba31ae
- data.tar.gz: 1e70b2147a204a220be6b268ae62f67dc06b79065b8ec07636da414264ae626b
+ metadata.gz: f58c2a28347c1f21fe6c660eff9f2b7ce1b078b1a06a37c91de0d6ec05db1c66
+ data.tar.gz: f5b2f6f66c5f79bb3f68f8cd041f6fecdde33dba2700b3f474ae9a76995378c9
  SHA512:
- metadata.gz: b92e71ab4fa32e5c45f9b4a4fa607844f2558f270f69ddb3a63b1394633a4b3e7eb101f77b06e8bc4efe45ad657e00a7d4bad10f34374356f2dc660db17af525
- data.tar.gz: 4034ef0ae2010b4cea930894d1f7cc05722e6dd0e3c112fe237ce6488441d24d670c098df525046c13d26514ff39ee672c3a26c49f601ddc40b757fbbe691079
+ metadata.gz: '09e91e024b854c41dd60942dc76af20795ca88807f3d6548bbf7d96982babd4c618e7054f9e63c640cca3eb4add0d08c347d8bb7a5ac5357c7969645817562c9'
+ data.tar.gz: d44281e24ab187944c15da2ceae76a1b8be8d8b54dd6a117f2783aec3418e12231283cfe3adc548d8d4d78536d5712c493126c0618f820f541e6142d33cc27dc
@@ -9,7 +9,6 @@ jobs:
  - uses: actions/checkout@v2
  - name: Install dependencies
  run: sudo apt-get install libcurl4-openssl-dev libsasl2-dev
- - uses: niden/actions-memcached@v7
  - name: Set up Ruby
  uses: ruby/setup-ruby@v1
  with:
data/Gemfile CHANGED
@@ -10,16 +10,6 @@ gem "rake", "~> 13.0"
  gem "rspec", "~> 3.0"

  group :development, :test do
- # faraday adapter
- gem 'faraday'
- gem 'typhoeus'
-
- # memcached adapter
- gem 'memcached', :git => 'https://github.com/theojulienne/memcached.git', :branch => 'continuable-get'
-
- # dalli adapter
- gem 'dalli', "= 2.7.11"
-
  # view_component extensions
  gem "rails"
  gem "view_component", require: "view_component/engine"
data/Gemfile.lock CHANGED
@@ -1,14 +1,7 @@
- GIT
- remote: https://github.com/theojulienne/memcached.git
- revision: 18c1da3708f3e7dca316b2f0143b4f05116f7672
- branch: continuable-get
- specs:
- memcached (2.0.0.alpha)
-
  PATH
  remote: .
  specs:
- iopromise (0.1.1)
+ iopromise (0.1.3)
  nio4r
  promise.rb (~> 0.7.4)

@@ -78,21 +71,8 @@ GEM
  builder (3.2.4)
  concurrent-ruby (1.1.8)
  crass (1.0.6)
- dalli (2.7.11)
  diff-lcs (1.4.4)
  erubi (1.10.0)
- ethon (0.14.0)
- ffi (>= 1.15.0)
- faraday (1.4.1)
- faraday-excon (~> 1.1)
- faraday-net_http (~> 1.0)
- faraday-net_http_persistent (~> 1.1)
- multipart-post (>= 1.2, < 3)
- ruby2_keywords (>= 0.0.4)
- faraday-excon (1.1.0)
- faraday-net_http (1.0.1)
- faraday-net_http_persistent (1.1.0)
- ffi (1.15.0)
  globalid (0.4.2)
  activesupport (>= 4.2.0)
  i18n (1.8.10)
@@ -106,7 +86,6 @@ GEM
  method_source (1.0.0)
  mini_mime (1.0.3)
  minitest (5.14.4)
- multipart-post (2.1.1)
  nio4r (2.5.7)
  nokogiri (1.11.3-x86_64-linux)
  racc (~> 1.4)
@@ -155,7 +134,6 @@ GEM
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.10.0)
  rspec-support (3.10.2)
- ruby2_keywords (0.0.4)
  sprockets (4.0.2)
  concurrent-ruby (~> 1.0)
  rack (> 1, < 3)
@@ -165,8 +143,6 @@ GEM
  sprockets (>= 3.0.0)
  stackprof (0.2.17)
  thor (1.1.0)
- typhoeus (1.4.0)
- ethon (>= 0.9.0)
  tzinfo (2.0.4)
  concurrent-ruby (~> 1.0)
  view_component (2.31.1)
@@ -181,15 +157,11 @@ PLATFORMS

  DEPENDENCIES
  benchmark-ips
- dalli (= 2.7.11)
- faraday
  iopromise!
- memcached!
  rails
  rake (~> 13.0)
  rspec (~> 3.0)
  stackprof
- typhoeus
  view_component

  BUNDLED WITH
data/README.md CHANGED
@@ -1,6 +1,23 @@
- # iopromise
+ # IOPromise

- This **experimental pre-release** gem extends promise.rb promises to support an extremely simple pattern for "continuing" execution of all pending promises in an asynchronous non-blocking way.
+ IOPromise is a pattern that allows parallel execution of IO-bound requests (data store and RPCs) behind the abstraction of promises, without needing to introduce the complexity of threading. It uses [promise.rb](https://github.com/lgierth/promise.rb) for promises, and [nio4r](https://github.com/socketry/nio4r) to implement the IO loop.
+
+ A simple example of this behaviour is using [iopromise-faraday](https://github.com/iopromise-ruby/iopromise-faraday) to perform concurrent HTTP requests:
+ ```ruby
+ require 'iopromise/faraday'
+
+ conn = IOPromise::Faraday.new('https://github.com/')
+
+ promises = (1..3).map do
+   conn.get('/status')
+ end
+
+ Promise.all(promises).then do |responses|
+   responses.each_with_index do |response, i|
+     puts "#{i}: #{response.body.strip} #{response.headers["x-github-request-id"]}"
+   end
+ end.sync
+ ```

  ## Installation

@@ -20,7 +37,9 @@ Or install it yourself as:

  ## Usage

- TODO: Write usage instructions here
+ IOPromise itself is a base library that makes it easy to wrap other IO-based workloads inside a promise-based API backed by an event loop. To use IOPromise, look at the following gems:
+
+ * [iopromise-faraday](https://github.com/iopromise-ruby/iopromise-faraday) supports [faraday](https://github.com/lostisland/faraday) HTTP requests, backed by libcurl/ethon/typhoeus.

  ## Development

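The faraday example added to the README above already shows the core pattern. As a hedged editorial aside, the same API also lets each request carry its own `then` transformation before the results are combined; the sketch below assumes exactly the `IOPromise::Faraday` interface shown in the README, and the path and body-length transformation are illustrative only.

```ruby
# Sketch only: a variation of the README example above, assuming the same
# iopromise-faraday API. Each `get` returns a promise immediately, so all
# three requests are in flight concurrently.
require 'iopromise/faraday'

conn = IOPromise::Faraday.new('https://github.com/')

lengths = (1..3).map do
  # per-request processing runs as each response arrives
  conn.get('/status').then { |response| response.body.strip.length }
end

# `sync` drives the IO loop until every promise has resolved, returning
# the array of transformed values.
puts Promise.all(lengths).sync.inspect
```
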
data/bin/setup CHANGED
@@ -4,6 +4,3 @@ IFS=$'\n\t'
  set -vx

  bundle install
-
- # bring up a memcached for testing
- docker run -d -p 11211:11211 memcached:alpine
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module IOPromise
- VERSION = '0.1.2'
+ VERSION = '0.1.3'
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: iopromise
  version: !ruby/object:Gem::Version
- version: 0.1.2
+ version: 0.1.3
  platform: ruby
  authors:
  - Theo Julienne
@@ -60,12 +60,6 @@ files:
  - bin/setup
  - iopromise.gemspec
  - lib/iopromise.rb
- - lib/iopromise/dalli.rb
- - lib/iopromise/dalli/client.rb
- - lib/iopromise/dalli/executor_pool.rb
- - lib/iopromise/dalli/patch_dalli.rb
- - lib/iopromise/dalli/promise.rb
- - lib/iopromise/dalli/response.rb
  - lib/iopromise/deferred.rb
  - lib/iopromise/deferred/executor_pool.rb
  - lib/iopromise/deferred/promise.rb
@@ -73,10 +67,6 @@ files:
  - lib/iopromise/executor_pool/base.rb
  - lib/iopromise/executor_pool/batch.rb
  - lib/iopromise/executor_pool/sequential.rb
- - lib/iopromise/memcached.rb
- - lib/iopromise/memcached/client.rb
- - lib/iopromise/memcached/executor_pool.rb
- - lib/iopromise/memcached/promise.rb
  - lib/iopromise/version.rb
  - lib/iopromise/view_component.rb
  - lib/iopromise/view_component/data_loader.rb
@@ -1,13 +0,0 @@
- # frozen_string_literal: true
-
- require_relative 'dalli/client'
-
- module IOPromise
- module Dalli
- class << self
- def new(*args, **kwargs)
- ::IOPromise::Dalli::Client.new(*args, **kwargs)
- end
- end
- end
- end
@@ -1,142 +0,0 @@
- # frozen_string_literal: true
-
- require 'dalli'
- require_relative 'promise'
- require_relative 'patch_dalli'
-
- module IOPromise
- module Dalli
- class Client
- # General note:
- # There is no need for explicit get_multi or batching, as requests
- # are sent as soon as the IOPromise is created, multiple can be
- # awaiting response at any time, and responses are automatically demuxed.
- def initialize(servers = nil, options = {})
- @cache_nils = !!options[:cache_nils]
- options[:iopromise_async] = true
- @options = options
- @client = ::Dalli::Client.new(servers, options)
- end
-
- # Returns a promise that resolves to a IOPromise::Dalli::Response with the
- # value for the given key, or +nil+ if the key is not found.
- def get(key, options = nil)
- @client.perform(:get, key, options)
- end
-
- # Convenience function that attempts to fetch the given key, or set
- # the key with a dynamically generated value if it does not exist.
- # Either way, the returned promise will resolve to the cached or computed
- # value.
- #
- # If the value does not exist then the provided block is run to generate
- # the value (which can also be a promise), after which the value is set
- # if it still doesn't exist.
- def fetch(key, ttl = nil, options = nil, &block)
- # match the Dalli behaviour exactly
- options = options.nil? ? ::Dalli::Client::CACHE_NILS : options.merge(::Dalli::Client::CACHE_NILS) if @cache_nils
- get(key, options).then do |response|
- not_found = @options[:cache_nils] ?
- !response.exist? :
- response.value.nil?
- if not_found && !block.nil?
- block.call.then do |new_val|
- # delay the final resolution here until after the add succeeds,
- # to guarantee errors are caught. we could potentially allow
- # the add to resolve once it's sent (without confirmation), but
- # we do need to wait on the add promise to ensure it's sent.
- add(key, new_val, ttl, options).then { new_val }
- end
- else
- response.value
- end
- end
- end
-
- # Unconditionally sets the +key+ to the +value+ specified.
- # Returns a promise that resolves to a IOPromise::Dalli::Response.
- def set(key, value, ttl = nil, options = nil)
- @client.perform(:set, key, value, ttl_or_default(ttl), 0, options)
- end
-
- # Conditionally sets the +key+ to the +value+ specified.
- # Returns a promise that resolves to a IOPromise::Dalli::Response.
- def add(key, value, ttl = nil, options = nil)
- @client.perform(:add, key, value, ttl_or_default(ttl), options)
- end
-
- # Conditionally sets the +key+ to the +value+ specified only
- # if the key already exists.
- # Returns a promise that resolves to a IOPromise::Dalli::Response.
- def replace(key, value, ttl = nil, options = nil)
- @client.perform(:replace, key, value, ttl_or_default(ttl), 0, options)
- end
-
- # Deletes the specified key, resolving the promise when complete.
- def delete(key)
- @client.perform(:delete, key, 0)
- end
-
- # Appends a value to the specified key, resolving the promise when complete.
- # Appending only works for values stored with :raw => true.
- def append(key, value)
- value.then do |resolved_value|
- @client.perform(:append, key, resolved_value.to_s)
- end
- end
-
- # Prepend a value to the specified key, resolving the promise when complete.
- # Prepending only works for values stored with :raw => true.
- def prepend(key, value)
- value.then do |resolved_value|
- @client.perform(:prepend, key, resolved_value.to_s)
- end
- end
-
- ##
- # Incr adds the given amount to the counter on the memcached server.
- # Amt must be a positive integer value.
- #
- # If default is nil, the counter must already exist or the operation
- # will fail and will return nil. Otherwise this method will return
- # the new value for the counter.
- #
- # Note that the ttl will only apply if the counter does not already
- # exist. To increase an existing counter and update its TTL, use
- # #cas.
- def incr(key, amt = 1, ttl = nil, default = nil)
- raise ArgumentError, "Positive values only: #{amt}" if amt < 0
- @client.perform(:incr, key, amt.to_i, ttl_or_default(ttl), default)
- end
-
- ##
- # Decr subtracts the given amount from the counter on the memcached server.
- # Amt must be a positive integer value.
- #
- # memcached counters are unsigned and cannot hold negative values. Calling
- # decr on a counter which is 0 will just return 0.
- #
- # If default is nil, the counter must already exist or the operation
- # will fail and will return nil. Otherwise this method will return
- # the new value for the counter.
- #
- # Note that the ttl will only apply if the counter does not already
- # exist. To decrease an existing counter and update its TTL, use
- # #cas.
- def decr(key, amt = 1, ttl = nil, default = nil)
- raise ArgumentError, "Positive values only: #{amt}" if amt < 0
- @client.perform(:decr, key, amt.to_i, ttl_or_default(ttl), default)
- end
-
- # TODO: touch, gat, CAS operations
-
- private
-
- def ttl_or_default(ttl)
- (ttl || @options[:expires_in]).to_i
- rescue NoMethodError
- raise ArgumentError, "Cannot convert ttl (#{ttl}) to an integer"
- end
- end
- end
- end
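
For context on what this release drops, here is a hedged sketch of how the IOPromise::Dalli client deleted above was driven, based only on the API visible in the removed file (`IOPromise::Dalli.new`, `#get`, `#fetch`, and the `Response` object); the server address, keys, TTL and fallback value are hypothetical.

```ruby
# Illustrative only: the dalli-backed client removed in this release.
# Addresses, keys and values are made up.
require 'iopromise/dalli'

cache = IOPromise::Dalli.new('localhost:11211')

# `get` resolves to an IOPromise::Dalli::Response; `fetch` resolves to the
# cached value, running the block (and an `add`) only on a cache miss.
hits    = cache.get('hits:today').then { |response| response.exist? ? response.value : 0 }
profile = cache.fetch('user:1:profile', 300) { { name: 'example' } }

# Both requests are already in flight; `sync` waits for the pair.
puts Promise.all([hits, profile]).sync.inspect
```
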
@@ -1,46 +0,0 @@
- # frozen_string_literal: true
-
- module IOPromise
- module Dalli
- class DalliExecutorPool < IOPromise::ExecutorPool::Base
- def initialize(*)
- super
-
- @iop_monitor = nil
- end
-
- def dalli_server
- @connection_pool
- end
-
- def execute_continue
- dalli_server.execute_continue
- end
-
- def connected_socket(sock)
- close_socket
-
- @iop_monitor = ::IOPromise::ExecutorContext.current.register_observer_io(self, sock, :r)
- end
-
- def close_socket
- unless @iop_monitor.nil?
- @iop_monitor.close
- @iop_monitor = nil
- end
- end
-
- def monitor_ready(monitor, readiness)
- dalli_server.async_io_ready(monitor.readable?, monitor.writable?)
- end
-
- def set_interest(direction, interested)
- if interested
- @iop_monitor.add_interest(direction)
- else
- @iop_monitor.remove_interest(direction)
- end
- end
- end
- end
- end
@@ -1,353 +0,0 @@
- # frozen_string_literal: true
-
- require 'dalli'
- require_relative 'response'
-
- module IOPromise
- module Dalli
- module AsyncClient
- def initialize(servers = nil, options = {})
- @async = options[:iopromise_async] == true
-
- super
- end
-
- def perform(*)
- return super unless @async
-
- begin
- super
- rescue => ex
- # Wrap any connection errors into a promise, this is more forwards-compatible
- # if we ever attempt to make connecting/server fallback nonblocking too.
- Promise.new.tap { |p| p.reject(ex) }
- end
- end
- end
-
- module AsyncServer
- def initialize(attribs, options = {})
- @async = options.delete(:iopromise_async) == true
-
- if @async
- @write_buffer = +""
- @read_buffer = +""
- async_reset
-
- @next_opaque_id = 0
- @pending_ops = {}
-
- @executor_pool = DalliExecutorPool.for(self)
- end
-
- super
- end
-
- def async?
- @async
- end
-
- def close
- if async?
- async_reset
- end
-
- super
- end
-
- def connect
- super
-
- if async?
- @executor_pool.connected_socket(@sock)
- end
- end
-
- def async_reset
- @write_buffer.clear
- @write_offset = 0
-
- @read_buffer.clear
- @read_offset = 0
-
- @executor_pool.close_socket if defined? @executor_pool
- end
-
- def async_io_ready(readable, writable)
- async_sock_write_nonblock if writable
- async_sock_read_nonblock if readable
- end
-
- # called by ExecutorPool to continue processing for this server
- def execute_continue
- timeout = @options[:socket_timeout]
- @pending_ops.select! do |key, op|
- if op.timeout?
- op.reject(Timeout::Error.new)
- next false # this op is done
- end
-
- # let all pending operations know that they are seeing the
- # select loop. this starts the timer for the operation, because
- # it guarantees we're now working on it.
- # this is more accurate than starting the timer when we buffer
- # the write.
- op.in_select_loop
-
- remaining = op.timeout_remaining
- timeout = remaining if remaining < timeout
-
- true # keep
- end
-
- @executor_pool.select_timeout = timeout
- @executor_pool.set_interest(:r, !@pending_ops.empty?)
- end
-
- private
-
- REQUEST = ::Dalli::Server::REQUEST
- OPCODES = ::Dalli::Server::OPCODES
- FORMAT = ::Dalli::Server::FORMAT
-
-
- def promised_request(key, &block)
- promise = ::IOPromise::Dalli::DalliPromise.new(self, key)
-
- new_id = @next_opaque_id
- @pending_ops[new_id] = promise
- @next_opaque_id = (@next_opaque_id + 1) & 0xffff_ffff
-
- async_buffered_write(block.call(new_id))
-
- promise
- end
-
- def get(key, options = nil)
- return super unless async?
-
- promised_request(key) do |opaque|
- [REQUEST, OPCODES[:get], key.bytesize, 0, 0, 0, key.bytesize, opaque, 0, key].pack(FORMAT[:get])
- end
- end
-
- def async_generic_write_op(op, key, value, ttl, cas, options)
- value.then do |value|
- (value, flags) = serialize(key, value, options)
- ttl = sanitize_ttl(ttl)
-
- guard_max_value_with_raise(key, value)
-
- promised_request(key) do |opaque|
- [REQUEST, OPCODES[op], key.bytesize, 8, 0, 0, value.bytesize + key.bytesize + 8, opaque, cas, flags, ttl, key, value].pack(FORMAT[op])
- end
- end
- end
-
- def set(key, value, ttl, cas, options)
- return super unless async?
- async_generic_write_op(:set, key, value, ttl, cas, options)
- end
-
- def add(key, value, ttl, options)
- return super unless async?
-
- async_generic_write_op(:add, key, value, ttl, 0, options)
- end
-
- def replace(key, value, ttl, cas, options)
- return super unless async?
-
- async_generic_write_op(:replace, key, value, ttl, cas, options)
- end
-
- def delete(key, cas)
- return super unless async?
-
- promised_request(key) do |opaque|
- [REQUEST, OPCODES[:delete], key.bytesize, 0, 0, 0, key.bytesize, opaque, cas, key].pack(FORMAT[:delete])
- end
- end
-
- def async_append_prepend_op(op, key, value)
- promised_request(key) do |opaque|
- [REQUEST, OPCODES[op], key.bytesize, 0, 0, 0, value.bytesize + key.bytesize, opaque, 0, key, value].pack(FORMAT[op])
- end
- end
-
- def append(key, value)
- return super unless async?
-
- async_append_prepend_op(:append, key, value)
- end
-
- def prepend(key, value)
- return super unless async?
-
- async_append_prepend_op(:prepend, key, value)
- end
-
- def flush
- return super unless async?
-
- promised_request(nil) do |opaque|
- [REQUEST, OPCODES[:flush], 0, 4, 0, 0, 4, opaque, 0, 0].pack(FORMAT[:flush])
- end
- end
-
- def async_decr_incr(opcode, key, count, ttl, default)
- expiry = default ? sanitize_ttl(ttl) : 0xFFFFFFFF
- default ||= 0
- (h, l) = split(count)
- (dh, dl) = split(default)
- promised_request(key) do |opaque|
- req = [REQUEST, OPCODES[opcode], key.bytesize, 20, 0, 0, key.bytesize + 20, opaque, 0, h, l, dh, dl, expiry, key].pack(FORMAT[opcode])
- end
- end
-
- def decr(key, count, ttl, default)
- return super unless async?
-
- async_decr_incr :decr, key, count, ttl, default
- end
-
- def incr(key, count, ttl, default)
- return super unless async?
-
- async_decr_incr :incr, key, count, ttl, default
- end
-
- def async_buffered_write(data)
- @write_buffer << data
- async_sock_write_nonblock
- end
-
- def async_sock_write_nonblock
- remaining = @write_buffer.byteslice(@write_offset, @write_buffer.length)
- begin
- bytes_written = @sock.write_nonblock(remaining, exception: false)
- rescue Errno::EINTR
- retry
- end
-
- return if bytes_written == :wait_writable
-
- @write_offset += bytes_written
- completed = (@write_offset == @write_buffer.length)
- if completed
- @write_buffer.clear
- @write_offset = 0
- end
- @executor_pool.set_interest(:w, !completed)
- rescue SystemCallError, Timeout::Error => e
- failure!(e)
- end
-
- FULL_HEADER = 'CCnCCnNNQ'
-
- def read_available
- loop do
- result = @sock.read_nonblock(8196, exception: false)
- if result == :wait_readable
- break
- elsif result == :wait_writable
- break
- elsif result
- @read_buffer << result
- else
- raise Errno::ECONNRESET, "Connection reset: #{safe_options.inspect}"
- end
- end
- end
-
- def async_sock_read_nonblock
- read_available
-
- buf = @read_buffer
- pos = @read_offset
-
- while buf.bytesize - pos >= 24
- header = buf.byteslice(pos, 24)
- (magic, opcode, key_length, extra_length, data_type, status, body_length, opaque, cas) = header.unpack(FULL_HEADER)
-
- if buf.bytesize - pos >= 24 + body_length
- exists = (status != 1) # Key not found
- this_pos = pos
-
- # key = buf.byteslice(this_pos + 24 + extra_length, key_length)
- value = buf.byteslice(this_pos + 24 + extra_length + key_length, body_length - key_length - extra_length) if exists
-
- pos = pos + 24 + body_length
-
- promise = @pending_ops.delete(opaque)
- next if promise.nil?
-
- begin
- raise Dalli::DalliError, "Response error #{status}: #{Dalli::RESPONSE_CODES[status]}" unless status == 0 || status == 1 || status == 2 || status == 5
-
- final_value = nil
- if opcode == OPCODES[:incr] || opcode == OPCODES[:decr]
- final_value = value.unpack1("Q>")
- elsif exists
- flags = if extra_length >= 4
- buf.byteslice(this_pos + 24, 4).unpack1("N")
- else
- 0
- end
- final_value = deserialize(value, flags)
- end
-
- response = ::IOPromise::Dalli::Response.new(
- key: promise.key,
- value: final_value,
- exists: exists,
- stored: !(status == 2 || status == 5), # Key exists or Item not stored
- cas: cas,
- )
-
- promise.fulfill(response)
- rescue => ex
- promise.reject(ex)
- end
- else
- # not enough data yet, wait for more
- break
- end
- end
-
- if pos == @read_buffer.length
- @read_buffer.clear
- @read_offset = 0
- else
- @read_offset = pos
- end
-
- rescue SystemCallError, Timeout::Error, EOFError => e
- failure!(e)
- end
-
- def failure!(ex)
- if async?
- # all pending operations need to be rejected when a failure occurs
- @pending_ops.each do |op|
- op.reject(ex)
- end
- @pending_ops = {}
- end
-
- super
- end
-
- # this is guard_max_value from the master version, rather than using the yield block.
- def guard_max_value_with_raise(key, value)
- return if value.bytesize <= @options[:value_max_bytes]
-
- message = "Value for #{key} over max size: #{@options[:value_max_bytes]} <= #{value.bytesize}"
- raise Dalli::ValueOverMaxSize, message
- end
- end
- end
- end
-
- ::Dalli::Server.prepend(IOPromise::Dalli::AsyncServer)
- ::Dalli::Client.prepend(IOPromise::Dalli::AsyncClient)
@@ -1,60 +0,0 @@
- # frozen_string_literal: true
-
- require_relative 'executor_pool'
-
- module IOPromise
- module Dalli
- class DalliPromise < ::IOPromise::Base
- attr_reader :key
-
- def initialize(server = nil, key = nil)
- super()
-
- # when created from a 'then' call, initialize nothing
- return if server.nil? || key.nil?
-
- @server = server
- @key = key
- @start_time = nil
-
- ::IOPromise::ExecutorContext.current.register(self)
- end
-
- def wait
- unless defined?(@server)
- super
- else
- ::IOPromise::ExecutorContext.current.wait_for_all_data(end_when_complete: self)
- end
- end
-
- def execute_pool
- return @pool if defined?(@pool)
- if defined?(@server)
- @pool = DalliExecutorPool.for(@server)
- else
- @pool = nil
- end
- end
-
- def in_select_loop
- if @start_time.nil?
- @start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
- end
- end
-
- def timeout_remaining
- now = Process.clock_gettime(Process::CLOCK_MONOTONIC)
- elapsed = now - @start_time
- remaining = @server.options[:socket_timeout] - elapsed
- return 0 if remaining < 0
- remaining
- end
-
- def timeout?
- return false if @start_time.nil?
- timeout_remaining <= 0
- end
- end
- end
- end
@@ -1,25 +0,0 @@
- # frozen_string_literal: true
-
- module IOPromise
- module Dalli
- class Response
- attr_reader :key, :value, :cas
-
- def initialize(key:, value:, exists: false, stored: false, cas: nil)
- @key = key
- @value = value
- @exists = exists
- @stored = stored
- @cas = cas
- end
-
- def exist?
- @exists
- end
-
- def stored?
- @stored
- end
- end
- end
- end
@@ -1,13 +0,0 @@
- # frozen_string_literal: true
-
- require_relative 'memcached/client'
-
- module IOPromise
- module Memcached
- class << self
- def new(*args, **kwargs)
- ::IOPromise::Memcached::Client.new(*args, **kwargs)
- end
- end
- end
- end
@@ -1,22 +0,0 @@
- # frozen_string_literal: true
-
- require 'memcached'
- require_relative 'promise'
-
- module IOPromise
- module Memcached
- class Client
- def initialize(*args, **kwargs)
- if args.first.is_a?(::Memcached::Client)
- @client = args.first.clone
- else
- @client = ::Memcached::Client.new(*args, **kwargs)
- end
- end
-
- def get_as_promise(key)
- MemcachePromise.new(@client, key)
- end
- end
- end
- end
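
Similarly, a hedged sketch of the memcached-backed client removed here, using only what the deleted files define (`IOPromise::Memcached.new` wrapping `Memcached::Client`, and `#get_as_promise`); the server address and keys are hypothetical.

```ruby
# Illustrative only: the memcached-backed client removed in this release.
# Server address and keys are made up.
require 'iopromise/memcached'

client = IOPromise::Memcached.new('localhost:11211')

# Each promise registers with the executor context as soon as it is created;
# the batch executor pool issues a single begin_get_multi for the pending keys.
promises = %w[views:today views:week views:month].map do |key|
  client.get_as_promise(key)
end

puts Promise.all(promises).sync.inspect
```
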
@@ -1,82 +0,0 @@
- # frozen_string_literal: true
-
- module IOPromise
- module Memcached
- class MemcacheExecutorPool < ::IOPromise::ExecutorPool::Batch
- def initialize(*)
- super
-
- @monitors = {}
- end
-
- def next_batch
- super
-
- unless @current_batch.empty?
- @keys_to_promises = @current_batch.group_by { |promise| promise.key }
- @current_batch.each { |promise| begin_executing(promise) }
- begin
- memcache_client.begin_get_multi(@keys_to_promises.keys)
- rescue => e
- @keys_to_promises.values.flatten.each do |promise|
- promise.reject(e)
- @current_batch.delete(promise)
- end
-
- @keys_to_promises = nil
- end
- end
- end
-
- def execute_continue
- if @current_batch.empty?
- next_batch
- end
-
- if @current_batch.empty?
- @monitors.each do |_, monitor|
- monitor.interests = nil
- end
- return
- end
-
- so_far, readers, writers = memcache_client.continue_get_multi
-
- # when we're done (nothing to wait on), fill in any remaining keys with nil for completions to occur
- if readers.empty? && writers.empty?
- @keys_to_promises.each do |key, _|
- so_far[key] = nil unless so_far.include? key
- end
- end
-
- so_far.each do |key, value|
- next unless @keys_to_promises[key]
- @keys_to_promises[key].each do |promise|
- next if promise.fulfilled?
-
- promise.fulfill(value)
- @current_batch.delete(promise)
- end
- end
-
- @monitors.each do |_, monitor|
- monitor.interests = nil
- end
-
- readers.each do |reader|
- @monitors[reader] ||= ::IOPromise::ExecutorContext.current.register_observer_io(self, reader, :r)
- @monitors[reader].add_interest(:r)
- end
-
- writers.each do |writer|
- @monitors[writer] ||= ::IOPromise::ExecutorContext.current.register_observer_io(self, writer, :w)
- @monitors[writer].add_interest(:w)
- end
- end
-
- def memcache_client
- @connection_pool
- end
- end
- end
- end
@@ -1,32 +0,0 @@
- # frozen_string_literal: true
-
- require_relative 'executor_pool'
-
- module IOPromise
- module Memcached
- class MemcachePromise < ::IOPromise::Base
- attr_reader :key
-
- def initialize(client = nil, key = nil)
- super()
-
- @client = client
- @key = key
-
- ::IOPromise::ExecutorContext.current.register(self) unless @client.nil? || @key.nil?
- end
-
- def wait
- if @client.nil? || @key.nil?
- super
- else
- ::IOPromise::ExecutorContext.current.wait_for_all_data(end_when_complete: self)
- end
- end
-
- def execute_pool
- MemcacheExecutorPool.for(@client)
- end
- end
- end
- end