iopromise 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/workflows/main.yml +21 -0
- data/.gitignore +11 -0
- data/.rspec +3 -0
- data/CODE_OF_CONDUCT.md +84 -0
- data/Gemfile +26 -0
- data/Gemfile.lock +191 -0
- data/LICENSE +21 -0
- data/LICENSE.txt +21 -0
- data/README.md +41 -0
- data/Rakefile +8 -0
- data/bin/console +15 -0
- data/bin/setup +9 -0
- data/iopromise.gemspec +30 -0
- data/lib/iopromise.rb +54 -0
- data/lib/iopromise/dalli.rb +13 -0
- data/lib/iopromise/dalli/client.rb +146 -0
- data/lib/iopromise/dalli/executor_pool.rb +13 -0
- data/lib/iopromise/dalli/patch_dalli.rb +337 -0
- data/lib/iopromise/dalli/promise.rb +52 -0
- data/lib/iopromise/dalli/response.rb +25 -0
- data/lib/iopromise/deferred.rb +13 -0
- data/lib/iopromise/deferred/executor_pool.rb +29 -0
- data/lib/iopromise/deferred/promise.rb +38 -0
- data/lib/iopromise/executor_context.rb +114 -0
- data/lib/iopromise/executor_pool/base.rb +47 -0
- data/lib/iopromise/executor_pool/batch.rb +23 -0
- data/lib/iopromise/executor_pool/sequential.rb +32 -0
- data/lib/iopromise/faraday.rb +17 -0
- data/lib/iopromise/faraday/connection.rb +25 -0
- data/lib/iopromise/faraday/continuable_hydra.rb +29 -0
- data/lib/iopromise/faraday/executor_pool.rb +19 -0
- data/lib/iopromise/faraday/multi_socket_action.rb +107 -0
- data/lib/iopromise/faraday/promise.rb +42 -0
- data/lib/iopromise/memcached.rb +13 -0
- data/lib/iopromise/memcached/client.rb +22 -0
- data/lib/iopromise/memcached/executor_pool.rb +61 -0
- data/lib/iopromise/memcached/promise.rb +32 -0
- data/lib/iopromise/rack/context_middleware.rb +20 -0
- data/lib/iopromise/version.rb +5 -0
- data/lib/iopromise/view_component.rb +9 -0
- data/lib/iopromise/view_component/data_loader.rb +62 -0
- metadata +101 -0
@@ -0,0 +1,52 @@
|
|
1
|
+
# frozen_string_literal: true

require_relative 'executor_pool'

module IOPromise
  module Dalli
    # An IOPromise representing a single memcached key operation against one
    # server. Registers itself with the current ExecutorContext as soon as it
    # is created with both a server and a key.
    class DalliPromise < ::IOPromise::Base
      attr_reader :key

      def initialize(server = nil, key = nil)
        super()

        @server = server
        @key = key
        @start_time = nil

        ::IOPromise::ExecutorContext.current.register(self) unless @server.nil? || @key.nil?
      end

      # Blocks until this promise resolves, driving the shared IO loop when
      # the promise is bound to a server and key.
      def wait
        return super if @server.nil? || @key.nil?

        ::IOPromise::ExecutorContext.current.wait_for_all_data(end_when_complete: self)
      end

      # The executor pool responsible for IO against this promise's server.
      def execute_pool
        DalliExecutorPool.for(@server)
      end

      # Called each time the select loop runs; records when this promise first
      # entered the loop so the timeout can be measured from that point.
      def in_select_loop
        @start_time ||= Process.clock_gettime(Process::CLOCK_MONOTONIC)
      end

      # Seconds left before this promise exceeds the server's socket timeout,
      # never negative. NOTE(review): assumes @start_time has already been set
      # by in_select_loop — confirm callers respect that ordering.
      def timeout_remaining
        elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - @start_time
        [@server.options[:socket_timeout] - elapsed, 0].max
      end

      # True once the socket timeout has fully elapsed. Always false before
      # the promise has entered the select loop.
      def timeout?
        !@start_time.nil? && timeout_remaining <= 0
      end
    end
  end
end
|
@@ -0,0 +1,25 @@
|
|
1
|
+
# frozen_string_literal: true

module IOPromise
  module Dalli
    # Value object describing the outcome of a single memcached operation:
    # the key, its value, whether the key existed, whether a write was
    # stored, and the CAS token (when available).
    class Response
      attr_reader :key, :value, :cas

      def initialize(key:, value:, exists: false, stored: false, cas: nil)
        @key    = key
        @value  = value
        @exists = exists
        @stored = stored
        @cas    = cas
      end

      # Whether the key was present on the server.
      def exist?
        @exists
      end

      # Whether the server accepted the write.
      def stored?
        @stored
      end
    end
  end
end
|
@@ -0,0 +1,29 @@
|
|
1
|
+
# frozen_string_literal: true

module IOPromise
  module Deferred
    # Executor pool that runs deferred (non-IO) promises synchronously during
    # the select cycle, draining batch after batch until none remain.
    class DeferredExecutorPool < ::IOPromise::ExecutorPool::Batch
      # Runs every pending batch to completion in a blocking fashion. The
      # ready_* arguments are accepted for interface compatibility but are
      # unused, since deferred work involves no IO.
      def execute_continue(ready_readers, ready_writers, ready_exceptions)
        next_batch if @current_batch.empty?

        until @current_batch.empty?
          # Work runs inline within the sync cycle rather than waiting on IO.
          @current_batch.each do |promise|
            begin_executing(promise)
            promise.run_deferred
            complete(promise)
          end

          @current_batch = []

          next_batch
        end

        # Every cycle fully drains the queue, so there is nothing to select on.
        [[], [], [], nil]
      end
    end
  end
end
|
@@ -0,0 +1,38 @@
|
|
1
|
+
# frozen_string_literal: true

require_relative 'executor_pool'

module IOPromise
  module Deferred
    # A promise whose value is produced by an arbitrary block, executed later
    # by the per-thread DeferredExecutorPool rather than immediately.
    class DeferredPromise < ::IOPromise::Base
      def initialize(&block)
        super()

        @block = block

        ::IOPromise::ExecutorContext.current.register(self) unless @block.nil?
      end

      # Blocks until resolved, driving the shared IO loop when a block exists.
      def wait
        return super if @block.nil?

        ::IOPromise::ExecutorContext.current.wait_for_all_data(end_when_complete: self)
      end

      # Deferred promises are pooled per thread.
      def execute_pool
        DeferredExecutorPool.for(Thread.current)
      end

      # Invoked by the executor pool: runs the stored block and settles the
      # promise — fulfilling with its result, or rejecting with any raised
      # StandardError.
      def run_deferred
        return if @block.nil?

        fulfill(@block.call)
      rescue => exception
        reject(exception)
      end
    end
  end
end
|
@@ -0,0 +1,114 @@
|
|
1
|
+
# frozen_string_literal: true

require 'set'

module IOPromise
  # Coordinates one or more ExecutorPools, multiplexing their IO through a
  # single IO.select loop. Contexts form a stack so nested loops are possible.
  class ExecutorContext
    class << self
      # Pushes a fresh context onto the stack.
      def push
        @contexts ||= []
        @contexts << ExecutorContext.new
      end

      # The innermost (most recently pushed) context, or nil when none exist.
      def current
        @contexts.last
      end

      # Removes and returns the innermost context.
      def pop
        @contexts.pop
      end
    end

    def initialize
      @pools = Set.new

      # IO objects reported ready by the last select, grouped by owning pool.
      @pool_ready_readers = {}
      @pool_ready_writers = {}
      @pool_ready_exceptions = {}

      # Promises registered but not yet handed to their executor pool.
      @pending_registrations = []
    end

    # Queues a promise for registration with its pool. Registration is
    # deferred so this is safe to call while the select loop is running.
    def register(promise)
      @pending_registrations << promise
    end

    # Runs the select loop until all pools are idle, or — when
    # end_when_complete is given — until that promise stops being pending.
    #
    # Raises ::IOPromise::Error if the loop drains completely while the
    # requested promise is still pending.
    def wait_for_all_data(end_when_complete: nil)
      loop do
        complete_pending_registrations

        readers, writers, exceptions, wait_time = continue_to_read_pools

        unless end_when_complete.nil?
          return unless end_when_complete.pending?
        end

        break if readers.empty? && writers.empty? && exceptions.empty? && @pending_registrations.empty?

        # If promises were registered during this pass, don't block at all so
        # they get picked up immediately on the next iteration.
        wait_time = 0 unless @pending_registrations.empty?

        # We could be clever and decide which ones to "continue" on next.
        # IO.select returns nil on timeout; normalize to empty ready sets.
        ready_readers, ready_writers, ready_exceptions =
          IO.select(readers.keys, writers.keys, exceptions.keys, wait_time) || [[], [], []]

        # Group each ready IO object by the pool that provided it.
        @pool_ready_readers = ready_readers.group_by { |io| readers[io] }
        @pool_ready_writers = ready_writers.group_by { |io| writers[io] }
        @pool_ready_exceptions = ready_exceptions.group_by { |io| exceptions[io] }
      end

      # Falling out of the loop while a specific promise was requested means
      # all IO drained without ever resolving it.
      raise ::IOPromise::Error, 'Internal error: IO loop completed without fulfilling the desired promise' unless end_when_complete.nil?

      @pools.each(&:wait)
    ensure
      complete_pending_registrations
    end

    private

    # Hands every queued promise to its executor pool.
    def complete_pending_registrations
      pending = @pending_registrations
      @pending_registrations = []
      pending.each do |promise|
        register_now(promise)
      end
    end

    # Asks every pool to continue execution, collecting the IO objects each
    # wants to select on (mapped back to their pool) and the smallest non-nil
    # timeout any pool requested — i.e. the longest we may safely block.
    def continue_to_read_pools
      readers = {}
      writers = {}
      exceptions = {}
      max_timeout = nil

      @pools.each do |pool|
        rd, wr, ex, ti = pool.execute_continue(@pool_ready_readers[pool], @pool_ready_writers[pool], @pool_ready_exceptions[pool])
        rd.each do |io|
          readers[io] = pool
        end
        wr.each do |io|
          writers[io] = pool
        end
        ex.each do |io|
          exceptions[io] = pool
        end
        if max_timeout.nil? || (!ti.nil? && ti < max_timeout)
          max_timeout = ti
        end
      end

      [readers, writers, exceptions, max_timeout]
    end

    # Immediately registers a promise with its pool, tracking the pool.
    def register_now(promise)
      pool = promise.execute_pool
      pool.register(promise)
      @pools.add(pool)
    end
  end
end
|
@@ -0,0 +1,47 @@
|
|
1
|
+
# frozen_string_literal: true

module IOPromise
  module ExecutorPool
    # Abstract base class for executor pools. One pool instance exists per
    # connection pool object, obtained through the memoizing +for+.
    class Base
      # Returns the pool associated with the given connection pool,
      # creating and caching it on first use.
      def self.for(connection_pool)
        (@executors ||= {})[connection_pool] ||= new(connection_pool)
      end

      def initialize(connection_pool)
        @connection_pool = connection_pool
        @pending = []
      end

      # Tracks a promise as pending on this pool.
      def register(item)
        @pending << item
      end

      # Stops tracking a promise (typically once it has settled).
      def complete(item)
        @pending.delete(item)
      end

      # Notifies a promise that its execution is starting.
      def begin_executing(item)
        item.beginning
      end

      # Continue execution of one or more pending IOPromises assigned to this pool.
      # Returns [readers, writers, exceptions, max_timeout], which are arrays of the
      # readers, writers, and exceptions to select on. The timeout specifies the maximum
      # time to block waiting for one of these IO objects to become ready, after which
      # this function is called again with empty "ready" arguments.
      # Must be implemented by subclasses.
      def execute_continue(ready_readers, ready_writers, ready_exceptions)
        raise NotImplementedError
      end

      # Synchronously waits on each pending item that is a Promise.
      def sync
        @pending.each { |p| p.sync if p.is_a?(Promise) }
      end
    end
  end
end
|
@@ -0,0 +1,23 @@
|
|
1
|
+
# frozen_string_literal: true

module IOPromise
  module ExecutorPool
    # Executor pool that processes its pending promises in discrete batches.
    class Batch < Base
      def initialize(connection_pool)
        super(connection_pool)

        @current_batch = []
      end

      # Waits for every item in the in-flight batch to finish, then promotes
      # all currently pending promises into the new batch.
      def next_batch
        # Ensure that all current items are fully completed first.
        @current_batch.each(&:wait)

        # Every pending operation becomes part of the current batch.
        @current_batch = @pending.dup
      end
    end
  end
end
|
@@ -0,0 +1,32 @@
|
|
1
|
+
# frozen_string_literal: true

module IOPromise
  module ExecutorPool
    # Executor pool that drives its pending promises strictly one at a time.
    class Sequential < Base
      # Continues a single item; split out so subclasses can customize it.
      def execute_continue_item(item, ready_readers, ready_writers, ready_exceptions)
        item.execute_continue(ready_readers, ready_writers, ready_exceptions)
      end

      # Advances the first unfinished pending promise. Returns that promise's
      # select state as soon as one is waiting on IO; promises that are
      # already fulfilled (or finish synchronously) are removed along the way.
      def execute_continue(ready_readers, ready_writers, ready_exceptions)
        @pending.dup.each do |active|
          if active.fulfilled?
            # Already settled: drop it and move on.
            complete(active)
            next
          end

          status = execute_continue_item(active, ready_readers, ready_writers, ready_exceptions)

          # Once we're waiting on our one next item, we're done.
          return status unless status.nil?

          # This one completed synchronously, so remove it.
          complete(active)
        end

        # If we fall through to here, we have nothing to wait on.
        [[], [], [], nil]
      end
    end
  end
end
|
@@ -0,0 +1,17 @@
|
|
1
|
+
# frozen_string_literal: true

require_relative 'faraday/connection'

module IOPromise
  module Faraday
    class << self
      # Builds an IOPromise-aware Faraday connection configured with the
      # typhoeus adapter. Mirrors ::Faraday.new: a given block is yielded
      # the connection builder for further configuration.
      def new(url = nil, options = {}, &block)
        options = ::Faraday.default_connection_options.merge(options)
        ::IOPromise::Faraday::Connection.new(url, options) do |faraday|
          faraday.adapter :typhoeus
          # Bug fix: the block was previously invoked as `block.call` with no
          # argument, so configuration blocks written as `|faraday|` received
          # nil instead of the builder.
          block.call(faraday) unless block.nil?
        end
      end
    end
  end
end
|
@@ -0,0 +1,25 @@
|
|
1
|
+
# frozen_string_literal: true

require 'faraday'

require_relative 'promise'

module IOPromise
  module Faraday
    # Faraday connection that can route requests through the IOPromise
    # parallel manager, returning promises instead of blocking responses.
    class Connection < ::Faraday::Connection
      # Runs the given block with the IOPromise parallel manager installed,
      # restoring the non-parallel state afterwards. Returns the block's
      # result.
      def with_deferred_parallel
        @parallel_manager = FaradayPromise.parallel_manager
        yield
      ensure
        @parallel_manager = nil
      end

      # Issues a GET through the parallel manager and wraps the in-flight
      # response in a FaradayPromise.
      def get_as_promise(*args, **kwargs)
        with_deferred_parallel do
          FaradayPromise.new(get(*args, **kwargs))
        end
      end
    end
  end
end
|
@@ -0,0 +1,29 @@
|
|
1
|
+
# frozen_string_literal: true

require 'typhoeus'
require_relative 'multi_socket_action'

module IOPromise
  module Faraday
    # A Typhoeus::Hydra whose multi handle is a MultiSocketAction, allowing
    # transfers to be advanced incrementally from an external select loop.
    class ContinuableHydra < Typhoeus::Hydra
      # One shared hydra per thread.
      def self.for_current_thread
        Thread.current[:faraday_promise_typhoeus_hydra] ||= new
      end

      def initialize(options = {})
        super(options)

        # :max_concurrency belongs to Hydra itself, so it is stripped before
        # the remaining options are handed to the multi handle.
        multi_options = options.reject { |key, _value| key == :max_concurrency }
        @multi = MultiSocketAction.new(multi_options)
      end

      # Advances the underlying multi handle with whichever IO objects are
      # ready, after queueing as many pending easy handles as possible.
      def execute_continue(ready_readers, ready_writers, ready_exceptions)
        # Fill up the curl easy handle as much as possible.
        dequeue_many

        @multi.execute_continue(ready_readers, ready_writers, ready_exceptions)
      end
    end
  end
end
|