sequel 5.38.0 → 5.43.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG +54 -0
- data/MIT-LICENSE +1 -1
- data/README.rdoc +1 -1
- data/doc/cheat_sheet.rdoc +5 -5
- data/doc/code_order.rdoc +0 -12
- data/doc/fork_safety.rdoc +84 -0
- data/doc/postgresql.rdoc +1 -1
- data/doc/querying.rdoc +3 -3
- data/doc/release_notes/5.39.0.txt +19 -0
- data/doc/release_notes/5.40.0.txt +40 -0
- data/doc/release_notes/5.41.0.txt +25 -0
- data/doc/release_notes/5.42.0.txt +136 -0
- data/doc/release_notes/5.43.0.txt +98 -0
- data/doc/sql.rdoc +1 -1
- data/doc/testing.rdoc +2 -0
- data/lib/sequel/adapters/ado.rb +16 -16
- data/lib/sequel/adapters/jdbc.rb +2 -2
- data/lib/sequel/adapters/shared/mssql.rb +21 -1
- data/lib/sequel/adapters/shared/postgres.rb +5 -3
- data/lib/sequel/adapters/shared/sqlite.rb +35 -1
- data/lib/sequel/database/misc.rb +1 -2
- data/lib/sequel/database/schema_generator.rb +16 -1
- data/lib/sequel/database/schema_methods.rb +19 -5
- data/lib/sequel/database/transactions.rb +1 -1
- data/lib/sequel/dataset/features.rb +10 -0
- data/lib/sequel/dataset/prepared_statements.rb +2 -0
- data/lib/sequel/dataset/sql.rb +32 -10
- data/lib/sequel/extensions/async_thread_pool.rb +438 -0
- data/lib/sequel/extensions/blank.rb +8 -0
- data/lib/sequel/extensions/date_arithmetic.rb +7 -9
- data/lib/sequel/extensions/eval_inspect.rb +2 -0
- data/lib/sequel/extensions/inflector.rb +8 -0
- data/lib/sequel/extensions/migration.rb +2 -0
- data/lib/sequel/extensions/named_timezones.rb +5 -1
- data/lib/sequel/extensions/pg_array.rb +1 -0
- data/lib/sequel/extensions/pg_interval.rb +34 -8
- data/lib/sequel/extensions/pg_row.rb +1 -0
- data/lib/sequel/extensions/query.rb +2 -0
- data/lib/sequel/model/associations.rb +28 -4
- data/lib/sequel/model/base.rb +23 -6
- data/lib/sequel/model/plugins.rb +5 -0
- data/lib/sequel/plugins/association_proxies.rb +2 -0
- data/lib/sequel/plugins/async_thread_pool.rb +39 -0
- data/lib/sequel/plugins/auto_validations.rb +15 -1
- data/lib/sequel/plugins/column_encryption.rb +711 -0
- data/lib/sequel/plugins/composition.rb +7 -2
- data/lib/sequel/plugins/constraint_validations.rb +2 -1
- data/lib/sequel/plugins/dataset_associations.rb +4 -1
- data/lib/sequel/plugins/json_serializer.rb +37 -22
- data/lib/sequel/plugins/nested_attributes.rb +8 -3
- data/lib/sequel/plugins/pg_array_associations.rb +4 -0
- data/lib/sequel/plugins/pg_auto_constraint_validations.rb +2 -0
- data/lib/sequel/plugins/serialization.rb +8 -3
- data/lib/sequel/plugins/serialization_modification_detection.rb +1 -1
- data/lib/sequel/plugins/single_table_inheritance.rb +2 -0
- data/lib/sequel/plugins/tree.rb +9 -4
- data/lib/sequel/plugins/validation_helpers.rb +6 -2
- data/lib/sequel/timezones.rb +8 -3
- data/lib/sequel/version.rb +1 -1
- metadata +36 -21

data/lib/sequel/database/transactions.rb CHANGED

@@ -82,7 +82,7 @@ module Sequel
     # :server :: The server/shard the transaction is being executed on.
     def rollback_on_exit(opts=OPTS)
       synchronize(opts[:server]) do |conn|
-        raise Error, "Cannot call Sequel::Database#rollback_on_exit
+        raise Error, "Cannot call Sequel::Database#rollback_on_exit unless inside a transaction" unless h = _trans(conn)
         rollback = !opts[:cancel]
 
         if supports_savepoints?
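
For context, rollback_on_exit has to be called while a transaction is open on the same connection, and the :cancel option visible in the surrounding lines undoes a previously requested rollback. A minimal usage sketch, with an :accounts table used purely for illustration:

  DB.transaction do
    DB[:accounts].where(id: 1).update(balance: 0)
    DB.rollback_on_exit                 # roll the transaction back when the block exits
    DB.rollback_on_exit(cancel: true)   # cancel the earlier rollback request
  end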

data/lib/sequel/dataset/features.rb CHANGED

@@ -51,6 +51,11 @@ module Sequel
       false
     end
 
+    # Whether deleting from joined datasets is supported, false by default.
+    def supports_deleting_joins?
+      supports_modifying_joins?
+    end
+
     # Whether the database supports derived column lists (e.g.
     # "table_expr AS table_alias(column_alias1, column_alias2, ...)"), true by
     # default.

@@ -178,6 +183,11 @@ module Sequel
       true
     end
 
+    # Whether updating joined datasets is supported, false by default.
+    def supports_updating_joins?
+      supports_modifying_joins?
+    end
+
     # Whether the dataset supports the WINDOW clause to define windows used by multiple
     # window functions, false by default.
     def supports_window_clause?
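
These two feature methods let an adapter opt into DELETE or UPDATE support on joined datasets separately from the blanket supports_modifying_joins?. A hypothetical external adapter dataset that only supports deleting from joins might override just one of them (the adapter name is illustrative):

  module MyAdapter
    class Dataset < Sequel::Dataset
      # Allow DELETE on joined datasets while UPDATE keeps the default,
      # which falls back to supports_modifying_joins?.
      def supports_deleting_joins?
        true
      end
    end
  end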

data/lib/sequel/dataset/prepared_statements.rb CHANGED

@@ -201,7 +201,9 @@ module Sequel
       when :insert_pk
         fetch_rows(prepared_sql){|r| return r.values.first}
       when Array
+        # :nocov:
         case prepared_type[0]
+        # :nocov:
         when :map, :as_hash, :to_hash, :to_hash_groups
           public_send(*prepared_type, &block)
         end

data/lib/sequel/dataset/sql.rb CHANGED

@@ -22,7 +22,7 @@ module Sequel
     def insert_sql(*values)
       return static_sql(@opts[:sql]) if @opts[:sql]
 
-
+      check_insert_allowed!
 
       columns = []
 

@@ -172,7 +172,7 @@ module Sequel
     # than one table.
     def update_sql(values = OPTS)
       return static_sql(opts[:sql]) if opts[:sql]
-
+      check_update_allowed!
       check_not_limited!(:update)
 
       case values

@@ -215,7 +215,7 @@ module Sequel
       lines << "def #{'_' if priv}#{type}_sql"
       lines << 'if sql = opts[:sql]; return static_sql(sql) end' unless priv
       lines << "if sql = cache_get(:_#{type}_sql); return sql end" if cacheable
-      lines << '
+      lines << 'check_delete_allowed!' << 'check_not_limited!(:delete)' if type == :delete
       lines << 'sql = @opts[:append_sql] || sql_string_origin'
 
       if clauses.all?{|c| c.is_a?(Array)}

@@ -918,10 +918,35 @@ module Sequel
       !@opts[:no_cache_sql] && !cache_get(:_no_cache_sql)
     end
 
-    # Raise an InvalidOperation exception if
+    # Raise an InvalidOperation exception if modification is not allowed for this dataset.
+    # Check whether it is allowed to insert into this dataset.
+    # Only for backwards compatibility with older external adapters.
     def check_modification_allowed!
+      # SEQUEL6: Remove
+      Sequel::Deprecation.deprecate("Dataset#check_modification_allowed!", "Use check_{insert,delete,update,truncation}_allowed! instead")
+      _check_modification_allowed!(supports_modifying_joins?)
+    end
+
+    # Check whether it is allowed to insert into this dataset.
+    def check_insert_allowed!
+      _check_modification_allowed!(false)
+    end
+    alias check_truncation_allowed! check_insert_allowed!
+
+    # Check whether it is allowed to delete from this dataset.
+    def check_delete_allowed!
+      _check_modification_allowed!(supports_deleting_joins?)
+    end
+
+    # Check whether it is allowed to update this dataset.
+    def check_update_allowed!
+      _check_modification_allowed!(supports_updating_joins?)
+    end
+
+    # Internals of the check_*_allowed! methods
+    def _check_modification_allowed!(modifying_joins_supported)
       raise(InvalidOperation, "Grouped datasets cannot be modified") if opts[:group]
-      raise(InvalidOperation, "Joined datasets cannot be modified") if !
+      raise(InvalidOperation, "Joined datasets cannot be modified") if !modifying_joins_supported && joined_dataset?
     end
 
     # Raise error if the dataset uses limits or offsets.

@@ -930,11 +955,6 @@ module Sequel
       raise InvalidOperation, "Dataset##{type} not supported on datasets with limits or offsets" if opts[:limit] || opts[:offset]
     end
 
-    # Alias of check_modification_allowed!
-    def check_truncation_allowed!
-      check_modification_allowed!
-    end
-
     # Append column list to SQL string.
     # If the column list is empty, a wildcard (*) is appended.
     def column_list_append(sql, columns)
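
The practical effect of this split is that each statement type now consults its own feature method before SQL is generated. A sketch of the resulting behavior on a database that does not support modifying joined datasets (table and column names are illustrative):

  ds = DB[:albums].join(:artists, id: :artist_id)

  ds.insert(name: 'x')        # raises Sequel::InvalidOperation; inserting into joined datasets is never allowed
  ds.update(copies_sold: 0)   # raises Sequel::InvalidOperation unless supports_updating_joins? is true
  ds.delete                   # raises Sequel::InvalidOperation unless supports_deleting_joins? is true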

@@ -971,7 +991,9 @@ module Sequel
     # operators unsupported by some databases. Used by adapters for databases
     # that don't support the operators natively.
     def complex_expression_emulate_append(sql, op, args)
+      # :nocov:
       case op
+      # :nocov:
       when :%
         complex_expression_arg_pairs_append(sql, args){|a, b| Sequel.function(:MOD, a, b)}
       when :>>

data/lib/sequel/extensions/async_thread_pool.rb ADDED

@@ -0,0 +1,438 @@
+# frozen-string-literal: true
+#
+# The async_thread_pool extension adds support for running database
+# queries in separate threads using a thread pool. With the following
+# code
+#
+#   DB.extension :async_thread_pool
+#   foos = DB[:foos].async.where{:name=>'A'..'M'}.all
+#   bar_names = DB[:bar].async.select_order_map(:name)
+#   baz_1 = DB[:bazes].async.first(:id=>1)
+#
+# All 3 queries will be run in separate threads. +foos+, +bar_names+
+# and +baz_1+ will be proxy objects. Calling a method on the proxy
+# object will wait for the query to be run, and will return the result
+# of calling that method on the result of the query method. For example,
+# if you run:
+#
+#   foos = DB[:foos].async.where{:name=>'A'..'M'}.all
+#   bar_names = DB[:bars].async.select_order_map(:name)
+#   baz_1 = DB[:bazes].async.first(:id=>1)
+#   sleep(1)
+#   foos.size
+#   bar_names.first
+#   baz_1.name
+#
+# These three queries will generally be run concurrently in separate
+# threads. If you instead run:
+#
+#   DB[:foos].async.where{:name=>'A'..'M'}.all.size
+#   DB[:bars].async.select_order_map(:name).first
+#   DB[:bazes].async.first(:id=>1).name
+#
+# Then it will run each query sequentially, since you need the result of
+# one query before running the next query. The queries will still be
+# run in separate threads (by default).
+#
+# What is run in the separate thread is the entire method call that
+# returns results. So with the original example:
+#
+#   foos = DB[:foos].async.where{:name=>'A'..'M'}.all
+#   bar_names = DB[:bars].async.select_order_map(:name)
+#   baz_1 = DB[:bazes].async.first(:id=>1)
+#
+# The +all+, <tt>select_order_map(:name)</tt>, and <tt>first(:id=>1)</tt>
+# calls are run in separate threads. If a block is passed to a method
+# such as +all+ or +each+, the block is also run in that thread. If you
+# have code such as:
+#
+#   h = {}
+#   DB[:foos].async.each{|row| h[row[:id]] = row}
+#   bar_names = DB[:bars].async.select_order_map(:name)
+#   p h
+#
+# You may end up with it printing an empty hash or partial hash, because the
+# async +each+ call will not have run or finished running. Since the
+# <tt>p h</tt> code relies on a side-effect of the +each+ block and not the
+# return value of the +each+ call, it will not wait for the loading.
+#
+# You should avoid using +async+ for any queries where you are ignoring the
+# return value, as otherwise you have no way to wait for the query to be run.
+#
+# Datasets that use async will use async threads to load data for the majority
+# of methods that can return data. However, dataset methods that return
+# enumerators will not use an async thread (e.g. calling Dataset#map
+# without a block or arguments does not use an async thread or return a
+# proxy object).
+#
+# Because async methods (including their blocks) run in a separate thread, you
+# should not use control flow modifiers such as +return+ or +break+ in async
+# queries. Doing so will result in an error.
+#
+# Because async results are returned as proxy objects, it's a bad idea
+# to use them in a boolean setting:
+#
+#   result = DB[:foo].async.get(:boolean_column)
+#   # or:
+#   result = DB[:foo].async.first
+#
+#   # ...
+#   if result
+#     # will always execute this branch, since result is a proxy object
+#   end
+#
+# In this case, you can call the +__value+ method to return the actual
+# result:
+#
+#   if result.__value
+#     # will not execute this branch if the dataset method returned nil or false
+#   end
+#
+# Similarly, because a proxy object is used, you should be careful using the
+# result in a case statement or an argument to <tt>Class#===</tt>:
+#
+#   # ...
+#   case result
+#   when Hash, true, false
+#     # will never take this branch, since result is a proxy object
+#   end
+#
+# Similar to usage in an +if+ statement, you should use +__value+:
+#
+#   case result.__value
+#   when Hash, true, false
+#     # will take this branch if the dataset method returned a hash, true, or false
+#   end
+#
+# On Ruby 2.2+, you can use +itself+ instead of +__value+. It's preferable to
+# use +itself+ if you can, as that will allow code to work with both proxy
+# objects and regular objects.
+#
+# Because separate threads and connections are used for async queries,
+# they do not use any state on the current connection/thread. So if
+# you do:
+#
+#   DB.transaction{DB[:table].async.all}
+#
+# Be aware that the transaction runs on one connection, and the SELECT
+# query on a different connection. If you are currently using
+# transactional testing (running each test inside a transaction/savepoint),
+# and want to start using this extension, you should first switch to
+# non-transactional testing of the code that will use the async thread
+# pool before using this extension, as otherwise the use of
+# <tt>Dataset#async</tt> will likely break your tests.
+#
+# If you are using Database#synchronize to checkout a connection, the
+# same issue applies, where the async query runs on a different
+# connection:
+#
+#   DB.synchronize{DB[:table].async.all}
+#
+# Similarly, if you are using the server_block extension, any async
+# queries inside with_server blocks will not use the server specified:
+#
+#   DB.with_server(:shard1) do
+#     DB[:a].all # Uses shard1
+#     DB[:a].async.all # Uses default shard
+#   end
+#
+# You need to manually specify the shard for any dataset using an async
+# query:
+#
+#   DB.with_server(:shard1) do
+#     DB[:a].all # Uses shard1
+#     DB[:a].async.server(:shard1).all # Uses shard1
+#   end
+#
+# When the async_thread_pool extension is loaded, the size of the async thread pool
+# can be set by using the +:num_async_threads+ Database option, which must
+# be set before loading the async_thread_pool extension. This defaults
+# to the size of the Database object's connection pool.
+#
+# By default, for consistent behavior, the async_thread_pool extension
+# will always run the query in a separate thread. However, in some cases,
+# such as when the async thread pool is busy and the results of a query
+# are needed right away, it can improve performance to allow preemption,
+# so that the query will run in the current thread instead of waiting
+# for an async thread to become available. With the following code:
+#
+#   foos = DB[:foos].async.where{:name=>'A'..'M'}.all
+#   bar_names = DB[:bar].async.select_order_map(:name)
+#   if foos.length > 4
+#     baz_1 = DB[:bazes].async.first(:id=>1)
+#   end
+#
+# Whether you need the +baz_1+ variable depends on the value of foos.
+# If the async thread pool is busy, and by the time the +foos.length+
+# call is made, the async thread pool has not started the processing
+# to get the +foos+ value, it can improve performance to start that
+# processing in the current thread, since it is needed immediately to
+# determine whether to schedule the query to get the +baz_1+ variable.
+# The default is to not allow preemption, because if the current
+# thread is used, it may have already checked out a connection that
+# could be used, and that connection could be inside a transaction or
+# have some other manner of connection-specific state applied to it.
+# If you want to allow preemption, you can set the
+# +:preempt_async_thread+ Database option before loading the
+# async_thread_pool extension.
+#
+# Related module: Sequel::Database::AsyncThreadPool::DatasetMethods
+
+
+#
+module Sequel
+  module Database::AsyncThreadPool
+    # JobProcessor is a wrapper around a single thread, that will
+    # process a queue of jobs until it is shut down.
+    class JobProcessor # :nodoc:
+      def self.create_finalizer(queue, pool)
+        proc{run_finalizer(queue, pool)}
+      end
+
+      def self.run_finalizer(queue, pool)
+        # Push a nil for each thread using the queue, signalling
+        # that thread to close.
+        pool.each{queue.push(nil)}
+
+        # Join each of the closed threads.
+        pool.each(&:join)
+
+        # Clear the thread pool. Probably not necessary, but this allows
+        # for a simple way to check whether this finalizer has been run.
+        pool.clear
+
+        nil
+      end
+      private_class_method :run_finalizer
+
+      def initialize(queue)
+        @thread = ::Thread.new do
+          while proxy = queue.pop
+            proxy.__send__(:__run)
+          end
+        end
+      end
+
+      # Join the thread, should only be called by the related finalizer.
+      def join
+        @thread.join
+      end
+    end
+
+    # Wrapper for exception instances raised by async jobs. The
+    # wrapped exception will be raised by the code getting the value
+    # of the job.
+    WrappedException = Struct.new(:exception)
+
+    # Base proxy object class for jobs processed by async threads and
+    # the returned result.
+    class BaseProxy < BasicObject
+      # Store a block that returns the result when called.
+      def initialize(&block)
+        ::Kernel.raise Error, "must provide block for an async job" unless block
+        @block = block
+      end
+
+      # Pass all method calls to the returned result.
+      def method_missing(*args, &block)
+        __value.public_send(*args, &block)
+      end
+      # :nocov:
+      ruby2_keywords(:method_missing) if respond_to?(:ruby2_keywords, true)
+      # :nocov:
+
+      # Delegate respond_to? calls to the returned result.
+      def respond_to_missing?(*args)
+        __value.respond_to?(*args)
+      end
+
+      # Override some methods defined by default so they apply to the
+      # returned result and not the current object.
+      [:!, :==, :!=, :instance_eval, :instance_exec].each do |method|
+        define_method(method) do |*args, &block|
+          __value.public_send(method, *args, &block)
+        end
+      end
+
+      # Wait for the value to be loaded if it hasn't already been loaded.
+      # If the code to load the return value raised an exception that was
+      # wrapped, reraise the exception.
+      def __value
+        unless defined?(@value)
+          __get_value
+        end
+
+        if @value.is_a?(WrappedException)
+          ::Kernel.raise @value
+        end
+
+        @value
+      end
+
+      private
+
+      # Run the block and return the block value. If the block call raises
+      # an exception, wrap the exception.
+      def __run_block
+        # This may not catch concurrent calls (unless surrounded by a mutex), but
+        # it's not worth trying to protect against that. It's enough to just check for
+        # multiple non-concurrent calls.
+        ::Kernel.raise Error, "Cannot run async block multiple times" unless block = @block
+
+        @block = nil
+
+        begin
+          block.call
+        rescue ::Exception => e
+          WrappedException.new(e)
+        end
+      end
+    end
+
+    # Default object class for async job/proxy result. This uses a queue for
+    # synchronization. The JobProcessor will push a result onto the queue,
+    # and the code to get the value will pop the result from that queue (and
+    # repush the result to handle thread safety).
+    class Proxy < BaseProxy
+      def initialize
+        super
+        @queue = ::Queue.new
+      end
+
+      private
+
+      def __run
+        @queue.push(__run_block)
+      end
+
+      def __get_value
+        @value = @queue.pop
+
+        # Handle thread-safety by repushing the popped value, so that
+        # concurrent calls will receive the same value
+        @queue.push(@value)
+      end
+    end
+
+    # Object class for async job/proxy result when the :preempt_async_thread
+    # Database option is used. Uses a mutex for synchronization, and either
+    # the JobProcessor or the calling thread can run code to get the value.
+    class PreemptableProxy < BaseProxy
+      def initialize
+        super
+        @mutex = ::Mutex.new
+      end
+
+      private
+
+      def __get_value
+        @mutex.synchronize do
+          unless defined?(@value)
+            @value = __run_block
+          end
+        end
+      end
+      alias __run __get_value
+    end
+
+    module DatabaseMethods
+      def self.extended(db)
+        db.instance_exec do
+          unless pool.pool_type == :threaded || pool.pool_type == :sharded_threaded
+            raise Error, "can only load async_thread_pool extension if using threaded or sharded_threaded connection pool"
+          end
+
+          num_async_threads = opts[:num_async_threads] ? typecast_value_integer(opts[:num_async_threads]) : (Integer(opts[:max_connections] || 4))
+          raise Error, "must have positive number for num_async_threads" if num_async_threads <= 0
+
+          proxy_klass = typecast_value_boolean(opts[:preempt_async_thread]) ? PreemptableProxy : Proxy
+          define_singleton_method(:async_job_class){proxy_klass}
+
+          queue = @async_thread_queue = Queue.new
+          pool = @async_thread_pool = num_async_threads.times.map{JobProcessor.new(queue)}
+          ObjectSpace.define_finalizer(db, JobProcessor.create_finalizer(queue, pool))
+
+          extend_datasets(DatasetMethods)
+        end
+      end
+
+      private
+
+      # Wrap the block in a job/proxy object and schedule it to run using the async thread pool.
+      def async_run(&block)
+        proxy = async_job_class.new(&block)
+        @async_thread_queue.push(proxy)
+        proxy
+      end
+    end
+
+    ASYNC_METHODS = ([:all?, :any?, :drop, :entries, :grep_v, :include?, :inject, :member?, :minmax, :none?, :one?, :reduce, :sort, :take, :tally, :to_a, :to_h, :uniq, :zip] & Enumerable.instance_methods) + (Dataset::ACTION_METHODS - [:map, :paged_each])
+    ASYNC_BLOCK_METHODS = ([:collect, :collect_concat, :detect, :drop_while, :each_cons, :each_entry, :each_slice, :each_with_index, :each_with_object, :filter_map, :find, :find_all, :find_index, :flat_map, :max_by, :min_by, :minmax_by, :partition, :reject, :reverse_each, :sort_by, :take_while] & Enumerable.instance_methods) + [:paged_each]
+    ASYNC_ARGS_OR_BLOCK_METHODS = [:map]
+
+    module DatasetMethods
+      # Define a method in the given module that will run the given method using an async thread
+      # if the current dataset is async.
+      def self.define_async_method(mod, method)
+        mod.send(:define_method, method) do |*args, &block|
+          if @opts[:async]
+            ds = sync
+            db.send(:async_run){ds.send(method, *args, &block)}
+          else
+            super(*args, &block)
+          end
+        end
+      end
+
+      # Define a method in the given module that will run the given method using an async thread
+      # if the current dataset is async and a block is provided.
+      def self.define_async_block_method(mod, method)
+        mod.send(:define_method, method) do |*args, &block|
+          if block && @opts[:async]
+            ds = sync
+            db.send(:async_run){ds.send(method, *args, &block)}
+          else
+            super(*args, &block)
+          end
+        end
+      end
+
+      # Define a method in the given module that will run the given method using an async thread
+      # if the current dataset is async and arguments or a block is provided.
+      def self.define_async_args_or_block_method(mod, method)
+        mod.send(:define_method, method) do |*args, &block|
+          if (block || !args.empty?) && @opts[:async]
+            ds = sync
+            db.send(:async_run){ds.send(method, *args, &block)}
+          else
+            super(*args, &block)
+          end
+        end
+      end
+
+      # Override all of the methods that return results to do the processing in an async thread
+      # if they have been marked to run async and should run async (i.e. they don't return an
+      # Enumerator).
+      ASYNC_METHODS.each{|m| define_async_method(self, m)}
+      ASYNC_BLOCK_METHODS.each{|m| define_async_block_method(self, m)}
+      ASYNC_ARGS_OR_BLOCK_METHODS.each{|m| define_async_args_or_block_method(self, m)}
+
+      # Return a cloned dataset that will load results using the async thread pool.
+      def async
+        cached_dataset(:_async) do
+          clone(:async=>true)
+        end
+      end
+
+      # Return a cloned dataset that will not load results using the async thread pool.
+      # Only used if the current dataset has been marked as using the async thread pool.
+      def sync
+        cached_dataset(:_sync) do
+          clone(:async=>false)
+        end
+      end
+    end
+  end
+
+  Database.register_extension(:async_thread_pool, Database::AsyncThreadPool::DatabaseMethods)
+end
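
As the header comment above notes, the pool size and preemption behavior are Database options that must be set before the extension is loaded. A minimal setup sketch (the connection URL and table name are illustrative):

  DB = Sequel.connect('postgres://localhost/mydb',
    num_async_threads: 8,         # defaults to the connection pool size
    preempt_async_thread: true)   # allow the calling thread to run a pending query itself
  DB.extension :async_thread_pool

  names = DB[:artists].async.select_order_map(:name)   # scheduled on the async thread pool
  names.__value                                        # block until the result is available (or use itself)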