aerospike 2.24.0 → 2.26.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +27 -0
- data/lib/aerospike/cdt/context.rb +136 -69
- data/lib/aerospike/cdt/map_policy.rb +16 -2
- data/lib/aerospike/cdt/map_return_type.rb +9 -1
- data/lib/aerospike/client.rb +30 -32
- data/lib/aerospike/command/command.rb +104 -98
- data/lib/aerospike/command/operate_args.rb +99 -0
- data/lib/aerospike/command/operate_command.rb +6 -11
- data/lib/aerospike/exp/exp.rb +401 -334
- data/lib/aerospike/exp/exp_bit.rb +388 -0
- data/lib/aerospike/exp/exp_hll.rb +169 -0
- data/lib/aerospike/exp/exp_list.rb +403 -0
- data/lib/aerospike/exp/exp_map.rb +493 -0
- data/lib/aerospike/exp/operation.rb +56 -0
- data/lib/aerospike/features.rb +13 -0
- data/lib/aerospike/operation.rb +20 -22
- data/lib/aerospike/policy/policy.rb +25 -12
- data/lib/aerospike/query/filter.rb +44 -32
- data/lib/aerospike/query/query_executor.rb +7 -9
- data/lib/aerospike/query/query_partition_command.rb +32 -31
- data/lib/aerospike/query/recordset.rb +9 -9
- data/lib/aerospike/query/scan_executor.rb +7 -5
- data/lib/aerospike/task/execute_task.rb +17 -14
- data/lib/aerospike/utils/buffer.rb +46 -38
- data/lib/aerospike/utils/packer.rb +7 -6
- data/lib/aerospike/value/value.rb +21 -51
- data/lib/aerospike/version.rb +1 -1
- data/lib/aerospike.rb +156 -148
- metadata +8 -2
data/lib/aerospike/policy/policy.rb

@@ -13,26 +13,44 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-require
-require
-require
-
+require "aerospike/policy/priority"
+require "aerospike/policy/consistency_level"
+require "aerospike/policy/replica"

 module Aerospike

 # Container object for client policy command.
 class Policy
-
-attr_accessor :priority, :timeout, :max_retries, :sleep_between_retries, :consistency_level,
+attr_accessor :filter_exp, :priority, :timeout, :max_retries, :sleep_between_retries, :consistency_level,
 :predexp, :fail_on_filtered_out, :replica, :use_compression

 alias total_timeout timeout
 alias total_timeout= timeout=

-def initialize(opt={})
+def initialize(opt = {})
 # Container object for transaction policy attributes used in all database
 # operation calls.

+# Optional expression filter. If filterExp exists and evaluates to false, the
+# transaction is ignored.
+#
+# Default: nil
+#
+# ==== Examples:
+#
+# p = Policy.new
+# p.filter_exp = Exp.build(Exp.eq(Exp.int_bin("a"), Exp.int_val(11)));
+@filter_exp = opt[:filter_exp]
+
+# Throw exception if {#filter_exp} is defined and that filter evaluates
+# to false (transaction ignored). The {AerospikeException}
+# will contain result code {ResultCode::FILTERED_OUT}.
+#
+# This field is not applicable to batch, scan or query commands.
+#
+# Default: false
+@fail_on_filtered_out = opt[:fail_on_filtered_out] || false
+
 # Priority of request relative to other transactions.
 # Currently, only used for scans.
 @priority = opt[:priority] || Priority::DEFAULT

@@ -74,7 +92,6 @@ module Aerospike
 # ]
 @predexp = opt[:predexp] || nil

-
 # Throw exception if @predexp is defined and that filter evaluates
 # to false (transaction ignored). The Aerospike::Exceptions::Aerospike
 # will contain result code Aerospike::ResultCode::FILTERED_OUT.

@@ -86,7 +103,6 @@ module Aerospike
 # read operation.
 @consistency_level = opt[:consistency_level] || Aerospike::ConsistencyLevel::CONSISTENCY_ONE

-
 # Send read commands to the node containing the key's partition replica type.
 # Write commands are not affected by this setting, because all writes are directed
 # to the node containing the key's master partition.

@@ -118,8 +134,5 @@ module Aerospike
 # timeout was not exceeded. Enter zero to skip sleep.
 @sleep_between_retries = opt[:sleep_between_retries] || 0.5
 end
-
-
 end # class
-
 end # module
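The new filter_exp and fail_on_filtered_out options can be passed through the constructor hash or assigned afterwards. A minimal sketch, reusing the expression from the doc comment above (bin "a", value 11); everything else is illustrative:

require "aerospike"

# Server-side expression filter: only apply the transaction when integer
# bin "a" equals 11; raise FILTERED_OUT instead of silently ignoring it.
policy = Aerospike::Policy.new(
  filter_exp: Aerospike::Exp.build(
    Aerospike::Exp.eq(Aerospike::Exp.int_bin("a"), Aerospike::Exp.int_val(11))
  ),
  fail_on_filtered_out: true
)

# Equivalent assignment style, as in the documentation example:
policy.filter_exp = Aerospike::Exp.build(
  Aerospike::Exp.eq(Aerospike::Exp.int_bin("a"), Aerospike::Exp.int_val(11))
)
policy.fail_on_filtered_out = true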
data/lib/aerospike/query/filter.rb

@@ -15,39 +15,51 @@
 # the License.

 module Aerospike
-
 class Filter
+attr_reader :packed_ctx

-
-
-
+# open up the class to alias the class methods for naming consistency
+class << self
+def equal(bin_name, value, ctx: nil)
+Filter.new(bin_name, value, value, nil, nil, ctx)
+end

-
-
-
+def contains(bin_name, value, col_type, ctx: nil)
+Filter.new(bin_name, value, value, nil, col_type, ctx)
+end

-
-
-
+def range(bin_name, from, to, col_type = nil, ctx: nil)
+Filter.new(bin_name, from, to, nil, col_type, ctx)
+end

-
-
-
-
+def geo_within_geo_region(bin_name, region, col_type = nil, ctx: nil)
+region = region.to_json
+Filter.new(bin_name, region, region, ParticleType::GEOJSON, col_type, ctx)
+end

-
-
-
-
+def geo_within_radius(bin_name, lon, lat, radius_meter, col_type = nil, ctx: nil)
+region = GeoJSON.new({ type: "AeroCircle", coordinates: [[lon, lat], radius_meter] })
+geo_within_geo_region(bin_name, region, col_type, ctx: ctx)
+end

-
-
-
-
+def geo_contains_geo_point(bin_name, point, col_type = nil, ctx: nil)
+point = point.to_json
+Filter.new(bin_name, point, point, ParticleType::GEOJSON, col_type, ctx)
+end

-
-
-
+def geo_contains_point(bin_name, lon, lat, col_type = nil, ctx: nil)
+point = GeoJSON.new({ type: "Point", coordinates: [lon, lat] })
+geo_contains_geo_point(bin_name, point, col_type, ctx: ctx)
+end
+
+# alias the old names for compatibility
+alias :Equal :equal
+alias :Contains :contains
+alias :Range :range
+alias :geoWithinGeoJSONRegion :geo_within_geo_region
+alias :geoWithinRadius :geo_within_radius
+alias :geoContainsGeoJSONPoint :geo_contains_geo_point
+alias :geoContainsPoint :geo_contains_point
 end

 def estimate_size

@@ -56,21 +68,21 @@ module Aerospike

 def write(buf, offset)
 # Write name.
-len = buf.write_binary(@name, offset+1)
+len = buf.write_binary(@name, offset + 1)
 buf.write_byte(len, offset)
 offset += len + 1

 # Write particle type.
 buf.write_byte(@val_type, offset)
-offset+=1
+offset += 1

 # Write filter begin.
-len = @begin.write(buf, offset+4)
+len = @begin.write(buf, offset + 4)
 buf.write_int32(len, offset)
 offset += len + 4

 # Write filter end.
-len = @end.write(buf, offset+4)
+len = @end.write(buf, offset + 4)
 buf.write_int32(len, offset)
 offset += len + 4

@@ -98,7 +110,7 @@ module Aerospike

 private

-def initialize(bin_name, begin_value, end_value, val_type = nil, col_type = nil)
+def initialize(bin_name, begin_value, end_value, val_type = nil, col_type = nil, ctx = nil)
 @name = bin_name
 @begin = Aerospike::Value.of(begin_value)
 @end = Aerospike::Value.of(end_value)

@@ -107,8 +119,8 @@ module Aerospike
 # but in certain cases caller can override the type.
 @val_type = val_type || @begin.type
 @col_type = col_type
-end

+@packed_ctx = CDT::Context.bytes(ctx)
+end
 end # class
-
 end
data/lib/aerospike/query/query_executor.rb

@@ -17,28 +17,29 @@

 module Aerospike
 class QueryExecutor # :nodoc:
-
 def self.query_partitions(cluster, policy, tracker, statement, recordset)
 interval = policy.sleep_between_retries

 should_retry = false

 loop do
+# reset last_expn
+@last_expn = nil
+
 list = tracker.assign_partitions_to_nodes(cluster, statement.namespace)

 if policy.concurrent_nodes
 threads = []
 # Use a thread per node
 list.each do |node_partition|
-
 threads << Thread.new do
 Thread.current.abort_on_exception = true
 command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
 begin
 command.execute
 rescue => e
+@last_expn = e unless e == QUERY_TERMINATED_EXCEPTION
 should_retry ||= command.should_retry(e)
-# puts "should retry: #{should_retry}"
 Aerospike.logger.error(e.backtrace.join("\n")) unless e == QUERY_TERMINATED_EXCEPTION
 end
 end

@@ -51,23 +52,20 @@ module Aerospike
 begin
 command.execute
 rescue => e
+@last_expn = e unless e == QUERY_TERMINATED_EXCEPTION
 should_retry ||= command.should_retry(e)
 Aerospike.logger.error(e.backtrace.join("\n")) unless e == QUERY_TERMINATED_EXCEPTION
 end
 end
 end

-
-
-if complete || !should_retry
-recordset.thread_finished
+if tracker.complete?(@cluster, policy) || !should_retry
+recordset.thread_finished(@last_expn)
 return
 end
 sleep(interval) if policy.sleep_between_retries > 0
 statement.reset_task_id
 end
 end
-
 end
-
 end
data/lib/aerospike/query/query_partition_command.rb

@@ -14,15 +14,13 @@
 # License for the specific language governing permissions and limitations under
 # the License.

-require
-require
+require "aerospike/query/stream_command"
+require "aerospike/query/recordset"

 module Aerospike
-
 private

 class QueryPartitionCommand < QueryCommand #:nodoc:
-
 def initialize(node, tracker, policy, statement, recordset, node_partitions)
 super(node, policy, statement, recordset, @node_partitions)
 @node_partitions = node_partitions

@@ -39,29 +37,29 @@ module Aerospike

 if @statement.namespace
 @data_offset += @statement.namespace.bytesize + FIELD_HEADER_SIZE
-field_count+=1
+field_count += 1
 end

 if @statement.set_name
 @data_offset += @statement.set_name.bytesize + FIELD_HEADER_SIZE
-field_count+=1
+field_count += 1
 end

 # Estimate recordsPerSecond field size. This field is used in new servers and not used
 # (but harmless to add) in old servers.
 if @policy.records_per_second > 0
 @data_offset += 4 + FIELD_HEADER_SIZE
-field_count+=1
+field_count += 1
 end

 # Estimate socket timeout field size. This field is used in new servers and not used
 # (but harmless to add) in old servers.
 @data_offset += 4 + FIELD_HEADER_SIZE
-field_count+=1
+field_count += 1

 # Estimate task_id field.
 @data_offset += 8 + FIELD_HEADER_SIZE
-field_count+=1
+field_count += 1

 filter = @statement.filters[0]
 bin_names = @statement.bin_names

@@ -73,23 +71,22 @@ module Aerospike
 # Estimate INDEX_TYPE field.
 if col_type > 0
 @data_offset += FIELD_HEADER_SIZE + 1
-field_count+=1
+field_count += 1
 end

 # Estimate INDEX_RANGE field.
 @data_offset += FIELD_HEADER_SIZE
-filter_size+=1 # num filters
+filter_size += 1 # num filters
 filter_size += filter.estimate_size

 @data_offset += filter_size
-field_count+=1
-
-
-
-
-
-
-# end
+field_count += 1
+
+packed_ctx = filter.packed_ctx
+if packed_ctx
+@data_offset += FIELD_HEADER_SIZE + packed_ctx.length
+field_count += 1
+end
 end

 @statement.set_task_id

@@ -102,13 +99,18 @@ module Aerospike
 field_count += 1
 end

+unless @policy.filter_exp.nil?
+exp_size = estimate_expression_size(@policy.filter_exp)
+field_count += 1 if exp_size > 0
+end
+
 # Estimate aggregation/background function size.
 if @statement.function_name
 @data_offset += FIELD_HEADER_SIZE + 1 # udf type
 @data_offset += @statement.package_name.bytesize + FIELD_HEADER_SIZE
 @data_offset += @statement.function_name.bytesize + FIELD_HEADER_SIZE

-function_arg_buffer=
+function_arg_buffer = ""
 if @statement.function_args && @statement.function_args.length > 0
 function_arg_buffer = Value.of(@statement.function_args).to_bytes
 end

@@ -133,24 +135,24 @@ module Aerospike

 if parts_full_size > 0
 @data_offset += parts_full_size + FIELD_HEADER_SIZE
-field_count+=1
+field_count += 1
 end

 if parts_partial_digest_size > 0
 @data_offset += parts_partial_digest_size + FIELD_HEADER_SIZE
-field_count+=1
+field_count += 1
 end

 if parts_partial_bval_size > 0
 @data_offset += parts_partial_bval_size + FIELD_HEADER_SIZE
-field_count+=1
+field_count += 1
 end

 # Estimate max records field size. This field is used in new servers and not used
 # (but harmless to add) in old servers.
 if max_records > 0
 @data_offset += 8 + FIELD_HEADER_SIZE
-field_count+=1
+field_count += 1
 end

 operation_count = 0

@@ -180,6 +182,8 @@ module Aerospike
 # Write records per second.
 write_field_int(@policy.records_per_second, FieldType::RECORDS_PER_SECOND) if @policy.records_per_second > 0

+write_filter_exp(@policy.filter_exp, exp_size)
+
 # Write socket idle timeout.
 write_field_int(@policy.socket_timeout, FieldType::SOCKET_TIMEOUT)

@@ -205,11 +209,10 @@ module Aerospike
 @data_offset += @data_buffer.write_byte(1, @data_offset)
 @data_offset = filter.write(@data_buffer, @data_offset)

-
-
-
-
-# end
+if packed_ctx
+write_field_header(packed_ctx.length, FieldType::INDEX_CONTEXT)
+@data_offset += @data_buffer.write_binary(packed_ctx, @data_offset)
+end
 end

 if @statement.function_name

@@ -260,7 +263,5 @@ module Aerospike
 # !! converts nil to false
 !!@tracker&.should_retry(@node_partitions, e)
 end
-
 end # class
-
 end # module
data/lib/aerospike/query/recordset.rb

@@ -22,7 +22,6 @@ module Aerospike
 # so the production and the consumptoin are decoupled
 # there can be an unlimited count of producer threads and consumer threads
 class Recordset
-
 attr_reader :records

 def initialize(queue_size = 5000, thread_count = 1, type)

@@ -66,18 +65,21 @@ module Aerospike

 # this is called by working threads to signal their job is finished
 # it decreases the count of active threads and puts an EOF on queue when all threads are finished
-
+# e is an exception that has happened in the exceutor, and outside of the threads themselves
+def thread_finished(expn = nil)
 @active_threads.update do |v|
 v -= 1
 @records.enq(nil) if v == 0
 v
 end
+
+raise expn unless expn.nil?
 end

 # this is called by a thread who faced an exception to singnal to terminate the whole operation
 # it also may be called by the user to terminate the command in the middle of fetching records from server nodes
 # it clears the queue so that if any threads are waiting for the queue get unblocked and find out about the cancellation
-def cancel(expn=nil)
+def cancel(expn = nil)
 set_exception(expn)
 @cancelled.set(true)
 @records.clear

@@ -104,18 +106,16 @@ module Aerospike
 @filters.nil? || @filters.empty?
 end

-
+private

-def set_exception(expn=nil)
+def set_exception(expn = nil)
 expn ||= (@type == :scan ? SCAN_TERMINATED_EXCEPTION : QUERY_TERMINATED_EXCEPTION)
 @thread_exception.set(expn)
 end
-
 end

 private

-
-
-
+SCAN_TERMINATED_EXCEPTION = Aerospike::Exceptions::ScanTerminated.new()
+QUERY_TERMINATED_EXCEPTION = Aerospike::Exceptions::QueryTerminated.new()
 end
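With thread_finished now re-raising the last executor exception instead of dropping it, errors from the scan/query worker threads are no longer silently swallowed. A consumer-side sketch (Recordset#each and Exceptions::Aerospike#result_code as in earlier releases; recordset is assumed to come from a query or scan call):

begin
  recordset.each do |record|
    puts record.bins
  end
rescue Aerospike::Exceptions::Aerospike => e
  # e.g. the node-level failure captured as @last_expn by the executor
  warn "query terminated: result code #{e.result_code}"
end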
data/lib/aerospike/query/scan_executor.rb

@@ -17,26 +17,28 @@

 module Aerospike
 class ScanExecutor # :nodoc:
-
 def self.scan_partitions(policy, cluster, tracker, namespace, set_name, recordset, bin_names = nil)
 interval = policy.sleep_between_retries

 should_retry = false

 loop do
+# reset last_expn
+@last_expn = nil
+
 list = tracker.assign_partitions_to_nodes(cluster, namespace)

 if policy.concurrent_nodes
 threads = []
 # Use a thread per node
 list.each do |node_partition|
-
 threads << Thread.new do
 Thread.current.abort_on_exception = true
 command = ScanPartitionCommand.new(policy, tracker, node_partition, namespace, set_name, bin_names, recordset)
 begin
 command.execute
 rescue => e
+@last_expn = e unless e == SCAN_TERMINATED_EXCEPTION
 should_retry ||= command.should_retry(e)
 Aerospike.logger.error(e.backtrace.join("\n")) unless e == SCAN_TERMINATED_EXCEPTION
 end

@@ -50,6 +52,7 @@ module Aerospike
 begin
 command.execute
 rescue => e
+@last_expn = e unless e == SCAN_TERMINATED_EXCEPTION
 should_retry ||= command.should_retry(e)
 Aerospike.logger.error(e.backtrace.join("\n")) unless e == SCAN_TERMINATED_EXCEPTION
 end

@@ -57,13 +60,12 @@ module Aerospike
 end

 if tracker.complete?(@cluster, policy) || !should_retry
-recordset.thread_finished
+recordset.thread_finished(@last_expn)
 return
 end
 sleep(interval) if policy.sleep_between_retries > 0
+statement.reset_task_id
 end
 end
-
 end
-
 end
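The scan executor mirrors the query executor: the last non-termination exception is recorded and handed to recordset.thread_finished. From the public API this path backs partition scans; a short sketch, assuming Client#scan_all keeps its earlier signature:

recordset = client.scan_all("test", "demo", ["name", "age"])
recordset.each { |record| puts record.bins["name"] }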
data/lib/aerospike/task/execute_task.rb

@@ -13,7 +13,6 @@
 # limitations under the License.

 module Aerospike
-
 private

 # ExecuteTask is used to poll for long running server execute job completion.

@@ -29,19 +28,24 @@ module Aerospike
 self
 end

-#
+# queries all nodes for task completion status.
 def all_nodes_done?
-
-
-
-
-command = 'query-list'
-end
+modul = @scan ? "scan" : "query"
+cmd1 = "query-show:trid=#{@task_id}"
+cmd2 = modul + "-show:trid=#{@task_id}"
+cmd3 = "jobs:module=" + modul + ";cmd=get-job;trid=#{@task_id}"

 nodes = @cluster.nodes
 done = false

 nodes.each do |node|
+command = cmd3
+if node.supports_feature?(Aerospike::Features::PARTITION_QUERY)
+command = cmd1
+elsif node.supports_feature?(Aerospike::Features::QUERY_SHOW)
+command = cmd2
+end
+
 conn = node.get_connection(0)
 responseMap, _ = Info.request(conn, command)
 node.put_connection(conn)

@@ -58,28 +62,27 @@ module Aerospike

 b = index + find.length
 response = response[b, response.length]
-find =
+find = "job_status="
 index = response.index(find)

 next unless index

 b = index + find.length
 response = response[b, response.length]
-e = response.index(
+e = response.index(":")
 status = response[0, e]

 case status
-when
+when "ABORTED"
 raise Aerospike::Exceptions::QueryTerminated
-when
+when "IN PROGRESS"
 return false
-when
+when "DONE"
 done = true
 end
 end

 done
 end
-
 end
 end