aerospike 2.23.0 → 2.25.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +321 -266
- data/lib/aerospike/cdt/map_policy.rb +16 -2
- data/lib/aerospike/cdt/map_return_type.rb +9 -1
- data/lib/aerospike/client.rb +52 -56
- data/lib/aerospike/command/command.rb +105 -97
- data/lib/aerospike/command/field_type.rb +25 -28
- data/lib/aerospike/command/operate_args.rb +99 -0
- data/lib/aerospike/command/operate_command.rb +6 -11
- data/lib/aerospike/exp/exp.rb +1329 -0
- data/lib/aerospike/exp/exp_bit.rb +388 -0
- data/lib/aerospike/exp/exp_hll.rb +169 -0
- data/lib/aerospike/exp/exp_list.rb +403 -0
- data/lib/aerospike/exp/exp_map.rb +493 -0
- data/lib/aerospike/exp/operation.rb +56 -0
- data/lib/aerospike/features.rb +13 -0
- data/lib/aerospike/operation.rb +20 -22
- data/lib/aerospike/policy/policy.rb +25 -12
- data/lib/aerospike/policy/query_policy.rb +35 -2
- data/lib/aerospike/policy/scan_policy.rb +0 -2
- data/lib/aerospike/query/query_command.rb +1 -1
- data/lib/aerospike/query/query_executor.rb +71 -0
- data/lib/aerospike/query/query_partition_command.rb +269 -0
- data/lib/aerospike/query/recordset.rb +9 -9
- data/lib/aerospike/query/scan_executor.rb +7 -5
- data/lib/aerospike/query/statement.rb +7 -0
- data/lib/aerospike/query/stream_command.rb +2 -1
- data/lib/aerospike/task/execute_task.rb +17 -14
- data/lib/aerospike/utils/buffer.rb +62 -35
- data/lib/aerospike/utils/packer.rb +7 -6
- data/lib/aerospike/value/value.rb +21 -51
- data/lib/aerospike/version.rb +1 -1
- data/lib/aerospike.rb +156 -146
- metadata +12 -3
@@ -13,26 +13,44 @@
|
|
13
13
|
# See the License for the specific language governing permissions and
|
14
14
|
# limitations under the License.
|
15
15
|
|
16
|
-
require
|
17
|
-
require
|
18
|
-
require
|
19
|
-
|
16
|
+
require "aerospike/policy/priority"
|
17
|
+
require "aerospike/policy/consistency_level"
|
18
|
+
require "aerospike/policy/replica"
|
20
19
|
|
21
20
|
module Aerospike
|
22
21
|
|
23
22
|
# Container object for client policy command.
|
24
23
|
class Policy
|
25
|
-
|
26
|
-
attr_accessor :priority, :timeout, :max_retries, :sleep_between_retries, :consistency_level,
|
24
|
+
attr_accessor :filter_exp, :priority, :timeout, :max_retries, :sleep_between_retries, :consistency_level,
|
27
25
|
:predexp, :fail_on_filtered_out, :replica, :use_compression
|
28
26
|
|
29
27
|
alias total_timeout timeout
|
30
28
|
alias total_timeout= timeout=
|
31
29
|
|
32
|
-
def initialize(opt={})
|
30
|
+
def initialize(opt = {})
|
33
31
|
# Container object for transaction policy attributes used in all database
|
34
32
|
# operation calls.
|
35
33
|
|
34
|
+
# Optional expression filter. If filterExp exists and evaluates to false, the
|
35
|
+
# transaction is ignored.
|
36
|
+
#
|
37
|
+
# Default: nil
|
38
|
+
#
|
39
|
+
# ==== Examples:
|
40
|
+
#
|
41
|
+
# p = Policy.new
|
42
|
+
# p.filter_exp = Exp.build(Exp.eq(Exp.int_bin("a"), Exp.int_val(11)));
|
43
|
+
@filter_exp = opt[:filter_exp]
|
44
|
+
|
45
|
+
# Throw exception if {#filter_exp} is defined and that filter evaluates
|
46
|
+
# to false (transaction ignored). The {AerospikeException}
|
47
|
+
# will contain result code {ResultCode::FILTERED_OUT}.
|
48
|
+
#
|
49
|
+
# This field is not applicable to batch, scan or query commands.
|
50
|
+
#
|
51
|
+
# Default: false
|
52
|
+
@fail_on_filtered_out = opt[:fail_on_filtered_out] || false
|
53
|
+
|
36
54
|
# Priority of request relative to other transactions.
|
37
55
|
# Currently, only used for scans.
|
38
56
|
@priority = opt[:priority] || Priority::DEFAULT
|
@@ -74,7 +92,6 @@ module Aerospike
|
|
74
92
|
# ]
|
75
93
|
@predexp = opt[:predexp] || nil
|
76
94
|
|
77
|
-
|
78
95
|
# Throw exception if @predexp is defined and that filter evaluates
|
79
96
|
# to false (transaction ignored). The Aerospike::Exceptions::Aerospike
|
80
97
|
# will contain result code Aerospike::ResultCode::FILTERED_OUT.
|
@@ -86,7 +103,6 @@ module Aerospike
|
|
86
103
|
# read operation.
|
87
104
|
@consistency_level = opt[:consistency_level] || Aerospike::ConsistencyLevel::CONSISTENCY_ONE
|
88
105
|
|
89
|
-
|
90
106
|
# Send read commands to the node containing the key's partition replica type.
|
91
107
|
# Write commands are not affected by this setting, because all writes are directed
|
92
108
|
# to the node containing the key's master partition.
|
@@ -118,8 +134,5 @@ module Aerospike
|
|
118
134
|
# timeout was not exceeded. Enter zero to skip sleep.
|
119
135
|
@sleep_between_retries = opt[:sleep_between_retries] || 0.5
|
120
136
|
end
|
121
|
-
|
122
|
-
|
123
137
|
end # class
|
124
|
-
|
125
138
|
end # module
|
@@ -22,20 +22,45 @@ module Aerospike
|
|
22
22
|
# Container object for query policy command.
|
23
23
|
class QueryPolicy < Policy
|
24
24
|
|
25
|
+
attr_accessor :concurrent_nodes
|
26
|
+
attr_accessor :max_records
|
25
27
|
attr_accessor :include_bin_data
|
26
28
|
attr_accessor :record_queue_size
|
27
29
|
attr_accessor :records_per_second
|
30
|
+
attr_accessor :socket_timeout
|
31
|
+
attr_accessor :short_query
|
28
32
|
|
29
33
|
def initialize(opt={})
|
30
34
|
super(opt)
|
31
35
|
|
32
|
-
@max_retries = 0
|
33
|
-
|
34
36
|
# Indicates if bin data is retrieved. If false, only record digests (and
|
35
37
|
# user keys if stored on the server) are retrieved.
|
36
38
|
# Default is true.
|
37
39
|
@include_bin_data = opt.fetch(:include_bin_data, true)
|
38
40
|
|
41
|
+
# Approximates the number of records to return to the client. This number is divided by the
|
42
|
+
# number of nodes involved in the query. The actual number of records returned
|
43
|
+
# may be less than MaxRecords if node record counts are small and unbalanced across
|
44
|
+
# nodes.
|
45
|
+
#
|
46
|
+
# This field is supported on server versions >= 4.9.
|
47
|
+
#
|
48
|
+
# Default: 0 (do not limit record count)
|
49
|
+
@max_records = opt.fetch(:max_records) { 0 }
|
50
|
+
|
51
|
+
# Issue scan requests in parallel or serially.
|
52
|
+
@concurrent_nodes = opt.fetch(:concurrent_nodes) { true }
|
53
|
+
|
54
|
+
# Determines network timeout for each attempt.
|
55
|
+
#
|
56
|
+
# If socket_timeout is not zero and socket_timeout is reached before an attempt completes,
|
57
|
+
# the Timeout above is checked. If Timeout is not exceeded, the transaction
|
58
|
+
# is retried. If both socket_timeout and Timeout are non-zero, socket_timeout must be less
|
59
|
+
# than or equal to Timeout, otherwise Timeout will also be used for socket_timeout.
|
60
|
+
#
|
61
|
+
# Default: 30s
|
62
|
+
@socket_timeout = opt[:socket_timeout] || 30000
|
63
|
+
|
39
64
|
# Number of records to place in queue before blocking. Records received
|
40
65
|
# from multiple server nodes will be placed in a queue. A separate thread
|
41
66
|
# consumes these records in parallel. If the queue is full, the producer
|
@@ -49,6 +74,14 @@ module Aerospike
|
|
49
74
|
# Default is 0
|
50
75
|
@records_per_second = opt[:records_per_second] || 0
|
51
76
|
|
77
|
+
# Determine whether the query is expected to return fewer than 100 records.
|
78
|
+
# If true, the server will optimize the query for a small record set.
|
79
|
+
# This field is ignored for aggregation queries, background queries
|
80
|
+
# and server versions 6.0+.
|
81
|
+
#
|
82
|
+
# Default: false
|
83
|
+
@short_query = opt[:short_query] || false
|
84
|
+
|
52
85
|
self
|
53
86
|
end
|
54
87
|
|
@@ -34,8 +34,6 @@ module Aerospike
|
|
34
34
|
def initialize(opt={})
|
35
35
|
super(opt)
|
36
36
|
|
37
|
-
@max_retries = 0
|
38
|
-
|
39
37
|
# Approximates the number of records to return to the client. This number is divided by the
|
40
38
|
# number of nodes involved in the query. The actual number of records returned
|
41
39
|
# may be less than MaxRecords if node record counts are small and unbalanced across
|
@@ -0,0 +1,71 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
# Copyright 2014-2020 Aerospike, Inc.
|
4
|
+
#
|
5
|
+
# Portions may be licensed to Aerospike, Inc. under one or more contributor
|
6
|
+
# license agreements.
|
7
|
+
#
|
8
|
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
|
9
|
+
# use this file except in compliance with the License. You may obtain a copy of
|
10
|
+
# the License at http:#www.apache.org/licenses/LICENSE-2.0
|
11
|
+
#
|
12
|
+
# Unless required by applicable law or agreed to in writing, software
|
13
|
+
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
14
|
+
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
15
|
+
# License for the specific language governing permissions and limitations under
|
16
|
+
# the License.
|
17
|
+
|
18
|
+
module Aerospike
|
19
|
+
class QueryExecutor # :nodoc:
|
20
|
+
def self.query_partitions(cluster, policy, tracker, statement, recordset)
|
21
|
+
interval = policy.sleep_between_retries
|
22
|
+
|
23
|
+
should_retry = false
|
24
|
+
|
25
|
+
loop do
|
26
|
+
# reset last_expn
|
27
|
+
@last_expn = nil
|
28
|
+
|
29
|
+
list = tracker.assign_partitions_to_nodes(cluster, statement.namespace)
|
30
|
+
|
31
|
+
if policy.concurrent_nodes
|
32
|
+
threads = []
|
33
|
+
# Use a thread per node
|
34
|
+
list.each do |node_partition|
|
35
|
+
threads << Thread.new do
|
36
|
+
Thread.current.abort_on_exception = true
|
37
|
+
command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
|
38
|
+
begin
|
39
|
+
command.execute
|
40
|
+
rescue => e
|
41
|
+
@last_expn = e unless e == QUERY_TERMINATED_EXCEPTION
|
42
|
+
should_retry ||= command.should_retry(e)
|
43
|
+
Aerospike.logger.error(e.backtrace.join("\n")) unless e == QUERY_TERMINATED_EXCEPTION
|
44
|
+
end
|
45
|
+
end
|
46
|
+
end
|
47
|
+
threads.each(&:join)
|
48
|
+
else
|
49
|
+
# Use a single thread for all nodes
|
50
|
+
list.each do |node_partition|
|
51
|
+
command = QueryPartitionCommand.new(node_partition.node, tracker, policy, statement, recordset, node_partition)
|
52
|
+
begin
|
53
|
+
command.execute
|
54
|
+
rescue => e
|
55
|
+
@last_expn = e unless e == QUERY_TERMINATED_EXCEPTION
|
56
|
+
should_retry ||= command.should_retry(e)
|
57
|
+
Aerospike.logger.error(e.backtrace.join("\n")) unless e == QUERY_TERMINATED_EXCEPTION
|
58
|
+
end
|
59
|
+
end
|
60
|
+
end
|
61
|
+
|
62
|
+
if tracker.complete?(@cluster, policy) || !should_retry
|
63
|
+
recordset.thread_finished(@last_expn)
|
64
|
+
return
|
65
|
+
end
|
66
|
+
sleep(interval) if policy.sleep_between_retries > 0
|
67
|
+
statement.reset_task_id
|
68
|
+
end
|
69
|
+
end
|
70
|
+
end
|
71
|
+
end
|
@@ -0,0 +1,269 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
# Copyright 2014-2020 Aerospike, Inc.
|
3
|
+
#
|
4
|
+
# Portions may be licensed to Aerospike, Inc. under one or more contributor
|
5
|
+
# license agreements.
|
6
|
+
#
|
7
|
+
# Licensed under the Apache License, Version 2.0 (the "License") you may not
|
8
|
+
# use this file except in compliance with the License. You may obtain a copy of
|
9
|
+
# the License at http:#www.apache.org/licenses/LICENSE-2.0
|
10
|
+
#
|
11
|
+
# Unless required by applicable law or agreed to in writing, software
|
12
|
+
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
13
|
+
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
14
|
+
# License for the specific language governing permissions and limitations under
|
15
|
+
# the License.
|
16
|
+
|
17
|
+
require "aerospike/query/stream_command"
|
18
|
+
require "aerospike/query/recordset"
|
19
|
+
|
20
|
+
module Aerospike
|
21
|
+
private
|
22
|
+
|
23
|
+
class QueryPartitionCommand < QueryCommand #:nodoc:
|
24
|
+
def initialize(node, tracker, policy, statement, recordset, node_partitions)
|
25
|
+
super(node, policy, statement, recordset, @node_partitions)
|
26
|
+
@node_partitions = node_partitions
|
27
|
+
@tracker = tracker
|
28
|
+
end
|
29
|
+
|
30
|
+
def write_buffer
|
31
|
+
function_arg_buffer = nil
|
32
|
+
field_count = 0
|
33
|
+
filter_size = 0
|
34
|
+
bin_name_size = 0
|
35
|
+
|
36
|
+
begin_cmd
|
37
|
+
|
38
|
+
if @statement.namespace
|
39
|
+
@data_offset += @statement.namespace.bytesize + FIELD_HEADER_SIZE
|
40
|
+
field_count += 1
|
41
|
+
end
|
42
|
+
|
43
|
+
if @statement.set_name
|
44
|
+
@data_offset += @statement.set_name.bytesize + FIELD_HEADER_SIZE
|
45
|
+
field_count += 1
|
46
|
+
end
|
47
|
+
|
48
|
+
# Estimate recordsPerSecond field size. This field is used in new servers and not used
|
49
|
+
# (but harmless to add) in old servers.
|
50
|
+
if @policy.records_per_second > 0
|
51
|
+
@data_offset += 4 + FIELD_HEADER_SIZE
|
52
|
+
field_count += 1
|
53
|
+
end
|
54
|
+
|
55
|
+
# Estimate socket timeout field size. This field is used in new servers and not used
|
56
|
+
# (but harmless to add) in old servers.
|
57
|
+
@data_offset += 4 + FIELD_HEADER_SIZE
|
58
|
+
field_count += 1
|
59
|
+
|
60
|
+
# Estimate task_id field.
|
61
|
+
@data_offset += 8 + FIELD_HEADER_SIZE
|
62
|
+
field_count += 1
|
63
|
+
|
64
|
+
filter = @statement.filters[0]
|
65
|
+
bin_names = @statement.bin_names
|
66
|
+
packed_ctx = nil
|
67
|
+
|
68
|
+
if filter
|
69
|
+
col_type = filter.collection_type
|
70
|
+
|
71
|
+
# Estimate INDEX_TYPE field.
|
72
|
+
if col_type > 0
|
73
|
+
@data_offset += FIELD_HEADER_SIZE + 1
|
74
|
+
field_count += 1
|
75
|
+
end
|
76
|
+
|
77
|
+
# Estimate INDEX_RANGE field.
|
78
|
+
@data_offset += FIELD_HEADER_SIZE
|
79
|
+
filter_size += 1 # num filters
|
80
|
+
filter_size += filter.estimate_size
|
81
|
+
|
82
|
+
@data_offset += filter_size
|
83
|
+
field_count += 1
|
84
|
+
|
85
|
+
# TODO: Implement
|
86
|
+
# packed_ctx = filter.packed_ctx
|
87
|
+
# if packed_ctx
|
88
|
+
# @data_offset += FIELD_HEADER_SIZE + packed_ctx.length
|
89
|
+
# field_count+=1
|
90
|
+
# end
|
91
|
+
end
|
92
|
+
|
93
|
+
@statement.set_task_id
|
94
|
+
predexp = @policy.predexp || @statement.predexp
|
95
|
+
|
96
|
+
if predexp
|
97
|
+
@data_offset += FIELD_HEADER_SIZE
|
98
|
+
pred_size = Aerospike::PredExp.estimate_size(predexp)
|
99
|
+
@data_offset += pred_size
|
100
|
+
field_count += 1
|
101
|
+
end
|
102
|
+
|
103
|
+
unless @policy.filter_exp.nil?
|
104
|
+
exp_size = estimate_expression_size(@policy.filter_exp)
|
105
|
+
field_count += 1 if exp_size > 0
|
106
|
+
end
|
107
|
+
|
108
|
+
# Estimate aggregation/background function size.
|
109
|
+
if @statement.function_name
|
110
|
+
@data_offset += FIELD_HEADER_SIZE + 1 # udf type
|
111
|
+
@data_offset += @statement.package_name.bytesize + FIELD_HEADER_SIZE
|
112
|
+
@data_offset += @statement.function_name.bytesize + FIELD_HEADER_SIZE
|
113
|
+
|
114
|
+
function_arg_buffer = ""
|
115
|
+
if @statement.function_args && @statement.function_args.length > 0
|
116
|
+
function_arg_buffer = Value.of(@statement.function_args).to_bytes
|
117
|
+
end
|
118
|
+
@data_offset += FIELD_HEADER_SIZE + function_arg_buffer.bytesize
|
119
|
+
field_count += 4
|
120
|
+
end
|
121
|
+
|
122
|
+
max_records = 0
|
123
|
+
parts_full_size = 0
|
124
|
+
parts_partial_digest_size = 0
|
125
|
+
parts_partial_bval_size = 0
|
126
|
+
|
127
|
+
unless @node_partitions.nil?
|
128
|
+
parts_full_size = @node_partitions.parts_full.length * 2
|
129
|
+
parts_partial_digest_size = @node_partitions.parts_partial.length * 20
|
130
|
+
|
131
|
+
unless filter.nil?
|
132
|
+
parts_partial_bval_size = @node_partitions.parts_partial.length * 8
|
133
|
+
end
|
134
|
+
max_records = @node_partitions.record_max
|
135
|
+
end
|
136
|
+
|
137
|
+
if parts_full_size > 0
|
138
|
+
@data_offset += parts_full_size + FIELD_HEADER_SIZE
|
139
|
+
field_count += 1
|
140
|
+
end
|
141
|
+
|
142
|
+
if parts_partial_digest_size > 0
|
143
|
+
@data_offset += parts_partial_digest_size + FIELD_HEADER_SIZE
|
144
|
+
field_count += 1
|
145
|
+
end
|
146
|
+
|
147
|
+
if parts_partial_bval_size > 0
|
148
|
+
@data_offset += parts_partial_bval_size + FIELD_HEADER_SIZE
|
149
|
+
field_count += 1
|
150
|
+
end
|
151
|
+
|
152
|
+
# Estimate max records field size. This field is used in new servers and not used
|
153
|
+
# (but harmless to add) in old servers.
|
154
|
+
if max_records > 0
|
155
|
+
@data_offset += 8 + FIELD_HEADER_SIZE
|
156
|
+
field_count += 1
|
157
|
+
end
|
158
|
+
|
159
|
+
operation_count = 0
|
160
|
+
unless bin_names.empty?
|
161
|
+
# Estimate size for selected bin names (query bin names already handled for old servers).
|
162
|
+
bin_names.each do |bin_name|
|
163
|
+
estimate_operation_size_for_bin_name(bin_name)
|
164
|
+
end
|
165
|
+
operation_count = bin_names.length
|
166
|
+
end
|
167
|
+
|
168
|
+
projected_offset = @data_offset
|
169
|
+
|
170
|
+
size_buffer
|
171
|
+
|
172
|
+
read_attr = INFO1_READ
|
173
|
+
read_attr |= INFO1_NOBINDATA if !@policy.include_bin_data
|
174
|
+
read_attr |= INFO1_SHORT_QUERY if @policy.short_query
|
175
|
+
|
176
|
+
infoAttr = INFO3_PARTITION_DONE
|
177
|
+
|
178
|
+
write_header(@policy, read_attr, 0, field_count, operation_count)
|
179
|
+
|
180
|
+
write_field_string(@statement.namespace, FieldType::NAMESPACE) if @statement.namespace
|
181
|
+
write_field_string(@statement.set_name, FieldType::TABLE) if @statement.set_name
|
182
|
+
|
183
|
+
# Write records per second.
|
184
|
+
write_field_int(@policy.records_per_second, FieldType::RECORDS_PER_SECOND) if @policy.records_per_second > 0
|
185
|
+
|
186
|
+
write_filter_exp(@policy.filter_exp, exp_size)
|
187
|
+
|
188
|
+
# Write socket idle timeout.
|
189
|
+
write_field_int(@policy.socket_timeout, FieldType::SOCKET_TIMEOUT)
|
190
|
+
|
191
|
+
# Write task_id field
|
192
|
+
write_field_int64(@statement.task_id, FieldType::TRAN_ID)
|
193
|
+
|
194
|
+
unless predexp.nil?
|
195
|
+
write_field_header(pred_size, Aerospike::FieldType::PREDEXP)
|
196
|
+
@data_offset = Aerospike::PredExp.write(
|
197
|
+
predexp, @data_buffer, @data_offset
|
198
|
+
)
|
199
|
+
end
|
200
|
+
|
201
|
+
if filter
|
202
|
+
type = filter.collection_type
|
203
|
+
|
204
|
+
if type > 0
|
205
|
+
write_field_header(1, FieldType::INDEX_TYPE)
|
206
|
+
@data_offset += @data_buffer.write_byte(type, @data_offset)
|
207
|
+
end
|
208
|
+
|
209
|
+
write_field_header(filter_size, FieldType::INDEX_RANGE)
|
210
|
+
@data_offset += @data_buffer.write_byte(1, @data_offset)
|
211
|
+
@data_offset = filter.write(@data_buffer, @data_offset)
|
212
|
+
|
213
|
+
# TODO: Implement
|
214
|
+
# if packed_ctx
|
215
|
+
# write_field_header(packed_ctx.length, FieldType::INDEX_CONTEXT)
|
216
|
+
# @data_buffer.write_binary(packed_ctx, @data_offset)
|
217
|
+
# end
|
218
|
+
end
|
219
|
+
|
220
|
+
if @statement.function_name
|
221
|
+
write_field_header(1, FieldType::UDF_OP)
|
222
|
+
@data_offset += @data_buffer.write_byte(1, @data_offset)
|
223
|
+
write_field_string(@statement.package_name, FieldType::UDF_PACKAGE_NAME)
|
224
|
+
write_field_string(@statement.function_name, FieldType::UDF_FUNCTION)
|
225
|
+
write_field_string(function_arg_buffer, FieldType::UDF_ARGLIST)
|
226
|
+
end
|
227
|
+
|
228
|
+
if parts_full_size > 0
|
229
|
+
write_field_header(parts_full_size, FieldType::PID_ARRAY)
|
230
|
+
@node_partitions.parts_full.each do |part|
|
231
|
+
@data_offset += @data_buffer.write_uint16_little_endian(part.id, @data_offset)
|
232
|
+
end
|
233
|
+
end
|
234
|
+
|
235
|
+
if parts_partial_digest_size > 0
|
236
|
+
write_field_header(parts_partial_digest_size, FieldType::DIGEST_ARRAY)
|
237
|
+
@node_partitions.parts_partial.each do |part|
|
238
|
+
@data_offset += @data_buffer.write_binary(part.digest, @data_offset)
|
239
|
+
end
|
240
|
+
end
|
241
|
+
|
242
|
+
if parts_partial_bval_size > 0
|
243
|
+
write_field_header(parts_partial_bval_size, FieldType::BVAL_ARRAY)
|
244
|
+
@node_partitions.parts_partial.each do |part|
|
245
|
+
@data_offset += @data_buffer.write_uint64_little_endian(part.bval, @data_offset)
|
246
|
+
end
|
247
|
+
end
|
248
|
+
|
249
|
+
if max_records > 0
|
250
|
+
write_field(max_records, FieldType::MAX_RECORDS)
|
251
|
+
end
|
252
|
+
|
253
|
+
unless bin_names.empty?
|
254
|
+
bin_names.each do |bin_name|
|
255
|
+
write_operation_for_bin_name(bin_name, Operation::READ)
|
256
|
+
end
|
257
|
+
end
|
258
|
+
|
259
|
+
end_cmd
|
260
|
+
|
261
|
+
nil
|
262
|
+
end
|
263
|
+
|
264
|
+
def should_retry(e)
|
265
|
+
# !! converts nil to false
|
266
|
+
!!@tracker&.should_retry(@node_partitions, e)
|
267
|
+
end
|
268
|
+
end # class
|
269
|
+
end # module
|
@@ -22,7 +22,6 @@ module Aerospike
|
|
22
22
|
# so the production and the consumption are decoupled
|
23
23
|
# there can be an unlimited count of producer threads and consumer threads
|
24
24
|
class Recordset
|
25
|
-
|
26
25
|
attr_reader :records
|
27
26
|
|
28
27
|
def initialize(queue_size = 5000, thread_count = 1, type)
|
@@ -66,18 +65,21 @@ module Aerospike
|
|
66
65
|
|
67
66
|
# this is called by working threads to signal their job is finished
|
68
67
|
# it decreases the count of active threads and puts an EOF on queue when all threads are finished
|
69
|
-
|
68
|
+
# e is an exception that has happened in the executor, outside of the threads themselves
|
69
|
+
def thread_finished(expn = nil)
|
70
70
|
@active_threads.update do |v|
|
71
71
|
v -= 1
|
72
72
|
@records.enq(nil) if v == 0
|
73
73
|
v
|
74
74
|
end
|
75
|
+
|
76
|
+
raise expn unless expn.nil?
|
75
77
|
end
|
76
78
|
|
77
79
|
# this is called by a thread that faced an exception to signal that the whole operation should terminate
|
78
80
|
# it also may be called by the user to terminate the command in the middle of fetching records from server nodes
|
79
81
|
# it clears the queue so that if any threads are waiting for the queue get unblocked and find out about the cancellation
|
80
|
-
def cancel(expn=nil)
|
82
|
+
def cancel(expn = nil)
|
81
83
|
set_exception(expn)
|
82
84
|
@cancelled.set(true)
|
83
85
|
@records.clear
|
@@ -104,18 +106,16 @@ module Aerospike
|
|
104
106
|
@filters.nil? || @filters.empty?
|
105
107
|
end
|
106
108
|
|
107
|
-
|
109
|
+
private
|
108
110
|
|
109
|
-
def set_exception(expn=nil)
|
111
|
+
def set_exception(expn = nil)
|
110
112
|
expn ||= (@type == :scan ? SCAN_TERMINATED_EXCEPTION : QUERY_TERMINATED_EXCEPTION)
|
111
113
|
@thread_exception.set(expn)
|
112
114
|
end
|
113
|
-
|
114
115
|
end
|
115
116
|
|
116
117
|
private
|
117
118
|
|
118
|
-
|
119
|
-
|
120
|
-
|
119
|
+
SCAN_TERMINATED_EXCEPTION = Aerospike::Exceptions::ScanTerminated.new()
|
120
|
+
QUERY_TERMINATED_EXCEPTION = Aerospike::Exceptions::QueryTerminated.new()
|
121
121
|
end
|
@@ -17,26 +17,28 @@
|
|
17
17
|
|
18
18
|
module Aerospike
|
19
19
|
class ScanExecutor # :nodoc:
|
20
|
-
|
21
20
|
def self.scan_partitions(policy, cluster, tracker, namespace, set_name, recordset, bin_names = nil)
|
22
21
|
interval = policy.sleep_between_retries
|
23
22
|
|
24
23
|
should_retry = false
|
25
24
|
|
26
25
|
loop do
|
26
|
+
# reset last_expn
|
27
|
+
@last_expn = nil
|
28
|
+
|
27
29
|
list = tracker.assign_partitions_to_nodes(cluster, namespace)
|
28
30
|
|
29
31
|
if policy.concurrent_nodes
|
30
32
|
threads = []
|
31
33
|
# Use a thread per node
|
32
34
|
list.each do |node_partition|
|
33
|
-
|
34
35
|
threads << Thread.new do
|
35
36
|
Thread.current.abort_on_exception = true
|
36
37
|
command = ScanPartitionCommand.new(policy, tracker, node_partition, namespace, set_name, bin_names, recordset)
|
37
38
|
begin
|
38
39
|
command.execute
|
39
40
|
rescue => e
|
41
|
+
@last_expn = e unless e == SCAN_TERMINATED_EXCEPTION
|
40
42
|
should_retry ||= command.should_retry(e)
|
41
43
|
Aerospike.logger.error(e.backtrace.join("\n")) unless e == SCAN_TERMINATED_EXCEPTION
|
42
44
|
end
|
@@ -50,6 +52,7 @@ module Aerospike
|
|
50
52
|
begin
|
51
53
|
command.execute
|
52
54
|
rescue => e
|
55
|
+
@last_expn = e unless e == SCAN_TERMINATED_EXCEPTION
|
53
56
|
should_retry ||= command.should_retry(e)
|
54
57
|
Aerospike.logger.error(e.backtrace.join("\n")) unless e == SCAN_TERMINATED_EXCEPTION
|
55
58
|
end
|
@@ -57,13 +60,12 @@ module Aerospike
|
|
57
60
|
end
|
58
61
|
|
59
62
|
if tracker.complete?(@cluster, policy) || !should_retry
|
60
|
-
recordset.thread_finished
|
63
|
+
recordset.thread_finished(@last_expn)
|
61
64
|
return
|
62
65
|
end
|
63
66
|
sleep(interval) if policy.sleep_between_retries > 0
|
67
|
+
statement.reset_task_id
|
64
68
|
end
|
65
69
|
end
|
66
|
-
|
67
70
|
end
|
68
|
-
|
69
71
|
end
|